2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include "webrtc/modules/audio_device/audio_device_config.h"
12 #include "webrtc/modules/audio_device/audio_device_utility.h"
13 #include "webrtc/modules/audio_device/mac/audio_device_mac.h"
15 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h"
16 #include "webrtc/system_wrappers/interface/event_wrapper.h"
17 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
18 #include "webrtc/system_wrappers/interface/trace.h"
20 #include <ApplicationServices/ApplicationServices.h>
22 #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap()
23 #include <mach/mach.h> // mach_task_self()
24 #include <sys/sysctl.h> // sysctlbyname()
// CoreAudio error-handling helper macros. Each wraps a CoreAudio call,
// captures its OSStatus into 'err', and logs it via logCAMsg() as a
// four-character code when the call fails:
//   WEBRTC_CA_RETURN_ON_ERR - log at error level and (presumably) return -1
//                             from the enclosing function; TODO confirm, the
//                             return path is elided in this listing.
//   WEBRTC_CA_LOG_ERR       - log at error level and continue.
//   WEBRTC_CA_LOG_WARN      - log at warning level and continue.
// NOTE(review): this listing is partial -- the do/while wrappers and the
// condition lines of these macros are elided between the numbered lines.
// No comments are interleaved below because the trailing backslashes make
// every following line part of the macro body.
31 #define WEBRTC_CA_RETURN_ON_ERR(expr) \
35 logCAMsg(kTraceError, kTraceAudioDevice, _id, \
36 "Error in " #expr, (const char *)&err); \
41 #define WEBRTC_CA_LOG_ERR(expr) \
45 logCAMsg(kTraceError, kTraceAudioDevice, _id, \
46 "Error in " #expr, (const char *)&err); \
50 #define WEBRTC_CA_LOG_WARN(expr) \
54 logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \
55 "Error in " #expr, (const char *)&err); \
59 #define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
// AtomicSet32: atomically stores newValue into *theValue using
// OSAtomicCompareAndSwap32Barrier (full memory barrier). The retry loop that
// repeats the CAS on contention is elided in this listing -- TODO confirm.
66 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue)
70 int32_t oldValue = *theValue;
71 if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue)
// AtomicGet32: atomically reads *theValue. Swapping the value with itself is
// a trick to get barrier semantics on the read via the same CAS primitive.
79 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue)
83 int32_t value = *theValue;
84 if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true)
91 // CoreAudio errors are best interpreted as four character strings.
// logCAMsg: logs 'msg' followed by the OSStatus rendered as its 4-char code.
// 'err' points at the raw 4 bytes of the OSStatus. On big-endian the bytes
// are already in display order; on little-endian they are printed one byte
// at a time in reverse (err+3 .. err) to flip them.
92 void AudioDeviceMac::logCAMsg(const TraceLevel level,
93 const TraceModule module,
94 const int32_t id, const char *msg,
100 #ifdef WEBRTC_ARCH_BIG_ENDIAN
101 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
103 // We need to flip the characters in this case.
104 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err
// Constructor: initializes every member to a safe default (null pointers,
// kAudioObjectUnknown device IDs, compile-time channel counts) and zeroes
// the four AudioStreamBasicDescription structs. No CoreAudio calls happen
// here; device/thread/semaphore setup is deferred to Init().
// NOTE(review): this listing is partial -- several initializer entries and
// the body braces are elided between the numbered lines below.
109 AudioDeviceMac::AudioDeviceMac(const int32_t id) :
110 _ptrAudioBuffer(NULL),
111 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
112 _stopEventRec(*EventWrapper::Create()),
113 _stopEvent(*EventWrapper::Create()),
114 _captureWorkerThread(NULL),
115 _renderWorkerThread(NULL),
116 _captureWorkerThreadId(0),
117 _renderWorkerThreadId(0),
120 _inputDeviceIndex(0),
121 _outputDeviceIndex(0),
122 _inputDeviceID(kAudioObjectUnknown),
123 _outputDeviceID(kAudioObjectUnknown),
124 _inputDeviceIsSpecified(false),
125 _outputDeviceIsSpecified(false),
126 _recChannels(N_REC_CHANNELS),
127 _playChannels(N_PLAY_CHANNELS),
128 _captureBufData(NULL),
129 _renderBufData(NULL),
130 _playBufType(AudioDeviceModule::kFixedBufferSize),
135 _recIsInitialized(false),
136 _playIsInitialized(false),
138 _renderDeviceIsAlive(1),
139 _captureDeviceIsAlive(1),
144 _macBookProPanRight(false),
145 _captureLatencyUs(0),
149 _renderDelayOffsetSamples(0),
// _playBufDelayFixed: fixed playout buffer delay in ms (used later by
// InitPlayout() to size the device buffer).
150 _playBufDelayFixed(20),
155 _paCaptureBuffer(NULL),
156 _paRenderBuffer(NULL),
157 _captureBufSizeSamples(0),
158 _renderBufSizeSamples(0),
161 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
162 "%s created", __FUNCTION__);
// These asserts are vacuous: a reference's address is never NULL. They only
// document the expectation that EventWrapper::Create() succeeded.
164 assert(&_stopEvent != NULL);
165 assert(&_stopEventRec != NULL);
167 memset(_renderConvertData, 0, sizeof(_renderConvertData));
168 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription));
169 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
170 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription));
171 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription));
// Destructor: releases everything the constructor and Init() allocated --
// worker threads, PortAudio ring buffers, sample buffers, and the mach
// semaphores -- then (per the visible tail) deletes the heap-allocated
// event wrappers. NOTE(review): partial listing; guard braces and the
// deletion of _stopEvent/_critSect are elided here.
175 AudioDeviceMac::~AudioDeviceMac()
177 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
178 "%s destroyed", __FUNCTION__);
// Tear down the worker threads created in Init().
185 if (_captureWorkerThread)
187 delete _captureWorkerThread;
188 _captureWorkerThread = NULL;
191 if (_renderWorkerThread)
193 delete _renderWorkerThread;
194 _renderWorkerThread = NULL;
// Free the PortAudio ring buffer control structs and the sample storage.
199 delete _paRenderBuffer;
200 _paRenderBuffer = NULL;
203 if (_paCaptureBuffer)
205 delete _paCaptureBuffer;
206 _paCaptureBuffer = NULL;
211 delete[] _renderBufData;
212 _renderBufData = NULL;
217 delete[] _captureBufData;
218 _captureBufData = NULL;
// Destroy the mach semaphores created by semaphore_create() in Init();
// failures are logged but otherwise ignored (best effort during teardown).
221 kern_return_t kernErr = KERN_SUCCESS;
222 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
223 if (kernErr != KERN_SUCCESS)
225 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
226 " semaphore_destroy() error: %d", kernErr);
229 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
230 if (kernErr != KERN_SUCCESS)
232 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
233 " semaphore_destroy() error: %d", kernErr);
237 delete &_stopEventRec;
241 // ============================================================================
243 // ============================================================================
// AttachAudioBuffer: stores the shared AudioDeviceBuffer and seeds it with
// this implementation's default sample rates and channel counts. Guarded by
// _critSect since the buffer pointer is read from the audio threads.
245 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
248 CriticalSectionScoped lock(&_critSect);
250 _ptrAudioBuffer = audioBuffer;
252 // inform the AudioBuffer about default settings for this implementation
253 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
254 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
255 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
256 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
// ActiveAudioLayer: this backend always reports the platform-default layer.
259 int32_t AudioDeviceMac::ActiveAudioLayer(
260 AudioDeviceModule::AudioLayer& audioLayer) const
262 audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
// Init: one-time setup for the device module, under _critSect:
//   1. Allocate render/capture sample buffers and PortAudio ring buffers
//      (element counts rounded up to a power of two, as PortAudio requires).
//   2. Create the realtime render and capture worker threads.
//   3. Create the mach semaphores used to pace those threads.
//   4. Tell the HAL to run its notification thread itself (RunLoop = NULL)
//      and register a listener for device-list changes.
//   5. Probe "hw.model" via sysctlbyname() to detect MacBook Pro hardware
//      (used for the internal-speaker right-pan workaround).
// NOTE(review): partial listing -- guard clauses, loop bodies, and return
// statements are elided between the numbered lines.
266 int32_t AudioDeviceMac::Init()
269 CriticalSectionScoped lock(&_critSect);
276 OSStatus err = noErr;
280 // PortAudio ring buffers require an elementCount which is a power of two.
281 if (_renderBufData == NULL)
283 UInt32 powerOfTwo = 1;
284 while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES)
288 _renderBufSizeSamples = powerOfTwo;
289 _renderBufData = new SInt16[_renderBufSizeSamples];
292 if (_paRenderBuffer == NULL)
294 _paRenderBuffer = new PaUtilRingBuffer;
295 ring_buffer_size_t bufSize = -1;
296 bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16),
297 _renderBufSizeSamples,
301 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
302 _id, " PaUtil_InitializeRingBuffer() error");
// Capture side mirrors the render side, but stores Float32 samples.
307 if (_captureBufData == NULL)
309 UInt32 powerOfTwo = 1;
310 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES)
314 _captureBufSizeSamples = powerOfTwo;
315 _captureBufData = new Float32[_captureBufSizeSamples];
318 if (_paCaptureBuffer == NULL)
320 _paCaptureBuffer = new PaUtilRingBuffer;
321 ring_buffer_size_t bufSize = -1;
322 bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer,
324 _captureBufSizeSamples,
328 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
329 _id, " PaUtil_InitializeRingBuffer() error");
// Worker threads run RunRender/RunCapture at realtime priority.
334 if (_renderWorkerThread == NULL)
337 = ThreadWrapper::CreateThread(RunRender, this, kRealtimePriority,
338 "RenderWorkerThread");
339 if (_renderWorkerThread == NULL)
341 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
342 _id, " Render CreateThread() error");
347 if (_captureWorkerThread == NULL)
350 = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority,
351 "CaptureWorkerThread");
352 if (_captureWorkerThread == NULL)
354 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
355 _id, " Capture CreateThread() error");
// Semaphores used by the IOProcs to signal the worker threads.
360 kern_return_t kernErr = KERN_SUCCESS;
361 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
362 SYNC_POLICY_FIFO, 0);
363 if (kernErr != KERN_SUCCESS)
365 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
366 " semaphore_create() error: %d", kernErr);
370 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
371 SYNC_POLICY_FIFO, 0);
372 if (kernErr != KERN_SUCCESS)
374 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
375 " semaphore_create() error: %d", kernErr);
379 // Setting RunLoop to NULL here instructs HAL to manage its own thread for
380 // notifications. This was the default behaviour on OS X 10.5 and earlier,
381 // but now must be explicitly specified. HAL would otherwise try to use the
382 // main thread to issue notifications.
383 AudioObjectPropertyAddress propertyAddress = {
384 kAudioHardwarePropertyRunLoop,
385 kAudioObjectPropertyScopeGlobal,
386 kAudioObjectPropertyElementMaster };
387 CFRunLoopRef runLoop = NULL;
388 UInt32 size = sizeof(CFRunLoopRef);
389 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject,
390 &propertyAddress, 0, NULL, size, &runLoop));
392 // Listen for any device changes.
393 propertyAddress.mSelector = kAudioHardwarePropertyDevices;
394 WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject,
395 &propertyAddress, &objectListenerProc, this));
397 // Determine if this is a MacBook Pro
399 _macBookProPanRight = false;
401 size_t length = sizeof(buf);
402 memset(buf, 0, length);
404 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
// NOTE(review): this trace logs 'err' (the OSStatus, still noErr) instead of
// 'intErr' returned by sysctlbyname() -- looks like a wrong-variable bug;
// confirm against upstream before relying on the logged value.
407 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
408 " Error in sysctlbyname(): %d", err);
411 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
412 " Hardware model: %s", buf);
413 if (strncmp(buf, "MacBookPro", 10) == 0)
// Terminate: undoes Init(). Refuses to proceed while recording or playout
// is still active (visible error traces), closes the mixer manager, removes
// the device-change listener registered in Init(), unloads AudioHardware,
// and clears the initialized/specified flags.
// NOTE(review): partial listing -- the guard conditions for the two error
// traces and the early returns are elided.
429 int32_t AudioDeviceMac::Terminate()
439 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
440 " Recording must be stopped");
446 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
447 " Playback must be stopped");
453 _mixerManager.Close();
455 OSStatus err = noErr;
458 AudioObjectPropertyAddress propertyAddress = {
459 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
460 kAudioObjectPropertyElementMaster };
461 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject,
462 &propertyAddress, &objectListenerProc, this));
// AudioHardwareUnload failure is logged as a 4-char code but not fatal.
464 err = AudioHardwareUnload();
467 logCAMsg(kTraceError, kTraceAudioDevice, _id,
468 "Error in AudioHardwareUnload()", (const char*) &err);
475 _initialized = false;
476 _outputDeviceIsSpecified = false;
477 _inputDeviceIsSpecified = false;
// Initialized: reports whether Init() completed successfully.
482 bool AudioDeviceMac::Initialized() const
484 return (_initialized);
// SpeakerIsAvailable: probes availability by temporarily initializing the
// speaker via InitSpeaker(); restores prior state by closing the mixer if it
// was not initialized on entry.
487 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available)
490 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
492 // Make an attempt to open up the
493 // output mixer corresponding to the currently selected output device.
495 if (!wasInitialized && InitSpeaker() == -1)
501 // Given that InitSpeaker was successful, we know that a valid speaker
505 // Close the initialized output mixer
509 _mixerManager.CloseSpeaker();
// InitSpeaker: binds the selected output device (InitDevice) and opens the
// output mixer on it. The _inputDeviceID == _outputDeviceID comparison
// relates to shared-device handling (_twoDevices); details elided here.
515 int32_t AudioDeviceMac::InitSpeaker()
518 CriticalSectionScoped lock(&_critSect);
525 if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1)
530 if (_inputDeviceID == _outputDeviceID)
538 if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1)
// MicrophoneIsAvailable: same probe pattern as SpeakerIsAvailable, for the
// input side.
546 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available)
549 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
551 // Make an attempt to open up the
552 // input mixer corresponding to the currently selected output device.
554 if (!wasInitialized && InitMicrophone() == -1)
560 // Given that InitMicrophone was successful, we know that a valid microphone
564 // Close the initialized input mixer
568 _mixerManager.CloseMicrophone();
// InitMicrophone: input-side mirror of InitSpeaker.
574 int32_t AudioDeviceMac::InitMicrophone()
577 CriticalSectionScoped lock(&_critSect);
584 if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1)
589 if (_inputDeviceID == _outputDeviceID)
597 if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1)
// Thin delegations to the mixer manager.
605 bool AudioDeviceMac::SpeakerIsInitialized() const
607 return (_mixerManager.SpeakerIsInitialized());
610 bool AudioDeviceMac::MicrophoneIsInitialized() const
612 return (_mixerManager.MicrophoneIsInitialized());
// Speaker volume / mute API. The *IsAvailable methods all follow the same
// probe pattern: remember whether the mixer side was already initialized,
// try to init it, query the capability, and close it again if this call was
// the one that opened it. Simple getters/setters delegate to _mixerManager.
// NOTE(review): partial listing -- braces, 'available' assignments, and
// return statements are elided between the numbered lines.
615 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available)
618 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
620 // Make an attempt to open up the
621 // output mixer corresponding to the currently selected output device.
623 if (!wasInitialized && InitSpeaker() == -1)
625 // If we end up here it means that the selected speaker has no volume
631 // Given that InitSpeaker was successful, we know that a volume control exists
635 // Close the initialized output mixer
639 _mixerManager.CloseSpeaker();
645 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume)
648 return (_mixerManager.SetSpeakerVolume(volume));
// SpeakerVolume: fetches the level from the mixer into a local, then
// (elided) copies it to the out-parameter.
651 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const
656 if (_mixerManager.SpeakerVolume(level) == -1)
// Wave-out volume is a Windows concept; unsupported on this platform.
665 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft,
666 uint16_t volumeRight)
669 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
670 " API call not supported on this platform");
675 AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
676 uint16_t& /*volumeRight*/) const
679 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
680 " API call not supported on this platform");
// Max/min/step-size queries delegate to the mixer manager.
684 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const
689 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
698 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const
703 if (_mixerManager.MinSpeakerVolume(minVol) == -1)
713 AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const
718 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
// SpeakerMuteIsAvailable: probe pattern, querying the mute control.
727 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available)
730 bool isAvailable(false);
731 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
733 // Make an attempt to open up the
734 // output mixer corresponding to the currently selected output device.
736 if (!wasInitialized && InitSpeaker() == -1)
738 // If we end up here it means that the selected speaker has no volume
739 // control, hence it is safe to state that there is no mute control
740 // already at this stage.
745 // Check if the selected speaker has a mute control
747 _mixerManager.SpeakerMuteIsAvailable(isAvailable);
749 available = isAvailable;
751 // Close the initialized output mixer
755 _mixerManager.CloseSpeaker();
761 int32_t AudioDeviceMac::SetSpeakerMute(bool enable)
763 return (_mixerManager.SetSpeakerMute(enable));
766 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const
771 if (_mixerManager.SpeakerMute(muted) == -1)
// Microphone mute / boost API -- same probe-then-restore pattern as the
// speaker methods, on the input mixer. Getters/setters delegate to
// _mixerManager. NOTE(review): partial listing; braces and returns elided.
780 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available)
783 bool isAvailable(false);
784 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
786 // Make an attempt to open up the
787 // input mixer corresponding to the currently selected input device.
789 if (!wasInitialized && InitMicrophone() == -1)
791 // If we end up here it means that the selected microphone has no volume
792 // control, hence it is safe to state that there is no boost control
793 // already at this stage.
798 // Check if the selected microphone has a mute control
800 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
801 available = isAvailable;
803 // Close the initialized input mixer
807 _mixerManager.CloseMicrophone();
813 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable)
815 return (_mixerManager.SetMicrophoneMute(enable));
818 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const
823 if (_mixerManager.MicrophoneMute(muted) == -1)
// Boost availability: identical structure, querying the boost control.
832 int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available)
835 bool isAvailable(false);
836 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
838 // Enumerate all avaliable microphone and make an attempt to open up the
839 // input mixer corresponding to the currently selected input device.
841 if (!wasInitialized && InitMicrophone() == -1)
843 // If we end up here it means that the selected microphone has no volume
844 // control, hence it is safe to state that there is no boost control
845 // already at this stage.
850 // Check if the selected microphone has a boost control
852 _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
853 available = isAvailable;
855 // Close the initialized input mixer
859 _mixerManager.CloseMicrophone();
865 int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable)
868 return (_mixerManager.SetMicrophoneBoost(enable));
871 int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const
876 if (_mixerManager.MicrophoneBoost(onOff) == -1)
// Stereo capability API: availability uses the probe-then-restore pattern;
// the StereoRecording/StereoPlayout getters derive the answer from the
// cached channel counts (_recChannels/_playChannels == 2).
// NOTE(review): partial listing; SetStereoRecording/SetStereoPlayout bodies
// are entirely elided (only the signatures are visible).
885 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available)
888 bool isAvailable(false);
889 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
891 if (!wasInitialized && InitMicrophone() == -1)
893 // Cannot open the specified device
898 // Check if the selected microphone can record stereo
900 _mixerManager.StereoRecordingIsAvailable(isAvailable);
901 available = isAvailable;
903 // Close the initialized input mixer
907 _mixerManager.CloseMicrophone();
913 int32_t AudioDeviceMac::SetStereoRecording(bool enable)
924 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const
927 if (_recChannels == 2)
935 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available)
938 bool isAvailable(false);
939 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
941 if (!wasInitialized && InitSpeaker() == -1)
943 // Cannot open the specified device
948 // Check if the selected microphone can record stereo
950 _mixerManager.StereoPlayoutIsAvailable(isAvailable);
951 available = isAvailable;
953 // Close the initialized input mixer
957 _mixerManager.CloseSpeaker();
963 int32_t AudioDeviceMac::SetStereoPlayout(bool enable)
974 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const
977 if (_playChannels == 2)
// AGC flag accessors (bodies elided in this listing) and the microphone
// volume API, which mirrors the speaker-volume methods on the input mixer.
985 int32_t AudioDeviceMac::SetAGC(bool enable)
993 bool AudioDeviceMac::AGC() const
// MicrophoneVolumeIsAvailable: probe-then-restore pattern on the input side.
999 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available)
1002 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
1004 // Make an attempt to open up the
1005 // input mixer corresponding to the currently selected output device.
1007 if (!wasInitialized && InitMicrophone() == -1)
1009 // If we end up here it means that the selected microphone has no volume
1015 // Given that InitMicrophone was successful, we know that a volume control
1020 // Close the initialized input mixer
1022 if (!wasInitialized)
1024 _mixerManager.CloseMicrophone();
1030 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume)
1033 return (_mixerManager.SetMicrophoneVolume(volume));
1036 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const
1041 if (_mixerManager.MicrophoneVolume(level) == -1)
1043 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1044 " failed to retrive current microphone level");
// Max/min/step-size queries delegate to the mixer manager.
1053 AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const
1058 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
1068 AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const
1073 if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
1083 AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const
1088 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
// Device enumeration and selection. PlayoutDevices/RecordingDevices count
// devices per scope via GetNumberDevices(); SetPlayoutDevice/
// SetRecordingDevice validate the index (rejected while the corresponding
// side is already initialized) and record it; the *DeviceName methods fill
// the caller's buffers via GetDeviceName(). The WindowsDeviceType overloads
// are unsupported on macOS and only log an error.
// NOTE(review): partial listing; range-check braces and returns elided.
// Caution (visible in the code): the 'index > (nDevices - 1)' checks
// underflow when nDevices == 0 because nDevices is unsigned -- confirm
// against upstream before relying on the zero-device behavior.
1097 int16_t AudioDeviceMac::PlayoutDevices()
1100 AudioDeviceID playDevices[MaxNumberDevices];
1101 return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices,
1105 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index)
1108 if (_playIsInitialized)
1113 AudioDeviceID playDevices[MaxNumberDevices];
1114 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
1115 playDevices, MaxNumberDevices);
1116 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1117 " number of availiable waveform-audio output devices is %u",
1120 if (index > (nDevices - 1))
1122 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1123 " device index is out of range [0,%u]", (nDevices - 1))
1127 _outputDeviceIndex = index;
1128 _outputDeviceIsSpecified = true;
1133 int32_t AudioDeviceMac::SetPlayoutDevice(
1134 AudioDeviceModule::WindowsDeviceType /*device*/)
1136 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1137 "WindowsDeviceType not supported");
1141 int32_t AudioDeviceMac::PlayoutDeviceName(
1143 char name[kAdmMaxDeviceNameSize],
1144 char guid[kAdmMaxGuidSize])
1147 const uint16_t nDevices(PlayoutDevices());
1149 if ((index > (nDevices - 1)) || (name == NULL))
1154 memset(name, 0, kAdmMaxDeviceNameSize);
1158 memset(guid, 0, kAdmMaxGuidSize);
1161 return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name);
1164 int32_t AudioDeviceMac::RecordingDeviceName(
1166 char name[kAdmMaxDeviceNameSize],
1167 char guid[kAdmMaxGuidSize])
1170 const uint16_t nDevices(RecordingDevices());
1172 if ((index > (nDevices - 1)) || (name == NULL))
1177 memset(name, 0, kAdmMaxDeviceNameSize);
1181 memset(guid, 0, kAdmMaxGuidSize);
1184 return GetDeviceName(kAudioDevicePropertyScopeInput, index, name);
1187 int16_t AudioDeviceMac::RecordingDevices()
1190 AudioDeviceID recDevices[MaxNumberDevices];
1191 return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices,
1195 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index)
1198 if (_recIsInitialized)
1203 AudioDeviceID recDevices[MaxNumberDevices];
1204 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
1205 recDevices, MaxNumberDevices);
1206 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1207 " number of availiable waveform-audio input devices is %u",
1210 if (index > (nDevices - 1))
1212 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1213 " device index is out of range [0,%u]", (nDevices - 1))
1217 _inputDeviceIndex = index;
1218 _inputDeviceIsSpecified = true;
1225 AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType /*device*/)
1227 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1228 "WindowsDeviceType not supported");
// PlayoutIsAvailable / RecordingIsAvailable: determine availability by
// actually running the full Init -> Start -> Stop cycle. Start/Stop is
// required (not just Init) because the IOProc created by Init*() is only
// destroyed on the Stop*() path -- see the inline comments.
// NOTE(review): partial listing; 'available' assignments and returns elided.
1232 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available)
1237 // Try to initialize the playout side
1238 if (InitPlayout() == -1)
1243 // We destroy the IOProc created by InitPlayout() in implDeviceIOProc().
1244 // We must actually start playout here in order to have the IOProc
1245 // deleted by calling StopPlayout().
1246 if (StartPlayout() == -1)
1251 // Cancel effect of initialization
1252 if (StopPlayout() == -1)
1260 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available)
1265 // Try to initialize the recording side
1266 if (InitRecording() == -1)
1271 // We destroy the IOProc created by InitRecording() in implInDeviceIOProc().
1272 // We must actually start recording here in order to have the IOProc
1273 // deleted by calling StopRecording().
1274 if (StartRecording() == -1)
1279 // Cancel effect of initialization
1280 if (StopRecording() == -1)
// InitPlayout: prepares the output device for rendering, under _critSect:
//   1. Preconditions: an output device must be specified; a no-op if playout
//      is already initialized. InitSpeaker() re-binds the (possibly changed)
//      device; MicrophoneIsAvailable() is called to refresh _twoDevices.
//   2. Applies the MacBook Pro internal-speaker workaround: if the data
//      source is 'ispk' (internal speakers), set _macBookProPanRight so
//      output is panned right, away from the mic under the left grille.
//   3. Reads and validates the device stream format (linear PCM,
//      interleaved, <= N_DEVICE_CHANNELS), logs it, builds the desired
//      16-bit interleaved format, and creates an AudioConverter.
//   4. Sizes the device buffer from _playBufDelayFixed, clamped to the
//      device's kAudioDevicePropertyBufferSizeRange.
//   5. Accumulates device + stream latency into _renderLatencyUs.
//   6. Registers listeners for format changes and processor overloads, and
//      creates the IOProc (when using two devices or recording is not yet
//      initialized), then marks _playIsInitialized.
// Returns 0 on success; WEBRTC_CA_RETURN_ON_ERR aborts on CoreAudio errors.
// NOTE(review): partial listing -- guard clauses, braces, and returns are
// elided between the numbered lines below.
1288 int32_t AudioDeviceMac::InitPlayout()
1291 CriticalSectionScoped lock(&_critSect);
1298 if (!_outputDeviceIsSpecified)
1303 if (_playIsInitialized)
1308 // Initialize the speaker (devices might have been added or removed)
1309 if (InitSpeaker() == -1)
1311 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1312 " InitSpeaker() failed");
1315 if (!MicrophoneIsInitialized())
1317 // Make this call to check if we are using
1318 // one or two devices (_twoDevices)
1319 bool available = false;
1320 if (MicrophoneIsAvailable(available) == -1)
1322 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1323 " MicrophoneIsAvailable() failed");
// Reset render-side state before (re)initialization.
1327 PaUtil_FlushRingBuffer(_paRenderBuffer);
1329 OSStatus err = noErr;
1331 _renderDelayOffsetSamples = 0;
1333 _renderLatencyUs = 0;
1334 _renderDeviceIsAlive = 1;
1337 // The internal microphone of a MacBook Pro is located under the left speaker
1338 // grille. When the internal speakers are in use, we want to fully stereo
1339 // pan to the right.
1340 AudioObjectPropertyAddress
1341 propertyAddress = { kAudioDevicePropertyDataSource,
1342 kAudioDevicePropertyScopeOutput, 0 };
1345 _macBookProPanRight = false;
1346 Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
// 'ispk' is the data-source code for the built-in internal speakers.
1350 UInt32 dataSource = 0;
1351 size = sizeof(dataSource);
1352 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID,
1353 &propertyAddress, 0, NULL, &size, &dataSource));
1355 if (dataSource == 'ispk')
1357 _macBookProPanRight = true;
1358 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
1360 "MacBook Pro using internal speakers; stereo"
1364 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice,
1365 _id, "MacBook Pro not using internal speakers");
1368 // Add a listener to determine if the status changes.
1369 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
1370 &propertyAddress, &objectListenerProc, this));
1374 // Get current stream description
1375 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1376 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat));
1377 size = sizeof(_outStreamFormat);
1378 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
1379 &propertyAddress, 0, NULL, &size, &_outStreamFormat));
1381 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM)
1383 logCAMsg(kTraceError, kTraceAudioDevice, _id,
1384 "Unacceptable output stream format -> mFormatID",
1385 (const char *) &_outStreamFormat.mFormatID);
1389 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
1391 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1392 "Too many channels on output device (mChannelsPerFrame = %d)",
1393 _outStreamFormat.mChannelsPerFrame);
1397 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved)
1399 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1400 "Non-interleaved audio data is not supported.",
1401 "AudioHardware streams should not have this format.");
// Informational dump of the device's native stream format.
1405 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1406 "Ouput stream format:");
1407 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1408 "mSampleRate = %f, mChannelsPerFrame = %u",
1409 _outStreamFormat.mSampleRate,
1410 _outStreamFormat.mChannelsPerFrame);
1411 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1412 "mBytesPerPacket = %u, mFramesPerPacket = %u",
1413 _outStreamFormat.mBytesPerPacket,
1414 _outStreamFormat.mFramesPerPacket);
1415 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1416 "mBytesPerFrame = %u, mBitsPerChannel = %u",
1417 _outStreamFormat.mBytesPerFrame,
1418 _outStreamFormat.mBitsPerChannel);
1419 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1420 "mFormatFlags = %u",
1421 _outStreamFormat.mFormatFlags);
1422 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
1423 (const char *) &_outStreamFormat.mFormatID);
1425 // Our preferred format to work with
1426 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
1427 if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
1429 _outDesiredFormat.mChannelsPerFrame = 2;
1432 // Disable stereo playout when we only have one channel on the device.
1433 _outDesiredFormat.mChannelsPerFrame = 1;
1435 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1436 "Stereo playout unavailable on this device");
1439 if (_ptrAudioBuffer)
1441 // Update audio buffer with the selected parameters
1442 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
1443 _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
// Initial delay offset: remaining headroom in the render ring buffer.
1446 _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
1447 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame;
1449 _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame
1451 _outDesiredFormat.mFramesPerPacket = 1; // In uncompressed audio,
1452 // a packet is one frame.
1453 _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame
1455 _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1457 _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1458 | kLinearPCMFormatFlagIsPacked;
1459 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1460 _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1462 _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;
// Converter from our desired 16-bit format to the device's native format.
1464 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &_outStreamFormat,
1465 &_renderConverter));
1467 // First try to set buffer size to desired value (_playBufDelayFixed)
1468 UInt32 bufByteCount = (UInt32)((_outStreamFormat.mSampleRate / 1000.0)
1469 * _playBufDelayFixed * _outStreamFormat.mChannelsPerFrame
1471 if (_outStreamFormat.mFramesPerPacket != 0)
1473 if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0)
// Round the buffer size up to a whole number of packets.
1475 bufByteCount = ((UInt32)(bufByteCount
1476 / _outStreamFormat.mFramesPerPacket) + 1)
1477 * _outStreamFormat.mFramesPerPacket;
1481 // Ensure the buffer size is within the acceptable range provided by the device.
1482 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1483 AudioValueRange range;
1484 size = sizeof(range);
1485 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
1486 &propertyAddress, 0, NULL, &size, &range));
1487 if (range.mMinimum > bufByteCount)
1489 bufByteCount = range.mMinimum;
1490 } else if (range.mMaximum < bufByteCount)
1492 bufByteCount = range.mMaximum;
1495 propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1496 size = sizeof(bufByteCount);
1497 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID,
1498 &propertyAddress, 0, NULL, size, &bufByteCount));
1500 // Get render device latency
1501 propertyAddress.mSelector = kAudioDevicePropertyLatency;
1503 size = sizeof(UInt32);
1504 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
1505 &propertyAddress, 0, NULL, &size, &latency));
1506 _renderLatencyUs = (uint32_t) ((1.0e6 * latency)
1507 / _outStreamFormat.mSampleRate);
1509 // Get render stream latency
1510 propertyAddress.mSelector = kAudioDevicePropertyStreams;
1511 AudioStreamID stream = 0;
1512 size = sizeof(AudioStreamID);
1513 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
1514 &propertyAddress, 0, NULL, &size, &stream));
1515 propertyAddress.mSelector = kAudioStreamPropertyLatency;
1516 size = sizeof(UInt32);
1518 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID,
1519 &propertyAddress, 0, NULL, &size, &latency));
1520 _renderLatencyUs += (uint32_t) ((1.0e6 * latency)
1521 / _outStreamFormat.mSampleRate);
1523 // Listen for format changes
1524 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1525 WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_outputDeviceID,
1526 &propertyAddress, &objectListenerProc, this));
1528 // Listen for processor overloads
1529 propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1530 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID,
1531 &propertyAddress, &objectListenerProc, this));
// With a shared device whose recording side is already initialized, the
// existing IOProc is reused; otherwise create one for playout.
1533 if (_twoDevices || !_recIsInitialized)
1535 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID,
1536 deviceIOProc, this, &_deviceIOProcID));
1539 // Mark playout side as initialized
1540 _playIsInitialized = true;
1542 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1543 " initial playout status: _renderDelayOffsetSamples=%d,"
1544 " _renderDelayUs=%d, _renderLatencyUs=%d",
1545 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
// Initializes the capture (recording) side of the audio device:
//  - initializes the microphone mixer and (via SpeakerIsAvailable) determines
//    whether input and output use one shared device or two (_twoDevices),
//  - reads the input device's current stream format and validates it
//    (linear PCM, channel count within N_DEVICE_CHANNELS),
//  - builds the desired 16-bit signed PCM capture format and creates the
//    capture AudioConverter,
//  - sizes the device IO buffer, accumulates device + stream latency into
//    _captureLatencyUs, registers property listeners, and creates the IOProc.
// Returns -1 via WEBRTC_CA_RETURN_ON_ERR on any CoreAudio error.
// NOTE(review): this listing has elided lines (braces, early returns, the
// declarations of `size`/`latency`); verify against upstream
// audio_device_mac.cc before relying on exact control flow.
1550 int32_t AudioDeviceMac::InitRecording()
1553     CriticalSectionScoped lock(&_critSect);
1560     if (!_inputDeviceIsSpecified)
1565     if (_recIsInitialized)
1570     // Initialize the microphone (devices might have been added or removed)
1571     if (InitMicrophone() == -1)
1573         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1574                      "  InitMicrophone() failed");
1577     if (!SpeakerIsInitialized())
1579         // Make this call to check if we are using
1580         // one or two devices (_twoDevices)
1581         bool available = false;
1582         if (SpeakerIsAvailable(available) == -1)
1584             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1585                          "  SpeakerIsAvailable() failed");
1589     OSStatus err = noErr;
// Reset capture state: flush any stale samples and mark the device alive.
1592     PaUtil_FlushRingBuffer(_paCaptureBuffer);
1594     _captureDelayUs = 0;
1595     _captureLatencyUs = 0;
1596     _captureDeviceIsAlive = 1;
1599     // Get current stream description
1600     AudioObjectPropertyAddress
1601         propertyAddress = { kAudioDevicePropertyStreamFormat,
1602                 kAudioDevicePropertyScopeInput, 0 };
1603     memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
1604     size = sizeof(_inStreamFormat);
1605     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
1606             &propertyAddress, 0, NULL, &size, &_inStreamFormat));
// Reject anything that is not linear PCM — the converter below assumes it.
1608     if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM)
1610         logCAMsg(kTraceError, kTraceAudioDevice, _id,
1611                  "Unacceptable input stream format -> mFormatID",
1612                  (const char *) &_inStreamFormat.mFormatID);
1616     if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
1618         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1619             "Too many channels on input device (mChannelsPerFrame = %d)",
1620             _inStreamFormat.mChannelsPerFrame);
// One IO block (N_BLOCKS_IO * 10 ms) must fit in the PortAudio ring buffer.
1624     const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
1625         _inStreamFormat.mSampleRate / 100 * N_BLOCKS_IO;
1626     if (io_block_size_samples > _captureBufSizeSamples)
1628         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1629             "Input IO block size (%d) is larger than ring buffer (%u)",
1630             io_block_size_samples, _captureBufSizeSamples);
1634     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1635                  " Input stream format:");
1636     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1637                  " mSampleRate = %f, mChannelsPerFrame = %u",
1638                  _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
1639     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1640                  " mBytesPerPacket = %u, mFramesPerPacket = %u",
1641                  _inStreamFormat.mBytesPerPacket,
1642                  _inStreamFormat.mFramesPerPacket);
1643     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1644                  " mBytesPerFrame = %u, mBitsPerChannel = %u",
1645                  _inStreamFormat.mBytesPerFrame,
1646                  _inStreamFormat.mBitsPerChannel);
1647     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1648                  " mFormatFlags = %u",
1649                  _inStreamFormat.mFormatFlags);
1650     logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
1651              (const char *) &_inStreamFormat.mFormatID);
1653     // Our preferred format to work with
// Use stereo capture only when both the device and the configured
// _recChannels support it; otherwise fall back to mono.
1654     if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
1656         _inDesiredFormat.mChannelsPerFrame = 2;
1659         // Disable stereo recording when we only have one channel on the device.
1660         _inDesiredFormat.mChannelsPerFrame = 1;
1662         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1663                      "Stereo recording unavailable on this device");
1666     if (_ptrAudioBuffer)
1668         // Update audio buffer with the selected parameters
1669         _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
1670         _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
// Desired capture format: packed signed 16-bit linear PCM at
// N_REC_SAMPLES_PER_SEC; the AudioConverter resamples from the device format.
1673     _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
1674     _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame
1676     _inDesiredFormat.mFramesPerPacket = 1;
1677     _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame
1679     _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;
1681     _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1682         | kLinearPCMFormatFlagIsPacked;
1683 #ifdef WEBRTC_ARCH_BIG_ENDIAN
1684     _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
1686     _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;
1688     WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
1689                                               &_captureConverter));
1691     // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
1692     // TODO(xians): investigate this block.
1693     UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0)
1694         * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame
// Round the buffer size up to a whole number of packets if needed.
1696     if (_inStreamFormat.mFramesPerPacket != 0)
1698         if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0)
1700             bufByteCount = ((UInt32)(bufByteCount
1701                 / _inStreamFormat.mFramesPerPacket) + 1)
1702                 * _inStreamFormat.mFramesPerPacket;
1706     // Ensure the buffer size is within the acceptable range provided by the device.
1707     propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
1708     AudioValueRange range;
1709     size = sizeof(range);
1710     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
1711             &propertyAddress, 0, NULL, &size, &range));
1712     if (range.mMinimum > bufByteCount)
1714         bufByteCount = range.mMinimum;
1715     } else if (range.mMaximum < bufByteCount)
1717         bufByteCount = range.mMaximum;
1720     propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
1721     size = sizeof(bufByteCount);
1722     WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID,
1723             &propertyAddress, 0, NULL, size, &bufByteCount));
1725     // Get capture device latency
1726     propertyAddress.mSelector = kAudioDevicePropertyLatency;
1728     size = sizeof(UInt32);
1729     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
1730             &propertyAddress, 0, NULL, &size, &latency));
// Convert the latency (reported in frames) to microseconds.
1731     _captureLatencyUs = (UInt32)((1.0e6 * latency)
1732         / _inStreamFormat.mSampleRate);
1734     // Get capture stream latency
1735     propertyAddress.mSelector = kAudioDevicePropertyStreams;
1736     AudioStreamID stream = 0;
1737     size = sizeof(AudioStreamID);
1738     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
1739             &propertyAddress, 0, NULL, &size, &stream));
1740     propertyAddress.mSelector = kAudioStreamPropertyLatency;
1741     size = sizeof(UInt32);
1743     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID,
1744             &propertyAddress, 0, NULL, &size, &latency));
1745     _captureLatencyUs += (UInt32)((1.0e6 * latency)
1746         / _inStreamFormat.mSampleRate);
1748     // Listen for format changes
1749     // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
1750     propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
1751     WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_inputDeviceID,
1752             &propertyAddress, &objectListenerProc, this));
1754     // Listen for processor overloads
1755     propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1756     WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID,
1757             &propertyAddress, &objectListenerProc, this));
// Two devices (or playout not initialized): dedicated input IOProc.
// Shared device with playout already initialized: reuse deviceIOProc.
1761         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
1762                 inDeviceIOProc, this, &_inDeviceIOProcID));
1763     } else if (!_playIsInitialized)
1765         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID,
1766                 deviceIOProc, this, &_deviceIOProcID));
1769     // Mark recording side as initialized
1770     _recIsInitialized = true;
// Starts capture: launches the capture worker thread and starts the input
// device's IOProc. For a shared device whose IOProc is already driven by
// playout (_playing), no extra AudioDeviceStart is issued.
// Requires InitRecording() to have succeeded (_recIsInitialized).
// NOTE(review): braces/returns and the `_recording = true` line appear to be
// elided from this listing; verify against upstream audio_device_mac.cc.
1775 int32_t AudioDeviceMac::StartRecording()
1778     CriticalSectionScoped lock(&_critSect);
1780     if (!_recIsInitialized)
1792         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1793                      "  Recording worker thread has not been started");
1797     OSStatus err = noErr;
1799     unsigned int threadID(0);
1800     if (_captureWorkerThread != NULL)
1802         _captureWorkerThread->Start(threadID);
1804     _captureWorkerThreadId = threadID;
// Dedicated input IOProc for two devices; shared deviceIOProc otherwise.
1808         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
1809     } else if (!_playing)
1811         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
// Stops capture. Signals the IOProc to stop itself (_doStopRec for a
// dedicated input device, _doStop for a shared device) and waits up to 2 s on
// the corresponding stop event; on timeout the device is assumed removed and
// the IOProc is stopped/destroyed directly. Afterwards the capture worker
// thread is stopped, the capture converter disposed, and the input-device
// property listeners removed.
// NOTE(review): this listing has elided lines (braces, the _recording flag
// reset, and the WEBRTC_CA_LOG_WARN wrapper on the destroy call at 1850);
// verify against upstream audio_device_mac.cc.
1822     CriticalSectionScoped lock(&_critSect);
1824     if (!_recIsInitialized)
1829     OSStatus err = noErr;
1832     int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive);
// Dedicated-input-device path: only stop if capture is running and the
// device was not already detected as removed.
1835     if (_recording && captureDeviceIsAlive == 1)
1838         _doStopRec = true; // Signal to io proc to stop audio device
1839         _critSect.Leave(); // Cannot be under lock, risk of deadlock
1840         if (kEventTimeout == _stopEventRec.Wait(2000))
1842             CriticalSectionScoped critScoped(&_critSect);
1843             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1844                          " Timed out stopping the capture IOProc. "
1845                          "We may have failed to detect a device removal.");
1847             WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID,
1848                                                _inDeviceIOProcID));
1850                 AudioDeviceDestroyIOProcID(_inputDeviceID,
1851                                            _inDeviceIOProcID));
1855         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1856                      " Recording stopped");
1861         // We signal a stop for a shared device even when rendering has
1862         // not yet ended. This is to ensure the IOProc will return early as
1863         // intended (by checking |_recording|) before accessing
1864         // resources we free below (e.g. the capture converter).
1866         // In the case of a shared devcie, the IOProc will verify
1867         // rendering has ended before stopping itself.
1868         if (_recording && captureDeviceIsAlive == 1)
1871             _doStop = true; // Signal to io proc to stop audio device
1872             _critSect.Leave(); // Cannot be under lock, risk of deadlock
1873             if (kEventTimeout == _stopEvent.Wait(2000))
1875                 CriticalSectionScoped critScoped(&_critSect);
1876                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1877                              " Timed out stopping the shared IOProc. "
1878                              "We may have failed to detect a device removal.");
1880                 // We assume rendering on a shared device has stopped as well if
1881                 // the IOProc times out.
1882                 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
1884                 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
1889             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1890                          " Recording stopped (shared)");
1894     // Setting this signal will allow the worker thread to be stopped.
1895     AtomicSet32(&_captureDeviceIsAlive, 0);
1897     if (_captureWorkerThread != NULL)
1899         if (!_captureWorkerThread->Stop())
1901             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1902                          " Timed out waiting for the render worker thread to "
1908     WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));
1910     // Remove listeners.
1911     AudioObjectPropertyAddress
1912         propertyAddress = { kAudioDevicePropertyStreamFormat,
1913                 kAudioDevicePropertyScopeInput, 0 };
1914     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
1915             &propertyAddress, &objectListenerProc, this));
1917     propertyAddress.mSelector = kAudioDeviceProcessorOverload;
1918     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID,
1919             &propertyAddress, &objectListenerProc, this));
1921     _recIsInitialized = false;
1927 bool AudioDeviceMac::RecordingIsInitialized() const
1929 return (_recIsInitialized);
1932 bool AudioDeviceMac::Recording() const
1934 return (_recording);
1937 bool AudioDeviceMac::PlayoutIsInitialized() const
1939 return (_playIsInitialized);
// Starts playout: launches the render worker thread and starts the output
// device's IOProc. For a shared device already being driven by capture
// (_recording), no extra AudioDeviceStart is needed.
// Requires InitPlayout() to have succeeded (_playIsInitialized).
// NOTE(review): braces/returns and the `_playing = true` line appear to be
// elided from this listing; verify against upstream audio_device_mac.cc.
1942 int32_t AudioDeviceMac::StartPlayout()
1945     CriticalSectionScoped lock(&_critSect);
1947     if (!_playIsInitialized)
1957     OSStatus err = noErr;
1959     unsigned int threadID(0);
1960     if (_renderWorkerThread != NULL)
1962         _renderWorkerThread->Start(threadID);
1964     _renderWorkerThreadId = threadID;
1966     if (_twoDevices || !_recording)
1968         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
// Stops playout. Signals the IOProc to stop itself (_doStop) and waits up to
// 2 s on _stopEvent; on timeout the output device is assumed removed and the
// IOProc is stopped/destroyed directly. Then the render worker thread is
// stopped, the render converter disposed, and the output-device property
// listeners (stream format, processor overload, and — on MacBook Pro — data
// source) are removed.
// NOTE(review): this listing has elided lines (braces, the _playing flag
// reset, the IOProc ID arguments at 2009/2011, and the hasProperty
// conditional around 2049-2055); verify against upstream audio_device_mac.cc.
1978     CriticalSectionScoped lock(&_critSect);
1980     if (!_playIsInitialized)
1985     OSStatus err = noErr;
1987     int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive);
1988     if (_playing && renderDeviceIsAlive == 1)
1990         // We signal a stop for a shared device even when capturing has not
1991         // yet ended. This is to ensure the IOProc will return early as
1992         // intended (by checking |_playing|) before accessing resources we
1993         // free below (e.g. the render converter).
1995         // In the case of a shared device, the IOProc will verify capturing
1996         // has ended before stopping itself.
1998         _doStop = true; // Signal to io proc to stop audio device
1999         _critSect.Leave(); // Cannot be under lock, risk of deadlock
2000         if (kEventTimeout == _stopEvent.Wait(2000))
2002             CriticalSectionScoped critScoped(&_critSect);
2003             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2004                          " Timed out stopping the render IOProc. "
2005                          "We may have failed to detect a device removal.");
2007             // We assume capturing on a shared device has stopped as well if the
2008             // IOProc times out.
2009             WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID,
2011             WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
2016         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2020     // Setting this signal will allow the worker thread to be stopped.
2021     AtomicSet32(&_renderDeviceIsAlive, 0);
2023     if (_renderWorkerThread != NULL)
2025         if (!_renderWorkerThread->Stop())
2027             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2028                          " Timed out waiting for the render worker thread to "
2034     WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter));
2036     // Remove listeners.
2037     AudioObjectPropertyAddress propertyAddress = {
2038             kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput,
2040     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
2041             &propertyAddress, &objectListenerProc, this));
2043     propertyAddress.mSelector = kAudioDeviceProcessorOverload;
2044     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
2045             &propertyAddress, &objectListenerProc, this));
// The data-source listener is only removed if the device actually has the
// property (checked via hasProperty below).
2049         Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID,
2053             propertyAddress.mSelector = kAudioDevicePropertyDataSource;
2054             WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID,
2055                     &propertyAddress, &objectListenerProc, this));
2059     _playIsInitialized = false;
2065 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const
2067 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
2068 delayMS = static_cast<uint16_t> (1e-3 * (renderDelayUs + _renderLatencyUs) +
2073 int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const
2075 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
2076 delayMS = static_cast<uint16_t> (1e-3 * (captureDelayUs +
2077 _captureLatencyUs) + 0.5);
2081 bool AudioDeviceMac::Playing() const
2086 int32_t AudioDeviceMac::SetPlayoutBuffer(
2087 const AudioDeviceModule::BufferType type,
2091 if (type != AudioDeviceModule::kFixedBufferSize)
2093 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2094 " Adaptive buffer size not supported on this platform");
2098 _playBufType = type;
2099 _playBufDelayFixed = sizeMS;
2103 int32_t AudioDeviceMac::PlayoutBuffer(
2104 AudioDeviceModule::BufferType& type,
2105 uint16_t& sizeMS) const
2108 type = _playBufType;
2109 sizeMS = _playBufDelayFixed;
2114 // Not implemented for Mac.
2115 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const
2118 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2119 " API call not supported on this platform");
2124 bool AudioDeviceMac::PlayoutWarning() const
2126 return (_playWarning > 0);
2129 bool AudioDeviceMac::PlayoutError() const
2131 return (_playError > 0);
2134 bool AudioDeviceMac::RecordingWarning() const
2136 return (_recWarning > 0);
2139 bool AudioDeviceMac::RecordingError() const
2141 return (_recError > 0);
2144 void AudioDeviceMac::ClearPlayoutWarning()
2149 void AudioDeviceMac::ClearPlayoutError()
2154 void AudioDeviceMac::ClearRecordingWarning()
2159 void AudioDeviceMac::ClearRecordingError()
2164 // ============================================================================
2166 // ============================================================================
// Enumerates audio devices for the given scope (input or output) into
// scopedDeviceIds, putting the system default device first, then every
// hardware device that has at least one buffer in that scope. Returns the
// number of devices written, or a negative value on error.
// NOTE(review): the return-type line, braces, and the free() calls for the
// malloc'd deviceIds/bufferList are elided from this listing; verify the
// cleanup paths against upstream audio_device_mac.cc.
2169 AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
2170                                  AudioDeviceID scopedDeviceIds[],
2171                                  const uint32_t deviceListLength)
2173     OSStatus err = noErr;
2175     AudioObjectPropertyAddress propertyAddress = {
2176             kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal,
2177             kAudioObjectPropertyElementMaster };
2179     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject,
2180             &propertyAddress, 0, NULL, &size));
2183         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2188     AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size);
2189     UInt32 numberDevices = size / sizeof(AudioDeviceID);
2190     AudioBufferList* bufferList = NULL;
2191     UInt32 numberScopedDevices = 0;
2193     // First check if there is a default device and list it
2194     UInt32 hardwareProperty = 0;
2195     if (scope == kAudioDevicePropertyScopeOutput)
2197         hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
2200         hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
2203     AudioObjectPropertyAddress
2204         propertyAddressDefault = { hardwareProperty,
2205                                    kAudioObjectPropertyScopeGlobal,
2206                                    kAudioObjectPropertyElementMaster };
2208     AudioDeviceID usedID;
2209     UInt32 uintSize = sizeof(UInt32);
2210     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
2211             &propertyAddressDefault, 0, NULL, &uintSize, &usedID));
2212     if (usedID != kAudioDeviceUnknown)
2214         scopedDeviceIds[numberScopedDevices] = usedID;
2215         numberScopedDevices++;
2218         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2219                      "GetNumberDevices(): Default device unknown");
2222     // Then list the rest of the devices
2225     WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
2226             &propertyAddress, 0, NULL, &size, deviceIds));
// For each device, check whether it has buffers (i.e. channels) in the
// requested scope before including it in the result list.
2232     propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
2233     propertyAddress.mScope = scope;
2234     propertyAddress.mElement = 0;
2235     for (UInt32 i = 0; i < numberDevices; i++)
2237         // Check for input channels
2238         WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i],
2239                 &propertyAddress, 0, NULL, &size));
2240         if (err == kAudioHardwareBadDeviceError)
2242             // This device doesn't actually exist; continue iterating.
2244         } else if (err != noErr)
2250         bufferList = (AudioBufferList*) malloc(size);
2251         WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i],
2252                 &propertyAddress, 0, NULL, &size, bufferList));
2259         if (bufferList->mNumberBuffers > 0)
2261             if (numberScopedDevices >= deviceListLength)
2263                 WEBRTC_TRACE(kTraceError,
2264                              kTraceAudioDevice, _id,
2265                              "Device list is not long enough");
2270             scopedDeviceIds[numberScopedDevices] = deviceIds[i];
2271             numberScopedDevices++;
2303     return numberScopedDevices;
// Looks up the human-readable name of the device at `index` within `scope`.
// Index 0 maps to the system default device (the name is prefixed with
// "default (...)"); other indices map into the list produced by
// GetNumberDevices(). Returns via WEBRTC_CA_RETURN_ON_ERR on CoreAudio
// errors.
// NOTE(review): the `name` output parameter, `devName` buffer declaration,
// braces, and returns are elided from this listing. The sprintf() into
// `name` has no bounds check — presumably `name` is kAdmMaxDeviceNameSize;
// confirm and consider snprintf upstream.
2307 AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
2308                               const uint16_t index,
2311     OSStatus err = noErr;
2312     UInt32 len = kAdmMaxDeviceNameSize;
2313     AudioDeviceID deviceIds[MaxNumberDevices];
2315     int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
2316     if (numberDevices < 0)
2319     } else if (numberDevices == 0)
2321         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2326     // If the number is below the number of devices, assume it's "WEBRTC ID"
2327     // otherwise assume it's a CoreAudio ID
2328     AudioDeviceID usedID;
2330     // Check if there is a default device
2331     bool isDefaultDevice = false;
2334         UInt32 hardwareProperty = 0;
2335         if (scope == kAudioDevicePropertyScopeOutput)
2337             hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
2340             hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
2342         AudioObjectPropertyAddress propertyAddress = { hardwareProperty,
2343                 kAudioObjectPropertyScopeGlobal,
2344                 kAudioObjectPropertyElementMaster };
2345         UInt32 size = sizeof(UInt32);
2346         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
2347                 &propertyAddress, 0, NULL, &size, &usedID));
2348         if (usedID == kAudioDeviceUnknown)
2350             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2351                          "GetDeviceName(): Default device unknown");
2354             isDefaultDevice = true;
2358     AudioObjectPropertyAddress propertyAddress = {
2359             kAudioDevicePropertyDeviceName, scope, 0 };
2361     if (isDefaultDevice)
// Default device: fetch its raw name into devName, then decorate it.
2365         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
2366                 &propertyAddress, 0, NULL, &len, devName));
2368         sprintf(name, "default (%s)", devName);
2371         if (index < numberDevices)
2373             usedID = deviceIds[index];
// Non-default device: read the name directly into the caller's buffer.
2379         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID,
2380                 &propertyAddress, 0, NULL, &len, name));
// Resolves userDeviceIndex to a concrete AudioDeviceID for the requested
// direction (input when isInput, output otherwise). Index 0 selects the
// system default device; other indices select from GetNumberDevices().
// The device's name and manufacturer are queried both for logging and as a
// validity check of the chosen ID.
// NOTE(review): the `isInput` parameter line, `size` declaration, the
// devName/devManf buffer declarations, braces, and returns are elided from
// this listing; verify against upstream audio_device_mac.cc.
2386 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
2387                                    AudioDeviceID& deviceId,
2390     OSStatus err = noErr;
2392     AudioObjectPropertyScope deviceScope;
2393     AudioObjectPropertySelector defaultDeviceSelector;
2394     AudioDeviceID deviceIds[MaxNumberDevices];
// Select scope and default-device selector based on direction.
2398         deviceScope = kAudioDevicePropertyScopeInput;
2399         defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
2402         deviceScope = kAudioDevicePropertyScopeOutput;
2403         defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
2406     AudioObjectPropertyAddress
2407         propertyAddress = { defaultDeviceSelector,
2408                 kAudioObjectPropertyScopeGlobal,
2409                 kAudioObjectPropertyElementMaster };
2411     // Get the actual device IDs
2412     int numberDevices = GetNumberDevices(deviceScope, deviceIds,
2414     if (numberDevices < 0)
2417     } else if (numberDevices == 0)
2419         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2420                      "InitDevice(): No devices");
2424     bool isDefaultDevice = false;
2425     deviceId = kAudioDeviceUnknown;
2426     if (userDeviceIndex == 0)
2428         // Try to use default system device
2429         size = sizeof(AudioDeviceID);
2430         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
2431                 &propertyAddress, 0, NULL, &size, &deviceId));
2432         if (deviceId == kAudioDeviceUnknown)
2434             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2435                          " No default device exists");
2438             isDefaultDevice = true;
2442     if (!isDefaultDevice)
2444         deviceId = deviceIds[userDeviceIndex];
2447     // Obtain device name and manufacturer for logging.
2448     // Also use this as a test to ensure a user-set device ID is valid.
2451     memset(devName, 0, sizeof(devName));
2452     memset(devManf, 0, sizeof(devManf));
2454     propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
2455     propertyAddress.mScope = deviceScope;
2456     propertyAddress.mElement = 0;
2457     size = sizeof(devName);
2458     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
2459             &propertyAddress, 0, NULL, &size, devName));
2461     propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
2462     size = sizeof(devManf);
2463     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId,
2464             &propertyAddress, 0, NULL, &size, devManf));
// Log which device was selected, tagged by direction.
2468         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2469                      " Input device: %s %s", devManf, devName);
2472         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2473                      " Output device: %s %s", devManf, devName);
2479 OSStatus AudioDeviceMac::objectListenerProc(
2480 AudioObjectID objectId,
2481 UInt32 numberAddresses,
2482 const AudioObjectPropertyAddress addresses[],
2485 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
2486 assert(ptrThis != NULL);
2488 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);
2490 // AudioObjectPropertyListenerProc functions are supposed to return 0
// Instance-side property listener: dispatches each notified property address
// to the matching handler (device list changes, stream format changes,
// data-source changes, processor overloads).
// NOTE(review): braces and the final return are elided from this listing.
2494 OSStatus AudioDeviceMac::implObjectListenerProc(
2495     const AudioObjectID objectId,
2496     const UInt32 numberAddresses,
2497     const AudioObjectPropertyAddress addresses[])
2499     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2500                  "AudioDeviceMac::implObjectListenerProc()");
2502     for (UInt32 i = 0; i < numberAddresses; i++)
2504         if (addresses[i].mSelector == kAudioHardwarePropertyDevices)
2506             HandleDeviceChange();
2507         } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat)
2509             HandleStreamFormatChange(objectId, addresses[i]);
2510         } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource)
2512             HandleDataSourceChange(objectId, addresses[i]);
2513         } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload)
2515             HandleProcessorOverload(addresses[i]);
// Handles kAudioHardwarePropertyDevices notifications: probes both
// registered devices with kAudioDevicePropertyDeviceIsAlive. If a device is
// gone (bad-device error or not alive), marks the corresponding
// *DeviceIsAlive flag 0, closes its mixer side, and raises _recError /
// _playError so the module process thread reports the failure.
// NOTE(review): braces, returns, and the `if (_recError == 1)` guard line
// (2548, mirroring the visible _playError check at 2577) are elided from
// this listing.
2522 int32_t AudioDeviceMac::HandleDeviceChange()
2524     OSStatus err = noErr;
2526     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2527                  "kAudioHardwarePropertyDevices");
2529     // A device has changed. Check if our registered devices have been removed.
2530     // Ensure the devices have been initialized, meaning the IDs are valid.
2531     if (MicrophoneIsInitialized())
2533         AudioObjectPropertyAddress propertyAddress = {
2534                 kAudioDevicePropertyDeviceIsAlive,
2535                 kAudioDevicePropertyScopeInput, 0 };
2536         UInt32 deviceIsAlive = 1;
2537         UInt32 size = sizeof(UInt32);
2538         err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0,
2539                                          NULL, &size, &deviceIsAlive);
2541         if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
2543             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2544                          "Capture device is not alive (probably removed)");
2545             AtomicSet32(&_captureDeviceIsAlive, 0);
2546             _mixerManager.CloseMicrophone();
2549                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2550                              _id, "  pending recording error exists");
2552             _recError = 1; // triggers callback from module process thread
2553         } else if (err != noErr)
2555             logCAMsg(kTraceError, kTraceAudioDevice, _id,
2556                      "Error in AudioDeviceGetProperty()", (const char*) &err);
// Same liveness probe for the output device.
2561     if (SpeakerIsInitialized())
2563         AudioObjectPropertyAddress propertyAddress = {
2564                 kAudioDevicePropertyDeviceIsAlive,
2565                 kAudioDevicePropertyScopeOutput, 0 };
2566         UInt32 deviceIsAlive = 1;
2567         UInt32 size = sizeof(UInt32);
2568         err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0,
2569                                          NULL, &size, &deviceIsAlive);
2571         if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0)
2573             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2574                          "Render device is not alive (probably removed)");
2575             AtomicSet32(&_renderDeviceIsAlive, 0);
2576             _mixerManager.CloseSpeaker();
2577             if (_playError == 1)
2579                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2580                              _id, "  pending playout error exists");
2582             _playError = 1; // triggers callback from module process thread
2583         } else if (err != noErr)
2585             logCAMsg(kTraceError, kTraceAudioDevice, _id,
2586                      "Error in AudioDeviceGetProperty()", (const char*) &err);
// Handles kAudioDevicePropertyStreamFormat notifications for either device:
// re-reads the device format, validates it (linear PCM, channel limit),
// updates the cached _inStreamFormat/_outStreamFormat and the desired
// channel counts, refreshes the audio buffer parameters, and recreates the
// affected AudioConverter with the new format.
// NOTE(review): braces, early returns (e.g. objectId mismatch at 2603), and
// the final return are elided from this listing.
2594 int32_t AudioDeviceMac::HandleStreamFormatChange(
2595     const AudioObjectID objectId,
2596     const AudioObjectPropertyAddress propertyAddress)
2598     OSStatus err = noErr;
2600     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2601                  "Stream format changed");
// Ignore notifications from devices we are not using.
2603     if (objectId != _inputDeviceID && objectId != _outputDeviceID)
2608     // Get the new device format
2609     AudioStreamBasicDescription streamFormat;
2610     UInt32 size = sizeof(streamFormat);
2611     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
2612             &propertyAddress, 0, NULL, &size, &streamFormat));
2614     if (streamFormat.mFormatID != kAudioFormatLinearPCM)
2616         logCAMsg(kTraceError, kTraceAudioDevice, _id,
2617                  "Unacceptable input stream format -> mFormatID",
2618                  (const char *) &streamFormat.mFormatID);
2622     if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS)
2624         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2625                      "Too many channels on device (mChannelsPerFrame = %d)",
2626                      streamFormat.mChannelsPerFrame);
2630     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2632     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2633                  "mSampleRate = %f, mChannelsPerFrame = %u",
2634                  streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
2635     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2636                  "mBytesPerPacket = %u, mFramesPerPacket = %u",
2637                  streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
2638     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2639                  "mBytesPerFrame = %u, mBitsPerChannel = %u",
2640                  streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
2641     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2642                  "mFormatFlags = %u",
2643                  streamFormat.mFormatFlags);
2644     logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
2645              (const char *) &streamFormat.mFormatID);
// Input-scope change: re-validate ring buffer capacity and rebuild the
// capture converter.
2647     if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
2649         const int io_block_size_samples = streamFormat.mChannelsPerFrame *
2650             streamFormat.mSampleRate / 100 * N_BLOCKS_IO;
2651         if (io_block_size_samples > _captureBufSizeSamples)
2653             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2654                 "Input IO block size (%d) is larger than ring buffer (%u)",
2655                 io_block_size_samples, _captureBufSizeSamples);
2660         memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));
2662         if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2))
2664             _inDesiredFormat.mChannelsPerFrame = 2;
2667             // Disable stereo recording when we only have one channel on the device.
2668             _inDesiredFormat.mChannelsPerFrame = 1;
2670             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2671                          "Stereo recording unavailable on this device");
2674         if (_ptrAudioBuffer)
2676             // Update audio buffer with the selected parameters
2677             _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
2678             _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
2681         // Recreate the converter with the new format
2682         // TODO(xians): make this thread safe
2683         WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));
2685         WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
2686                                                   &_captureConverter));
// Output-scope change: mirror the same update for the render side, and
// recompute the render delay offset for the new channel count.
2689         memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));
2691         if (_outStreamFormat.mChannelsPerFrame >= 2 && (_playChannels == 2))
2693             _outDesiredFormat.mChannelsPerFrame = 2;
2696             // Disable stereo playout when we only have one channel on the device.
2697             _outDesiredFormat.mChannelsPerFrame = 1;
2699             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2700                          "Stereo playout unavailable on this device");
2703         if (_ptrAudioBuffer)
2705             // Update audio buffer with the selected parameters
2706             _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
2707             _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
2710         _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT
2711             * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
2712             * _outDesiredFormat.mChannelsPerFrame;
2714         // Recreate the converter with the new format
2715         // TODO(xians): make this thread safe
2716         WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_renderConverter));
2718         WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, &streamFormat,
2719                                                   &_renderConverter));
// Handles kAudioDevicePropertyDataSource notifications on the output scope.
// Only relevant on MacBook Pro hardware (_macBookPro): when the data source
// is the internal speakers ('ispk'), _macBookProPanRight is set so playout
// is panned right (away from the left-side fan noise / mic).
// NOTE(review): braces and the final return are elided from this listing.
2725 int32_t AudioDeviceMac::HandleDataSourceChange(
2726     const AudioObjectID objectId,
2727     const AudioObjectPropertyAddress propertyAddress)
2729     OSStatus err = noErr;
2731     if (_macBookPro && propertyAddress.mScope
2732         == kAudioDevicePropertyScopeOutput)
2734         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2735                      "Data source changed");
2737         _macBookProPanRight = false;
2738         UInt32 dataSource = 0;
2739         UInt32 size = sizeof(UInt32);
2740         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId,
2741                 &propertyAddress, 0, NULL, &size, &dataSource));
2742         if (dataSource == 'ispk')
2744             _macBookProPanRight = true;
2745             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2746                 "MacBook Pro using internal speakers; stereo panning right");
2749             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2750                          "MacBook Pro not using internal speakers");
// Handles kAudioDeviceProcessorOverload notifications. Deliberately a no-op:
// the notification arrives on the HAL's IO thread, so even logging is
// avoided to keep that thread fast. The commented-out code shows the
// reporting that was considered and intentionally disabled.
// NOTE(review): braces and the final return are elided from this listing.
2756 int32_t AudioDeviceMac::HandleProcessorOverload(
2757     const AudioObjectPropertyAddress propertyAddress)
2759     // TODO(xians): we probably want to notify the user in some way of the
2760     // overload. However, the Windows interpretations of these errors seem to
2761     // be more severe than what ProcessorOverload is thrown for.
2763     // We don't log the notification, as it's sent from the HAL's IO thread. We
2764     // don't want to slow it down even further.
2765     if (propertyAddress.mScope == kAudioDevicePropertyScopeInput)
2767         //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
2769         //_callback->ProblemIsReported(
2770         // SndCardStreamObserver::ERecordingProblem);
2773         //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2774         // "Render processor overload");
2775         //_callback->ProblemIsReported(
2776         // SndCardStreamObserver::EPlaybackProblem);
2782 // ============================================================================
2784 // ============================================================================
2786 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*,
2787 const AudioBufferList* inputData,
2788 const AudioTimeStamp* inputTime,
2789 AudioBufferList* outputData,
2790 const AudioTimeStamp* outputTime,
2793 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
2794 assert(ptrThis != NULL);
2796 ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);
2798 // AudioDeviceIOProc functions are supposed to return 0
2802 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
2803 UInt32 *numberDataPackets,
2804 AudioBufferList *data,
2805 AudioStreamPacketDescription **,
2808 AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData;
2809 assert(ptrThis != NULL);
2811 return ptrThis->implOutConverterProc(numberDataPackets, data);
2814 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*,
2815 const AudioBufferList* inputData,
2816 const AudioTimeStamp* inputTime,
2818 const AudioTimeStamp*, void* clientData)
2820 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData;
2821 assert(ptrThis != NULL);
2823 ptrThis->implInDeviceIOProc(inputData, inputTime);
2825 // AudioDeviceIOProc functions are supposed to return 0
2829 OSStatus AudioDeviceMac::inConverterProc(
2831 UInt32 *numberDataPackets,
2832 AudioBufferList *data,
2833 AudioStreamPacketDescription ** /*dataPacketDescription*/,
2836 AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData);
2837 assert(ptrThis != NULL);
2839 return ptrThis->implInConverterProc(numberDataPackets, data);
// Render-side IO handler, reached from deviceIOProc on the HAL's realtime IO
// thread. Visible responsibilities: drive the capture path when a single
// shared device handles both directions, honor a pending stop request, fill
// |outputData| through the render AudioConverter, and publish the current
// render-delay estimate for the VQE.
2842 OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData,
2843 const AudioTimeStamp *inputTime,
2844 AudioBufferList *outputData,
2845 const AudioTimeStamp *outputTime)
2847 OSStatus err = noErr;
// Host timestamps in nanoseconds: when this buffer will actually be played
// out vs. the current time. Their difference feeds the delay estimate below.
2848 UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
2849 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
// Shared input/output device: this single IO proc also drives capture.
2851 if (!_twoDevices && _recording)
2853 implInDeviceIOProc(inputData, inputTime);
2856     // Check if we should close down audio device
2857     // Double-checked locking optimization to remove locking overhead
// Stop/destroy the output IO proc when playout ends (or, for a shared
// device, when neither direction is active anymore).
2863 if (_twoDevices || (!_recording && !_playing))
2865 // In the case of a shared device, the single driving ioProc
2867 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID,
2869 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID,
2873 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
2874 _id, " Playout or shared device stopped");
2888     // This can be the case when a shared device is capturing but not
2889     // rendering. We allow the checks above before returning to avoid a
2890     // timeout when capturing is stopped.
2894 assert(_outStreamFormat.mBytesPerFrame != 0);
// Number of output frames the HAL is asking for in this callback.
2895 UInt32 size = outputData->mBuffers->mDataByteSize
2896 / _outStreamFormat.mBytesPerFrame;
2898     // TODO(xians): signal an error somehow?
// Pull converted playout data; outConverterProc supplies the converter's
// input from the render ring buffer.
2899 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
2900 this, &size, outputData, NULL);
2905 // This is our own error.
2906 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2907 " Error in AudioConverterFillComplexBuffer()");
// CoreAudio errors are four-char codes; logCAMsg decodes them.
2911 logCAMsg(kTraceError, kTraceAudioDevice, _id,
2912 "Error in AudioConverterFillComplexBuffer()",
2913 (const char *) &err);
// Render delay = time until this buffer hits the speaker plus whatever is
// still queued in the render ring buffer, converted to microseconds.
2918 ring_buffer_size_t bufSizeSamples =
2919 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);
2921 int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs)
2923 renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
2924 / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate
// Published atomically; read by the capture thread for VQE delay reporting.
2927 AtomicSet32(&_renderDelayUs, renderDelayUs);
// Input supplier for the render AudioConverter (reached via
// outConverterProc). Hands the converter exactly *numberDataPackets packets
// drawn from the render ring buffer, zero-padding on underrun, then wakes
// the render worker thread so it can refill the ring buffer.
2932 OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets,
2933 AudioBufferList *data)
// The converter is configured for a single interleaved buffer.
2935 assert(data->mNumberBuffers == 1);
2936 ring_buffer_size_t numSamples = *numberDataPackets
2937 * _outDesiredFormat.mChannelsPerFrame;
2939 data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
2940 // Always give the converter as much as it wants, zero padding as required.
2941 data->mBuffers->mDataByteSize = *numberDataPackets
2942 * _outDesiredFormat.mBytesPerPacket;
2943 data->mBuffers->mData = _renderConvertData;
// NOTE(review): sizeof(_renderConvertData) implies this member is a fixed
// array, not a pointer — verify against the class declaration.
2944 memset(_renderConvertData, 0, sizeof(_renderConvertData));
// Reads whatever is available up to numSamples; the memset above covers any
// shortfall with silence.
2946 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
// Wake the render worker: ring-buffer space was just freed.
2948 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
2949 if (kernErr != KERN_SUCCESS)
2951 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2952 " semaphore_signal_all() error: %d", kernErr);
// Capture-side IO handler, reached from inDeviceIOProc (or from
// implDeviceIOProc for a shared device) on the HAL's realtime IO thread.
// Visible responsibilities: honor a pending stop request, publish the
// capture-delay estimate, copy the captured frames into the capture ring
// buffer, and wake the capture worker thread.
2959 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData,
2960 const AudioTimeStamp *inputTime)
2962 OSStatus err = noErr;
// Host timestamps in nanoseconds: when this buffer was captured vs. now.
2963 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
2964 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());
2966     // Check if we should close down audio device
2967     // Double-checked locking optimization to remove locking overhead
2973 // This will be signalled only when a shared device is not in use.
2974 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
2975 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID,
2976 _inDeviceIOProcID));
2979 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
2980 _id, " Recording device stopped");
// Unblocks StopRecording(), which waits on this event.
2984 _stopEventRec.Set();
2993     // Allow above checks to avoid a timeout on stopping capture.
// Capture delay = age of this input buffer plus whatever is already queued
// in the capture ring buffer, converted to microseconds.
2997 ring_buffer_size_t bufSizeSamples =
2998 PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer);
3000 int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs)
3003 += static_cast<int32_t> ((1.0e6 * bufSizeSamples)
3004 / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate
// Published atomically; read by the capture thread for VQE delay reporting.
3007 AtomicSet32(&_captureDelayUs, captureDelayUs);
// Single interleaved buffer expected from the HAL.
3009 assert(inputData->mNumberBuffers == 1);
// Convert byte size to a sample count in the device's stream format.
3010 ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize
3011 * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket;
3012 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData,
// Wake the capture worker: new samples are available in the ring buffer.
3015 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
3016 if (kernErr != KERN_SUCCESS)
3018 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3019 " semaphore_signal_all() error: %d", kernErr);
// Input supplier for the capture AudioConverter (reached via inConverterProc
// from the capture worker thread). Blocks until the capture ring buffer
// holds enough samples, then hands the converter a zero-copy view of the
// ring buffer's read region.
3025 OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets,
3026 AudioBufferList *data)
// Single interleaved buffer expected by the converter configuration.
3028 assert(data->mNumberBuffers == 1);
3029 ring_buffer_size_t numSamples = *numberDataPackets
3030 * _inStreamFormat.mChannelsPerFrame;
// Wait until the IO proc has produced enough samples; woken by
// semaphore_signal_all() in implInDeviceIOProc.
3032 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples)
3034 mach_timespec_t timeout;
// NOTE(review): TIMER_PERIOD_MS is assigned to tv_nsec (nanoseconds);
// presumably the constant is already expressed in ns despite the _MS
// suffix — confirm against its definition.
3036 timeout.tv_nsec = TIMER_PERIOD_MS;
3038 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout);
3039 if (kernErr == KERN_OPERATION_TIMED_OUT)
// On timeout, check whether the device is still delivering data.
3041 int32_t signal = AtomicGet32(&_captureDeviceIsAlive);
3044 // The capture device is no longer alive; stop the worker thread.
// Telling the converter zero packets are available ends the conversion.
3045 *numberDataPackets = 0;
3048 } else if (kernErr != KERN_SUCCESS)
3050 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3051 " semaphore_wait() error: %d", kernErr);
3055     // Pass the read pointer directly to the converter to avoid a memcpy.
// GetRingBufferReadRegions may shrink numSamples to the contiguous region;
// the second region (dummyPtr/dummySize) is intentionally ignored here.
3057 ring_buffer_size_t dummySize;
3058 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples,
3059 &data->mBuffers->mData, &numSamples,
3060 &dummyPtr, &dummySize);
3061 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples);
// Describe the handed-out region to the converter in packets.
3063 data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame;
3064 *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame;
3065 data->mBuffers->mDataByteSize = *numberDataPackets
3066 * _inStreamFormat.mBytesPerPacket;
3071 bool AudioDeviceMac::RunRender(void* ptrThis)
3073 return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread();
// Render worker loop body (one iteration per call). Waits for space in the
// render ring buffer, pulls one engine playout chunk from the
// AudioDeviceBuffer, optionally applies the MacBook Pro pan-right
// workaround, and writes the PCM into the ring buffer for the IO proc.
3076 bool AudioDeviceMac::RenderWorkerThread()
// One engine playout chunk, in interleaved samples.
3078 ring_buffer_size_t numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES
3079 * _outDesiredFormat.mChannelsPerFrame;
// Block until the ring buffer can absorb a full chunk (minus the configured
// extra-delay offset); woken by implOutConverterProc consuming data.
3080 while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer)
3081 - _renderDelayOffsetSamples < numSamples)
3083 mach_timespec_t timeout;
// NOTE(review): TIMER_PERIOD_MS is assigned to tv_nsec (nanoseconds);
// presumably the constant is already in ns despite the _MS suffix — confirm.
3085 timeout.tv_nsec = TIMER_PERIOD_MS;
3087 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout);
3088 if (kernErr == KERN_OPERATION_TIMED_OUT)
// On timeout, check whether the render device is still consuming data.
3090 int32_t signal = AtomicGet32(&_renderDeviceIsAlive);
3093 // The render device is no longer alive; stop the worker thread.
3096 } else if (kernErr != KERN_SUCCESS)
3098 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3099 " semaphore_timedwait() error: %d", kernErr);
// Byte buffer for one chunk; factor 4 accommodates 16-bit stereo samples.
3103 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
3105 if (!_ptrAudioBuffer)
3107 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3108 " capture AudioBuffer is invalid");
3112     // Ask for new PCM data to be played out using the AudioDeviceBuffer.
3114 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
3116 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
3117 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES)
3119 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3120 " invalid number of output samples(%d)", nSamples);
3123 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
// Reinterpret the byte buffer as interleaved 16-bit samples.
3125 SInt16 *pPlayBuffer = (SInt16 *) &playBuffer;
// Workaround (per _macBookProPanRight): route all output to the right
// channel on affected MacBook Pro models.
3126 if (_macBookProPanRight && (_playChannels == 2))
3128 // Mix entirely into the right channel and zero the left channel.
3129 SInt32 sampleInt32 = 0;
3130 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx
// Sum left + right in 32 bits to avoid overflow before clamping.
3133 sampleInt32 = pPlayBuffer[sampleIdx];
3134 sampleInt32 += pPlayBuffer[sampleIdx + 1];
// Saturate to the signed 16-bit range.
3137 if (sampleInt32 > 32767)
3139 sampleInt32 = 32767;
3140 } else if (sampleInt32 < -32768)
3142 sampleInt32 = -32768;
// Left channel silenced; mixed signal placed in the right channel.
3145 pPlayBuffer[sampleIdx] = 0;
3146 pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32);
// Hand the chunk to the IO proc via the ring buffer.
3150 PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);
3155 bool AudioDeviceMac::RunCapture(void* ptrThis)
3157 return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread();
// Capture worker loop body (one iteration per call). Pulls one engine
// recording chunk through the capture AudioConverter (whose input side,
// inConverterProc, blocks on the capture ring buffer), attaches delay, mic
// level and typing status, delivers the chunk to the AudioDeviceBuffer, and
// applies any AGC-requested microphone volume change.
3160 bool AudioDeviceMac::CaptureWorkerThread()
3162 OSStatus err = noErr;
3163 UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES
3164 * _inDesiredFormat.mChannelsPerFrame;
// NOTE(review): variable-length array — a GCC/Clang extension, not standard
// C++. Fine for the toolchains this file targets.
3165 SInt16 recordBuffer[noRecSamples];
// In/out for AudioConverterFillComplexBuffer: requested, then actual frames.
3166 UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;
// Destination buffer list describing recordBuffer in the engine's desired
// (interleaved) format.
3168 AudioBufferList engineBuffer;
3169 engineBuffer.mNumberBuffers = 1; // Interleaved channels.
3170 engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
3171 engineBuffer.mBuffers->mDataByteSize = _inDesiredFormat.mBytesPerPacket
3173 engineBuffer.mBuffers->mData = recordBuffer;
// Converts from the device stream format to the engine format; blocks in
// inConverterProc until enough captured samples are available.
3175 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
3176 this, &size, &engineBuffer, NULL);
3181 // This is our own error.
// CoreAudio errors are four-char codes; logCAMsg decodes them.
3185 logCAMsg(kTraceError, kTraceAudioDevice, _id,
3186 "Error in AudioConverterFillComplexBuffer()",
3187 (const char *) &err);
3192     // TODO(xians): what if the returned size is incorrect?
// Only deliver full chunks to the engine.
3193 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES)
3195 uint32_t currentMicLevel(0);
3196 uint32_t newMicLevel(0);
3197 int32_t msecOnPlaySide;
3198 int32_t msecOnRecordSide;
// Delay estimates published by the IO procs (microseconds).
3200 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs);
3201 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs);
// Convert delay + device latency to milliseconds, rounding to nearest.
3203 msecOnPlaySide = static_cast<int32_t> (1e-3 * (renderDelayUs +
3204 _renderLatencyUs) + 0.5);
3205 msecOnRecordSide = static_cast<int32_t> (1e-3 * (captureDelayUs +
3206 _captureLatencyUs) +
3209 if (!_ptrAudioBuffer)
3211 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3212 " capture AudioBuffer is invalid");
3216         // store the recorded buffer (no action will be taken if the
3217         // #recorded samples is not a full buffer)
3218 _ptrAudioBuffer->SetRecordedBuffer((int8_t*) &recordBuffer,
3223         // store current mic level in the audio buffer if AGC is enabled
3224 if (MicrophoneVolume(currentMicLevel) == 0)
3226 // this call does not affect the actual microphone volume
3227 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
// Report play/record-side delays (clock drift passed as 0) for AEC/VQE.
3231 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0);
// Typing detection input: whether any key was newly pressed.
3233 _ptrAudioBuffer->SetTypingStatus(KeyPressed());
3235         // deliver recorded samples at specified sample rate, mic level etc.
3236         // to the observer using callback
3237 _ptrAudioBuffer->DeliverRecordedData();
// AGC feedback path: a non-zero level means the observer requests a change.
3241 newMicLevel = _ptrAudioBuffer->NewMicLevel();
3242 if (newMicLevel != 0)
3244 // The VQE will only deliver non-zero microphone levels when
3245 // a change is needed.
3246 // Set this new mic level (received from the observer as return
3247 // value in the callback).
3248 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice,
3249 _id, " AGC change of volume: old=%u => new=%u",
3250 currentMicLevel, newMicLevel);
3251 if (SetMicrophoneVolume(newMicLevel) == -1)
3253 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3254 " the required modification of the microphone "
3264 bool AudioDeviceMac::KeyPressed() {
3265 bool key_down = false;
3266 // Loop through all Mac virtual key constant values.
3267 for (unsigned int key_index = 0;
3268 key_index < ARRAY_SIZE(prev_key_state_);
3270 bool keyState = CGEventSourceKeyState(
3271 kCGEventSourceStateHIDSystemState,
3273 // A false -> true change in keymap means a key is pressed.
3274 key_down |= (keyState && !prev_key_state_[key_index]);
3275 // Save current state.
3276 prev_key_state_[key_index] = keyState;
3280 } // namespace webrtc