2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
13 #include "webrtc/modules/audio_device/audio_device_config.h"
14 #include "webrtc/modules/audio_device/audio_device_utility.h"
15 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
17 #include "webrtc/system_wrappers/interface/event_wrapper.h"
18 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
19 #include "webrtc/system_wrappers/interface/trace.h"
// Process-wide late-binding symbol table for libpulse. Symbols are resolved at
// runtime so this module does not need to link against libpulse and still
// loads on systems without it (see comment below).
21 webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
23 // Accesses Pulse functions through our late-binding symbol table instead of
24 // directly. This way we don't have to link to libpulse, which means our binary
25 // will work on systems that don't have it.
// NOTE(review): the line below is the body of the LATE(sym) macro; the
// "#define LATE(sym)" line itself is not visible in this chunk — confirm
// against the full file.
27 LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
32 // ============================================================================
34 // ============================================================================
// Constructor: initializes members to safe defaults. Ownership notes:
// _critSect and the four EventWrapper members are heap-allocated here and
// stored as references; the destructor deletes them via their addresses.
// NOTE(review): the initializer list shown here is incomplete — the original
// line numbering is non-contiguous, so additional member initializers exist
// in the full file.
36 AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
37 _ptrAudioBuffer(NULL),
38 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
39 _timeEventRec(*EventWrapper::Create()),
40 _timeEventPlay(*EventWrapper::Create()),
41 _recStartEvent(*EventWrapper::Create()),
42 _playStartEvent(*EventWrapper::Create()),
50 _outputDeviceIndex(0),
51 _inputDeviceIsSpecified(false),
52 _outputDeviceIsSpecified(false),
56 _playBufType(AudioDeviceModule::kFixedBufferSize),
60 _recIsInitialized(false),
61 _playIsInitialized(false),
67 update_speaker_volume_at_startup_(false),
// 20 ms fixed playout buffer delay default.
68 _playBufDelayFixed(20),
79 _playDeviceName(NULL),
81 _playDisplayDeviceName(NULL),
82 _recDisplayDeviceName(NULL),
84 _playbackBufferSize(0),
85 _playbackBufferUnused(0),
90 _tempSampleData(NULL),
91 _tempSampleDataSize(0),
92 _configuredLatencyPlay(0),
93 _configuredLatencyRec(0),
95 _paStateChanged(false),
104 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
105 "%s created", __FUNCTION__);
// Zero out PA buffer attributes, server-version string and the key-state
// array used for typing detection before first use.
107 memset(_paServerVersion, 0, sizeof(_paServerVersion));
108 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
109 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
110 memset(_oldKeyState, 0, sizeof(_oldKeyState));
113 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
115 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
116 "%s destroyed", __FUNCTION__);
122 delete [] _recBuffer;
127 delete [] _playBuffer;
132 delete [] _playDeviceName;
133 _playDeviceName = NULL;
137 delete [] _recDeviceName;
138 _recDeviceName = NULL;
141 delete &_recStartEvent;
142 delete &_playStartEvent;
143 delete &_timeEventRec;
144 delete &_timeEventPlay;
// Attaches the shared AudioDeviceBuffer and resets its sample rates and
// channel counts to zero; the real values are set later by InitPlayout()
// and InitRecording().
148 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
151 CriticalSectionScoped lock(&_critSect);
153 _ptrAudioBuffer = audioBuffer;
155 // Inform the AudioBuffer about default settings for this implementation.
156 // Set all values to zero here since the actual settings will be done by
157 // InitPlayout and InitRecording later.
158 _ptrAudioBuffer->SetRecordingSampleRate(0);
159 _ptrAudioBuffer->SetPlayoutSampleRate(0);
160 _ptrAudioBuffer->SetRecordingChannels(0);
161 _ptrAudioBuffer->SetPlayoutChannels(0);
164 // ----------------------------------------------------------------------------
166 // ----------------------------------------------------------------------------
// Reports the active audio layer; always PulseAudio for this implementation.
168 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
169 AudioDeviceModule::AudioLayer& audioLayer) const
171 audioLayer = AudioDeviceModule::kLinuxPulseAudio;
// One-time initialization: connects to the PulseAudio server, opens the X
// display used for typing detection (non-fatal if it fails), and creates and
// starts the realtime recording and playout worker threads.
// NOTE(review): several lines (returns, braces) are not visible in this
// chunk; the error paths below appear to bail out after logging.
175 int32_t AudioDeviceLinuxPulse::Init()
178 CriticalSectionScoped lock(&_critSect);
185 // Initialize PulseAudio
186 if (InitPulseAudio() < 0)
188 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
189 " failed to initialize PulseAudio");
// Best-effort cleanup of the partially-initialized PA state.
191 if (TerminatePulseAudio() < 0)
193 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
194 " failed to terminate PulseAudio");
205 //Get X display handle for typing detection
206 _XDisplay = XOpenDisplay(NULL);
// Missing X display only disables typing detection; it is not fatal.
209 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
210 " failed to open X display, typing detection will not work");
// Create and start the recording thread at realtime priority.
214 const char* threadName = "webrtc_audio_module_rec_thread";
215 _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc, this,
216 kRealtimePriority, threadName);
217 if (_ptrThreadRec == NULL)
219 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
220 " failed to create the rec audio thread");
224 unsigned int threadID(0);
225 if (!_ptrThreadRec->Start(threadID))
227 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
228 " failed to start the rec audio thread");
230 delete _ptrThreadRec;
231 _ptrThreadRec = NULL;
234 _recThreadID = threadID;
// Create and start the playout thread at realtime priority.
237 threadName = "webrtc_audio_module_play_thread";
238 _ptrThreadPlay = ThreadWrapper::CreateThread(PlayThreadFunc, this,
239 kRealtimePriority, threadName);
240 if (_ptrThreadPlay == NULL)
242 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
243 " failed to create the play audio thread");
248 if (!_ptrThreadPlay->Start(threadID))
250 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
251 " failed to start the play audio thread");
253 delete _ptrThreadPlay;
254 _ptrThreadPlay = NULL;
257 _playThreadID = threadID;
// Shuts the module down: closes the mixer, stops and deletes both worker
// threads (the thread pointer is cleared first and the Stop() is done on a
// local copy so the thread functions see NULL), disconnects from PulseAudio
// and closes the X display.
264 int32_t AudioDeviceLinuxPulse::Terminate()
274 _mixerManager.Close();
// Hand-off pattern: take the rec thread pointer, clear the member, then
// stop the local copy so the audio thread cannot race on the member.
279 ThreadWrapper* tmpThread = _ptrThreadRec;
280 _ptrThreadRec = NULL;
283 tmpThread->SetNotAlive();
285 if (tmpThread->Stop())
290 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
291 " failed to close down the rec audio thread");
293 // Lock again since we need to protect _ptrThreadPlay.
// Same hand-off pattern for the playout thread; _timeEventPlay.Set() wakes
// the thread so Stop() does not block on its wait.
300 ThreadWrapper* tmpThread = _ptrThreadPlay;
301 _ptrThreadPlay = NULL;
304 tmpThread->SetNotAlive();
305 _timeEventPlay.Set();
306 if (tmpThread->Stop())
311 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
312 " failed to close down the play audio thread");
318 // Terminate PulseAudio
319 if (TerminatePulseAudio() < 0)
321 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
322 " failed to terminate PulseAudio");
328 XCloseDisplay(_XDisplay);
332 _initialized = false;
333 _outputDeviceIsSpecified = false;
334 _inputDeviceIsSpecified = false;
// Accessor: whether Init() has completed successfully.
339 bool AudioDeviceLinuxPulse::Initialized() const
341 return (_initialized);
// Opens the output mixer for the selected playout device. Device index 0
// means "default": the PA index is obtained via GetDefaultDeviceInfo();
// otherwise a device enumeration callback resolves _outputDeviceIndex to
// the PulseAudio sink index (_paDeviceIndex).
344 int32_t AudioDeviceLinuxPulse::InitSpeaker()
347 CriticalSectionScoped lock(&_critSect);
// A playout device must have been selected via SetPlayoutDevice() first.
354 if (!_outputDeviceIsSpecified)
359 // check if default device
360 if (_outputDeviceIndex == 0)
362 uint16_t deviceIndex = 0;
363 GetDefaultDeviceInfo(false, NULL, deviceIndex);
364 _paDeviceIndex = deviceIndex;
367 // get the PA device index from
369 _deviceIndex = _outputDeviceIndex;
371 // get playout devices
375 // the callback has now set the _paDeviceIndex to
376 // the PulseAudio index of the device
377 if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
382 // clear _deviceIndex
// Mirror of InitSpeaker() for the capture side: resolves the input device
// to a PulseAudio source index and opens the microphone mixer.
389 int32_t AudioDeviceLinuxPulse::InitMicrophone()
392 CriticalSectionScoped lock(&_critSect);
399 if (!_inputDeviceIsSpecified)
404 // Check if default device
405 if (_inputDeviceIndex == 0)
407 uint16_t deviceIndex = 0;
408 GetDefaultDeviceInfo(true, NULL, deviceIndex);
409 _paDeviceIndex = deviceIndex;
412 // Get the PA device index from
414 _deviceIndex = _inputDeviceIndex;
416 // get recording devices
420 // The callback has now set the _paDeviceIndex to
421 // the PulseAudio index of the device
422 if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
427 // Clear _deviceIndex
// Thin accessors delegating to the mixer manager.
434 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
436 return (_mixerManager.SpeakerIsInitialized());
439 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
441 return (_mixerManager.MicrophoneIsInitialized());
// Probes whether the selected output device exposes a volume control by
// (temporarily) initializing the speaker; if the speaker was not initialized
// before the probe, it is closed again afterwards.
444 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
447 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
449 // Make an attempt to open up the
450 // output mixer corresponding to the currently selected output device.
451 if (!wasInitialized && InitSpeaker() == -1)
453 // If we end up here it means that the selected speaker has no volume
459 // Given that InitSpeaker was successful, we know that a volume control exists
462 // Close the initialized output mixer
465 _mixerManager.CloseSpeaker();
// Stores the volume via the mixer; the flag records that a volume was set
// before playout started so it can be applied at stream startup.
471 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
474 // Only update the volume if it's been set while we weren't playing.
475 update_speaker_volume_at_startup_ = true;
477 return (_mixerManager.SetSpeakerVolume(volume));
// Reads the current speaker volume from the mixer into `volume`.
480 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
485 if (_mixerManager.SpeakerVolume(level) == -1)
// Wave-out volume APIs are Windows-only; unsupported on Linux/Pulse.
495 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
497 uint16_t volumeRight)
500 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
501 " API call not supported on this platform");
505 int32_t AudioDeviceLinuxPulse::WaveOutVolume(
506 uint16_t& /*volumeLeft*/,
507 uint16_t& /*volumeRight*/) const
510 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
511 " API call not supported on this platform");
// Min/max/step-size queries all delegate to the mixer manager.
515 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
516 uint32_t& maxVolume) const
521 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
531 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
532 uint32_t& minVolume) const
537 if (_mixerManager.MinSpeakerVolume(minVol) == -1)
547 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
548 uint16_t& stepSize) const
553 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
// Probes for a mute control on the selected speaker using the same
// temporary-initialization pattern as SpeakerVolumeIsAvailable().
563 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
566 bool isAvailable(false);
567 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
569 // Make an attempt to open up the
570 // output mixer corresponding to the currently selected output device.
572 if (!wasInitialized && InitSpeaker() == -1)
574 // If we end up here it means that the selected speaker has no volume
575 // control, hence it is safe to state that there is no mute control
576 // already at this stage.
581 // Check if the selected speaker has a mute control
582 _mixerManager.SpeakerMuteIsAvailable(isAvailable)
584 available = isAvailable;
586 // Close the initialized output mixer
589 _mixerManager.CloseSpeaker();
// Mute set/get delegate directly to the mixer manager.
595 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
598 return (_mixerManager.SetSpeakerMute(enable));
601 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
605 if (_mixerManager.SpeakerMute(muted) == -1)
// Probes for a mute control on the selected microphone; temporarily
// initializes the input mixer if it was not already open.
614 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
617 bool isAvailable(false);
618 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
620 // Make an attempt to open up the
621 // input mixer corresponding to the currently selected input device.
623 if (!wasInitialized && InitMicrophone() == -1)
625 // If we end up here it means that the selected microphone has no volume
626 // control, hence it is safe to state that there is no boost control
627 // already at this stage.
632 // Check if the selected microphone has a mute control
634 _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
635 available = isAvailable;
637 // Close the initialized input mixer
641 _mixerManager.CloseMicrophone();
// Mute set/get delegate directly to the mixer manager.
647 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
650 return (_mixerManager.SetMicrophoneMute(enable));
653 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
657 if (_mixerManager.MicrophoneMute(muted) == -1)
// Probes for a boost control on the selected microphone; same
// temporary-initialization pattern as the mute/volume probes above.
666 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
669 bool isAvailable(false);
670 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
672 // Enumerate all avaliable microphone and make an attempt to open up the
673 // input mixer corresponding to the currently selected input device.
675 if (!wasInitialized && InitMicrophone() == -1)
677 // If we end up here it means that the selected microphone has no volume
678 // control, hence it is safe to state that there is no boost control
679 // already at this stage.
684 // Check if the selected microphone has a boost control
685 _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
686 available = isAvailable;
688 // Close the initialized input mixer
691 _mixerManager.CloseMicrophone();
// Boost set/get delegate directly to the mixer manager.
697 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
700 return (_mixerManager.SetMicrophoneBoost(enable));
703 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
708 if (_mixerManager.MicrophoneBoost(onOff) == -1)
// Queries whether the selected microphone supports stereo capture.
// Short-circuits when already recording in stereo; otherwise probes the
// mixer (temporarily initializing the microphone if needed).
718 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
721 if (_recChannels == 2 && _recording) {
727 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
730 if (!wasInitialized && InitMicrophone() == -1)
732 // Cannot open the specified device
737 // Check if the selected microphone can record stereo.
738 bool isAvailable(false);
739 error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
741 available = isAvailable;
743 // Close the initialized input mixer
746 _mixerManager.CloseMicrophone();
// Setter body not visible in this chunk.
752 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
// Reports stereo recording as enabled when two capture channels are set.
763 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
766 if (_recChannels == 2)
// Playout-side counterpart: probes the speaker for stereo support.
774 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
777 if (_playChannels == 2 && _playing) {
783 bool wasInitialized = _mixerManager.SpeakerIsInitialized();
786 if (!wasInitialized && InitSpeaker() == -1)
788 // Cannot open the specified device.
792 // Check if the selected speaker can play stereo.
793 bool isAvailable(false);
794 error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
796 available = isAvailable;
798 // Close the initialized input mixer
801 _mixerManager.CloseSpeaker();
// Setter body not visible in this chunk.
807 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
// Reports stereo playout as enabled when two playout channels are set.
818 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
821 if (_playChannels == 2)
// AGC flag set/get; bodies are not visible in this chunk.
829 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
837 bool AudioDeviceLinuxPulse::AGC() const
// Probes whether the selected microphone exposes a volume control; uses the
// same temporary-initialization pattern as the speaker-side probe.
843 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
847 bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
849 // Make an attempt to open up the
850 // input mixer corresponding to the currently selected output device.
851 if (!wasInitialized && InitMicrophone() == -1)
853 // If we end up here it means that the selected microphone has no volume
859 // Given that InitMicrophone was successful, we know that a volume control
863 // Close the initialized input mixer
866 _mixerManager.CloseMicrophone();
// Volume set/get and range queries delegate to the mixer manager.
872 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
875 return (_mixerManager.SetMicrophoneVolume(volume));
878 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
879 uint32_t& volume) const
884 if (_mixerManager.MicrophoneVolume(level) == -1)
886 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
887 " failed to retrive current microphone level");
896 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
897 uint32_t& maxVolume) const
902 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
912 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
913 uint32_t& minVolume) const
918 if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
928 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
929 uint16_t& stepSize) const
934 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
// Counts playout devices: starts at 1 for the "default" pseudo-device and
// lets the PA sink-enumeration callback increment _numPlayDevices.
944 int16_t AudioDeviceLinuxPulse::PlayoutDevices()
949 pa_operation* paOperation = NULL;
950 _numPlayDevices = 1; // init to 1 to account for "default"
952 // get the whole list of devices and update _numPlayDevices
953 paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
// Block until the asynchronous enumeration has finished.
957 WaitForOperationCompletion(paOperation);
961 return _numPlayDevices;
// Selects a playout device by index; rejected while playout is initialized
// and when the index is outside [0, nDevices-1].
964 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
967 if (_playIsInitialized)
972 const uint16_t nDevices = PlayoutDevices();
974 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
975 " number of availiable output devices is %u", nDevices);
977 if (index > (nDevices - 1))
979 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
980 " device index is out of range [0,%u]", (nDevices - 1));
984 _outputDeviceIndex = index;
985 _outputDeviceIsSpecified = true;
// The WindowsDeviceType overload is not meaningful on Linux.
990 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
991 AudioDeviceModule::WindowsDeviceType /*device*/)
993 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
994 "WindowsDeviceType not supported");
// Returns the display name (and clears the GUID) for playout device `index`.
// Index 0 resolves the default device; otherwise the enumeration callback
// fills _playDisplayDeviceName, which is cleared again afterwards.
998 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
1000 char name[kAdmMaxDeviceNameSize],
1001 char guid[kAdmMaxGuidSize])
1004 const uint16_t nDevices = PlayoutDevices();
1006 if ((index > (nDevices - 1)) || (name == NULL))
1011 memset(name, 0, kAdmMaxDeviceNameSize);
1015 memset(guid, 0, kAdmMaxGuidSize);
1018 // Check if default device
1021 uint16_t deviceIndex = 0;
1022 return GetDefaultDeviceInfo(false, name, deviceIndex);
1025 // Tell the callback that we want
1026 // The name for this device
1027 _playDisplayDeviceName = name;
1028 _deviceIndex = index;
1030 // get playout devices
1033 // clear device name and index
1034 _playDisplayDeviceName = NULL;
// Capture-side counterpart using the source-enumeration callback.
1040 int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
1042 char name[kAdmMaxDeviceNameSize],
1043 char guid[kAdmMaxGuidSize])
1046 const uint16_t nDevices(RecordingDevices());
1048 if ((index > (nDevices - 1)) || (name == NULL))
1053 memset(name, 0, kAdmMaxDeviceNameSize);
1057 memset(guid, 0, kAdmMaxGuidSize);
1060 // Check if default device
1063 uint16_t deviceIndex = 0;
1064 return GetDefaultDeviceInfo(true, name, deviceIndex);
1067 // Tell the callback that we want
1068 // the name for this device
1069 _recDisplayDeviceName = name;
1070 _deviceIndex = index;
1072 // Get recording devices
1075 // Clear device name and index
1076 _recDisplayDeviceName = NULL;
// Counts recording devices: starts at 1 for the "default" pseudo-device and
// lets the PA source-enumeration callback increment _numRecDevices.
1082 int16_t AudioDeviceLinuxPulse::RecordingDevices()
1087 pa_operation* paOperation = NULL;
1088 _numRecDevices = 1; // Init to 1 to account for "default"
1090 // Get the whole list of devices and update _numRecDevices
1091 paOperation = LATE(pa_context_get_source_info_list)(_paContext,
1092 PaSourceInfoCallback,
// Block until the asynchronous enumeration has finished.
1095 WaitForOperationCompletion(paOperation);
1099 return _numRecDevices;
// Selects a recording device by index; rejected while recording is
// initialized and when the index is outside [0, nDevices-1].
1102 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
1105 if (_recIsInitialized)
1110 const uint16_t nDevices(RecordingDevices());
1112 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1113 " number of availiable input devices is %u", nDevices);
1115 if (index > (nDevices - 1))
1117 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1118 " device index is out of range [0,%u]", (nDevices - 1));
1122 _inputDeviceIndex = index;
1123 _inputDeviceIsSpecified = true;
// The WindowsDeviceType overload is not meaningful on Linux.
1128 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
1129 AudioDeviceModule::WindowsDeviceType /*device*/)
1131 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1132 "WindowsDeviceType not supported");
// Availability checks implemented as "try to init, then undo": the result of
// InitPlayout()/InitRecording() decides availability and the partial
// initialization is rolled back.
1136 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
1141 // Try to initialize the playout side
1142 int32_t res = InitPlayout();
1144 // Cancel effect of initialization
1155 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
1160 // Try to initialize the playout side
1161 int32_t res = InitRecording();
1163 // Cancel effect of initialization
// Prepares playout: initializes the speaker, creates a S16LE PA playback
// stream at sample_rate_hz_/_playChannels, configures latency/buffer
// attributes (when a specific latency is requested), allocates the 10 ms
// playout staging buffer and registers stream callbacks. Idempotent while
// _playIsInitialized is set.
1174 int32_t AudioDeviceLinuxPulse::InitPlayout()
1177 CriticalSectionScoped lock(&_critSect);
1184 if (!_outputDeviceIsSpecified)
1189 if (_playIsInitialized)
1194 // Initialize the speaker (devices might have been added or removed)
1195 if (InitSpeaker() == -1)
1197 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1198 " InitSpeaker() failed");
1201 // Set the play sample specification
1202 pa_sample_spec playSampleSpec;
1203 playSampleSpec.channels = _playChannels;
1204 playSampleSpec.format = PA_SAMPLE_S16LE;
1205 playSampleSpec.rate = sample_rate_hz_;
1207 // Create a new play stream
1208 _playStream = LATE(pa_stream_new)(_paContext, "playStream",
1209 &playSampleSpec, NULL);
1213 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1214 " failed to create play stream, err=%d",
1215 LATE(pa_context_errno)(_paContext));
1219 // Provide the playStream to the mixer
1220 _mixerManager.SetPlayStream(_playStream);
1222 if (_ptrAudioBuffer)
1224 // Update audio buffer with the selected parameters
1225 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
1226 _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
1229 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1230 " stream state %d\n", LATE(pa_stream_get_state)(_playStream));
// Timing flags let PA interpolate stream timing between server updates.
1233 _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
1234 | PA_STREAM_INTERPOLATE_TIMING);
1236 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
1238 // If configuring a specific latency then we want to specify
1239 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1240 // automatically to reach that target latency. However, that flag doesn't
1241 // exist in Ubuntu 8.04 and many people still use that, so we have to check
1242 // the protocol version of libpulse.
1243 if (LATE(pa_context_get_protocol_version)(_paContext)
1244 >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
1246 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1249 const pa_sample_spec *spec =
1250 LATE(pa_stream_get_sample_spec)(_playStream);
1253 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1254 " pa_stream_get_sample_spec()");
// Target latency in bytes = bytes/sec * minimum playback latency in ms.
1258 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1259 uint32_t latency = bytesPerSec
1260 * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / WEBRTC_PA_MSECS_PER_SEC;
1262 // Set the play buffer attributes
1263 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
1264 _playBufferAttr.tlength = latency; // target fill level of play buffer
1265 // minimum free num bytes before server request more data
1266 _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
1267 _playBufferAttr.prebuf = _playBufferAttr.tlength
1268 - _playBufferAttr.minreq; // prebuffer tlength before starting playout
1270 _configuredLatencyPlay = latency;
// 10 ms of 16-bit audio: rate/100 frames * 2 bytes * channels.
1273 // num samples in bytes * num channels
1274 _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
1275 _playbackBufferUnused = _playbackBufferSize;
1276 _playBuffer = new int8_t[_playbackBufferSize];
1278 // Enable underflow callback
1279 LATE(pa_stream_set_underflow_callback)(_playStream,
1280 PaStreamUnderflowCallback, this);
1282 // Set the state callback function for the stream
1283 LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this);
1285 // Mark playout side as initialized
1286 _playIsInitialized = true;
1287 _sndCardPlayDelay = 0;
1288 _sndCardRecDelay = 0;
// Prepares recording: initializes the microphone, creates a S16LE PA record
// stream, configures capture latency via fragsize/maxlength (when a specific
// latency is requested), allocates the 10 ms capture buffer and registers
// stream callbacks. Idempotent while _recIsInitialized is set.
1293 int32_t AudioDeviceLinuxPulse::InitRecording()
1296 CriticalSectionScoped lock(&_critSect);
1303 if (!_inputDeviceIsSpecified)
1308 if (_recIsInitialized)
1313 // Initialize the microphone (devices might have been added or removed)
1314 if (InitMicrophone() == -1)
1316 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1317 " InitMicrophone() failed");
1320 // Set the rec sample specification
1321 pa_sample_spec recSampleSpec;
1322 recSampleSpec.channels = _recChannels;
1323 recSampleSpec.format = PA_SAMPLE_S16LE;
1324 recSampleSpec.rate = sample_rate_hz_;
1326 // Create a new rec stream
1327 _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
1331 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1332 " failed to create rec stream, err=%d",
1333 LATE(pa_context_errno)(_paContext));
1337 // Provide the recStream to the mixer
1338 _mixerManager.SetRecStream(_recStream);
1340 if (_ptrAudioBuffer)
1342 // Update audio buffer with the selected parameters
1343 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
1344 _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
1347 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
// Timing flags let PA interpolate stream timing between server updates.
1349 _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
1350 | PA_STREAM_INTERPOLATE_TIMING);
1352 // If configuring a specific latency then we want to specify
1353 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1354 // automatically to reach that target latency. However, that flag doesn't
1355 // exist in Ubuntu 8.04 and many people still use that, so we have to check
1356 // the protocol version of libpulse.
1357 if (LATE(pa_context_get_protocol_version)(_paContext)
1358 >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
1360 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1363 const pa_sample_spec *spec =
1364 LATE(pa_stream_get_sample_spec)(_recStream);
1367 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1368 " pa_stream_get_sample_spec(rec)");
// Capture latency in bytes = bytes/sec * low-capture latency in ms.
1372 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1373 uint32_t latency = bytesPerSec
1374 * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;
1376 // Set the rec buffer attributes
1377 // Note: fragsize specifies a maximum transfer size, not a minimum, so
1378 // it is not possible to force a high latency setting, only a low one.
1379 _recBufferAttr.fragsize = latency; // size of fragment
1380 _recBufferAttr.maxlength = latency + bytesPerSec
1381 * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;
1383 _configuredLatencyRec = latency;
// 10 ms of 16-bit audio: rate/100 frames * 2 bytes * channels.
1386 _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
1387 _recordBufferUsed = 0;
1388 _recBuffer = new int8_t[_recordBufferSize];
1390 // Enable overflow callback
1391 LATE(pa_stream_set_overflow_callback)(_recStream, PaStreamOverflowCallback,
1394 // Set the state callback function for the stream
1395 LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this);
1397 // Mark recording side as initialized
1398 _recIsInitialized = true;
// Starts recording: signals the rec audio thread via _timeEventRec and waits
// (up to 10 s) on _recStartEvent for the thread to confirm; the thread itself
// sets the actual recording state.
1403 int32_t AudioDeviceLinuxPulse::StartRecording()
1406 if (!_recIsInitialized)
1416 // set state to ensure that the recording starts from the audio thread
1419 // the audio thread will signal when recording has started
1420 _timeEventRec.Set();
1421 if (kEventTimeout == _recStartEvent.Wait(10000))
1424 CriticalSectionScoped lock(&_critSect);
1428 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1429 " failed to activate recording");
1434 CriticalSectionScoped lock(&_critSect);
1437 // the recording state is set by the audio thread after recording has started
1440 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1441 " failed to activate recording");
// Stops recording: unregisters callbacks first (so no TERMINATED callback
// fires), disconnects the stream if connected, unrefs it, and frees the
// capture buffer.
1449 int32_t AudioDeviceLinuxPulse::StopRecording()
1452 CriticalSectionScoped lock(&_critSect);
1454 if (!_recIsInitialized)
1459 if (_recStream == NULL)
1464 _recIsInitialized = false;
1467 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1468 " stopping recording");
1473 DisableReadCallback();
1474 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
1476 // Unset this here so that we don't get a TERMINATED callback
1477 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
1479 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
1481 // Disconnect the stream
1482 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
1484 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1485 " failed to disconnect rec stream, err=%d\n",
1486 LATE(pa_context_errno)(_paContext));
1491 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1492 " disconnected recording");
1495 LATE(pa_stream_unref)(_recStream);
1500 // Provide the recStream to the mixer
1501 _mixerManager.SetRecStream(_recStream);
1505 delete [] _recBuffer;
// Small state accessors; Recording() takes the lock since _recording is
// also written by the audio thread.
1512 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
1514 return (_recIsInitialized);
1517 bool AudioDeviceLinuxPulse::Recording() const
1519 CriticalSectionScoped lock(&_critSect);
1520 return (_recording);
1523 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
1525 return (_playIsInitialized);
// Starts playout: signals the play audio thread via _timeEventPlay and waits
// (up to 10 s) on _playStartEvent for confirmation; the thread itself sets
// the playing state.
1528 int32_t AudioDeviceLinuxPulse::StartPlayout()
1530 if (!_playIsInitialized)
1540 // set state to ensure that playout starts from the audio thread
1543 // Both |_startPlay| and |_playing| needs protction since they are also
1544 // accessed on the playout thread.
1546 // the audio thread will signal when playout has started
1547 _timeEventPlay.Set();
1548 if (kEventTimeout == _playStartEvent.Wait(10000))
1551 CriticalSectionScoped lock(&_critSect);
1555 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1556 " failed to activate playout");
1561 CriticalSectionScoped lock(&_critSect);
1564 // the playing state is set by the audio thread after playout has started
1567 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1568 " failed to activate playing");
// Stops playout: mirrors StopRecording() — unregister callbacks, disconnect
// if connected, unref the stream, free the playout buffer, reset delays.
1576 int32_t AudioDeviceLinuxPulse::StopPlayout()
1579 CriticalSectionScoped lock(&_critSect);
1581 if (!_playIsInitialized)
1586 if (_playStream == NULL)
1591 _playIsInitialized = false;
1593 _sndCardPlayDelay = 0;
1594 _sndCardRecDelay = 0;
1596 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1597 " stopping playback");
1602 DisableWriteCallback();
1603 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
1605 // Unset this here so that we don't get a TERMINATED callback
1606 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
1608 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
1610 // Disconnect the stream
1611 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
1613 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1614 " failed to disconnect play stream, err=%d",
1615 LATE(pa_context_errno)(_paContext));
1620 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1621 " disconnected playback");
1624 LATE(pa_stream_unref)(_playStream);
1629 // Provide the playStream to the mixer
1630 _mixerManager.SetPlayStream(_playStream);
1634 delete [] _playBuffer;
// Delay accessors: report the cached sound-card delays (ms), guarded by the
// lock since the audio threads update them.
1641 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
1643 CriticalSectionScoped lock(&_critSect);
1644 delayMS = (uint16_t) _sndCardPlayDelay;
1648 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
1650 CriticalSectionScoped lock(&_critSect);
1651 delayMS = (uint16_t) _sndCardRecDelay;
1655 bool AudioDeviceLinuxPulse::Playing() const
1657 CriticalSectionScoped lock(&_critSect);
// Only fixed-size playout buffers are supported on this platform; adaptive
// buffering is rejected with an error trace.
1661 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1662 const AudioDeviceModule::BufferType type,
1666 if (type != AudioDeviceModule::kFixedBufferSize)
1668 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1669 " Adaptive buffer size not supported on this platform");
1673 _playBufType = type;
1674 _playBufDelayFixed = sizeMS;
// Returns the configured buffer type and fixed delay (ms).
1679 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1680 AudioDeviceModule::BufferType& type,
1681 uint16_t& sizeMS) const
1684 type = _playBufType;
1685 sizeMS = _playBufDelayFixed;
// CPU load reporting is not available on this platform.
1690 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
1693 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1694 " API call not supported on this platform");
// Warning/error flag accessors and clearers; all lock because the audio
// threads set these counters.
1698 bool AudioDeviceLinuxPulse::PlayoutWarning() const
1700 CriticalSectionScoped lock(&_critSect);
1701 return (_playWarning > 0);
1704 bool AudioDeviceLinuxPulse::PlayoutError() const
1706 CriticalSectionScoped lock(&_critSect);
1707 return (_playError > 0);
1710 bool AudioDeviceLinuxPulse::RecordingWarning() const
1712 CriticalSectionScoped lock(&_critSect);
1713 return (_recWarning > 0);
1716 bool AudioDeviceLinuxPulse::RecordingError() const
1718 CriticalSectionScoped lock(&_critSect);
1719 return (_recError > 0);
1722 void AudioDeviceLinuxPulse::ClearPlayoutWarning()
1724 CriticalSectionScoped lock(&_critSect);
1728 void AudioDeviceLinuxPulse::ClearPlayoutError()
1730 CriticalSectionScoped lock(&_critSect);
1734 void AudioDeviceLinuxPulse::ClearRecordingWarning()
1736 CriticalSectionScoped lock(&_critSect);
1740 void AudioDeviceLinuxPulse::ClearRecordingError()
1742 CriticalSectionScoped lock(&_critSect);
1746 // ============================================================================
1748 // ============================================================================
1750 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
1752 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaContextStateCallbackHandler(
1756 // ----------------------------------------------------------------------------
1757 // PaSinkInfoCallback
1758 // ----------------------------------------------------------------------------
// Static trampoline: sink enumeration callback; forwards |i| and |eol| to the
// member handler on the object recovered from |pThis|.
1760 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
1761                                                const pa_sink_info *i, int eol,
1764     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
// Static trampoline: source enumeration callback; forwards to the member
// handler on the object recovered from |pThis|.
1768 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
1769                                                  const pa_source_info *i,
1770                                                  int eol, void *pThis)
1772     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
// Static trampoline: server-info callback; forwards the pa_server_info to the
// member handler on the object recovered from |pThis|.
1776 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
1777                                                  const pa_server_info *i,
1780     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaServerInfoCallbackHandler(i);
// Static trampoline: stream-state callback; forwards the stream to the member
// handler on the object recovered from |pThis|.
1783 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
1785     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamStateCallbackHandler(p);
// Member handler for PulseAudio context state transitions. Runs on the PA
// mainloop thread. For terminal states (FAILED/TERMINATED) and for READY it
// sets _paStateChanged and signals the threaded mainloop so that the thread
// blocked in pa_threaded_mainloop_wait() (see InitPulseAudio) can proceed;
// intermediate states are only traced.
1788 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
1790     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1791                  " context state cb");
1793     pa_context_state_t state = LATE(pa_context_get_state)(c);
1796         case PA_CONTEXT_UNCONNECTED:
1797             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1800         case PA_CONTEXT_CONNECTING:
1801         case PA_CONTEXT_AUTHORIZING:
1802         case PA_CONTEXT_SETTING_NAME:
1803             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1806         case PA_CONTEXT_FAILED:
1807         case PA_CONTEXT_TERMINATED:
1808             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 // Wake the waiter; it distinguishes failure from success by
                 // re-reading the context state afterwards.
1810             _paStateChanged = true;
1811             LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1813         case PA_CONTEXT_READY:
1814             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1816             _paStateChanged = true;
1817             LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
// Member handler for sink (playout device) enumeration. On end-of-list it
// signals the mainloop waiter. While enumerating, when the running counter
// _numPlayDevices matches the requested _deviceIndex, it records the PA device
// index and copies the sink's name/description into the caller-provided
// buffers (always NUL-terminating after strncpy).
1822 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
1827         // Signal that we are done
1828         LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1832     if (_numPlayDevices == _deviceIndex)
1834         // Convert the device index to the one of the sink
1835         _paDeviceIndex = i->index;
1837         if (_playDeviceName)
1839             // Copy the sink name
1840             strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
1841             _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1843         if (_playDisplayDeviceName)
1845             // Copy the sink display name
1846             strncpy(_playDisplayDeviceName, i->description,
1847                     kAdmMaxDeviceNameSize);
1848             _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
// Member handler for source (recording device) enumeration. Mirrors the sink
// handler, but skips monitor sources: only entries whose monitor_of_sink is
// PA_INVALID_INDEX (i.e. real capture devices) are counted and matched against
// _deviceIndex. Copies name/description into the caller-provided buffers with
// explicit NUL termination.
1855 void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
1856     const pa_source_info *i,
1861         // Signal that we are done
1862         LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1866     // We don't want to list output devices
1867     if (i->monitor_of_sink == PA_INVALID_INDEX)
1869         if (_numRecDevices == _deviceIndex)
1871             // Convert the device index to the one of the source
1872             _paDeviceIndex = i->index;
1876                 // copy the source name
1877                 strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
1878                 _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1880             if (_recDisplayDeviceName)
1882                 // Copy the source display name
1883                 strncpy(_recDisplayDeviceName, i->description,
1884                         kAdmMaxDeviceNameSize);
1885                 _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
// Member handler for pa_context_get_server_info results. Caches the server's
// native sample rate in sample_rate_hz_, the server version string (into the
// 32-byte _paServerVersion buffer), and — when the display-name pointers were
// armed by GetDefaultDeviceInfo — the default source/sink names. Signals the
// mainloop waiter when done.
1893 void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(const pa_server_info *i)
1895     // Use PA native sampling rate
1896     sample_rate_hz_ = i->sample_spec.rate;
1898     // Copy the PA server version
1899     strncpy(_paServerVersion, i->server_version, 31);
1900     _paServerVersion[31] = '\0';
1902     if (_recDisplayDeviceName)
1904         // Copy the source name
1905         strncpy(_recDisplayDeviceName, i->default_source_name,
1906                 kAdmMaxDeviceNameSize);
1907         _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1910     if (_playDisplayDeviceName)
1912         // Copy the sink name
1913         strncpy(_playDisplayDeviceName, i->default_sink_name,
1914                 kAdmMaxDeviceNameSize);
1915         _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1918     LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
// Member handler for play/record stream state changes. Traces each state and
// signals the threaded mainloop so that threads waiting for the stream to
// reach PA_STREAM_READY (see PlayThreadProcess/RecThreadProcess) wake up.
1921 void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
1923     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1924                  " stream state cb");
1926     pa_stream_state_t state = LATE(pa_stream_get_state)(p);
1929         case PA_STREAM_UNCONNECTED:
1930             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1933         case PA_STREAM_CREATING:
1934             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1937         case PA_STREAM_FAILED:
1938         case PA_STREAM_TERMINATED:
1939             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1942         case PA_STREAM_READY:
1943             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1948     LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
// Queries the server info (which caches the version string into
// _paServerVersion via PaServerInfoCallbackHandler), waits for the async
// operation to finish, and logs the detected PulseAudio version.
1951 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
1955     pa_operation* paOperation = NULL;
1957     // get the server info and update deviceName
1958     paOperation = LATE(pa_context_get_server_info)(_paContext,
1959                                                    PaServerInfoCallback, this);
1961     WaitForOperationCompletion(paOperation);
1965     WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
1966                  " checking PulseAudio version: %s", _paServerVersion);
// Queries the server info so PaServerInfoCallbackHandler can cache the
// server's native sample rate into sample_rate_hz_, then waits for completion.
1971 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
1975     pa_operation* paOperation = NULL;
1977     // Get the server info and update sample_rate_hz_
1978     paOperation = LATE(pa_context_get_server_info)(_paContext,
1979                                                    PaServerInfoCallback, this);
1981     WaitForOperationCompletion(paOperation);
// Resolves the system default recording or playout device. Arms the display-
// name pointer (_recDisplayDeviceName/_playDisplayDeviceName) at a local
// buffer so the server-info callback fills in the default device name, then
// looks that name up via pa_context_get_{source,sink}_info_by_name to obtain
// its PA index. Produces a "default: <name>" string for the caller and resets
// all the callback-communication members before returning.
// NOTE(review): the nameLen arithmetic reserves 9 chars for the "default: "
// prefix written by strcpy below.
1988 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
1992     char tmpName[kAdmMaxDeviceNameSize] = {0};
1993     // subtract length of "default: "
1994     uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
2000         strcpy(name, "default: ");
2004     // Tell the callback that we want
2005     // the name for this device
2008         _recDisplayDeviceName = tmpName;
2011         _playDisplayDeviceName = tmpName;
2015     _paDeviceIndex = -1;
2017     _numPlayDevices = 0;
2022     pa_operation* paOperation = NULL;
2024     // Get the server info and update deviceName
2025     paOperation = LATE(pa_context_get_server_info)(_paContext,
2026                                                    PaServerInfoCallback, this);
2028     WaitForOperationCompletion(paOperation);
2030     // Get the device index
2034             = LATE(pa_context_get_source_info_by_name)(_paContext,
2036                                                        PaSourceInfoCallback,
2041             = LATE(pa_context_get_sink_info_by_name)(_paContext,
2043                                                      PaSinkInfoCallback, this);
2046     WaitForOperationCompletion(paOperation);
2051     index = _paDeviceIndex;
2055         // Copy to name string
2056         strncpy(pName, tmpName, nameLen);
     // Disarm the callback pointers and reset enumeration state so later
     // enumerations start from scratch.
2060     _playDisplayDeviceName = NULL;
2061     _recDisplayDeviceName = NULL;
2062     _paDeviceIndex = -1;
2064     _numPlayDevices = 0;
// Full PulseAudio bring-up: loads the late-binding symbol table, creates and
// starts the threaded mainloop, obtains its API vtable, creates a context,
// connects it to the default server (PA_CONTEXT_NOAUTOSPAWN), and blocks on
// the mainloop condition until PaContextStateCallbackHandler signals a
// terminal state. On success it hands the mainloop/context to the mixer
// manager, verifies the server version, and initializes the sample rate.
// NOTE(review): early-return/cleanup paths between the visible error traces
// fall in gaps of this listing.
2070 int32_t AudioDeviceLinuxPulse::InitPulseAudio()
2075     if (!PaSymbolTable.Load())
2077         // Most likely the Pulse library and sound server are not installed on
2079         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2080                      " failed to load symbol table");
2084     // Create a mainloop API and connection to the default server
2085     // the mainloop is the internal asynchronous API event loop
2087         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2088                      " PA mainloop has already existed");
2091     _paMainloop = LATE(pa_threaded_mainloop_new)();
2094         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2095                      " could not create mainloop");
2099     // Start the threaded main loop
2100     retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
2101     if (retVal != PA_OK)
2103         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2104                      " failed to start main loop, error=%d", retVal);
2108     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2109                  " mainloop running!");
2113     _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
2114     if (!_paMainloopApi)
2116         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2117                      " could not create mainloop API");
2122     // Create a new PulseAudio context
2124         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2125                      " PA context has already existed");
2129     _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
2133         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2134                      " could not create context");
2139     // Set state callback function
2140     LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
2143     // Connect the context to a server (default)
2144     _paStateChanged = false;
2145     retVal = LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN,
2148     if (retVal != PA_OK)
2150         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2151                      " failed to connect context, error=%d", retVal);
2156     // Wait for state change
2157     while (!_paStateChanged)
2159         LATE(pa_threaded_mainloop_wait)(_paMainloop);
2162     // Now check to see what final state we reached.
2163     pa_context_state_t state = LATE(pa_context_get_state)(_paContext);
2165     if (state != PA_CONTEXT_READY)
2167         if (state == PA_CONTEXT_FAILED)
2169             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2170                          " failed to connect to PulseAudio sound server");
2171         } else if (state == PA_CONTEXT_TERMINATED)
2173             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2174                          " PulseAudio connection terminated early");
2177             // Shouldn't happen, because we only signal on one of those three
2179             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2180                          " unknown problem connecting to PulseAudio");
2188     // Give the objects to the mixer manager
2189     _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);
2191     // Check the version
2192     if (CheckPulseAudioVersion() < 0)
2194         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2195                      " PulseAudio version %s not supported", _paServerVersion);
2199     // Initialize sampling frequency
2200     if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
2202         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2203                      " failed to initialize sampling frequency, set to %d Hz",
// Tears down the PulseAudio connection in the reverse order of
// InitPulseAudio: disconnect then unref the context, stop then free the
// threaded mainloop. Guard conditions (null checks) around each step fall in
// gaps of this listing.
2211 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2213     // Do nothing if the instance doesn't exist
2214     // likely PaSymbolTable.Load() fails
2221     // Disconnect the context
2224         LATE(pa_context_disconnect)(_paContext);
2227     // Unreference the context
2230         LATE(pa_context_unref)(_paContext);
2236     // Stop the threaded main loop
2239         LATE(pa_threaded_mainloop_stop)(_paMainloop);
2242     // Free the mainloop
2245         LATE(pa_threaded_mainloop_free)(_paMainloop);
2250     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2251                  " PulseAudio terminated");
// Acquires the PulseAudio threaded-mainloop lock (must not be held when
// calling pa_threaded_mainloop_wait indirectly via callbacks).
2256 void AudioDeviceLinuxPulse::PaLock()
2258     LATE(pa_threaded_mainloop_lock)(_paMainloop);
// Releases the PulseAudio threaded-mainloop lock taken by PaLock().
2261 void AudioDeviceLinuxPulse::PaUnLock()
2263     LATE(pa_threaded_mainloop_unlock)(_paMainloop);
// Blocks on the threaded mainloop until the given async pa_operation leaves
// the RUNNING state, then drops our reference to it. A NULL operation is
// logged and nothing is waited on. Callers must hold the mainloop lock
// (pa_threaded_mainloop_wait requires it).
2266 void AudioDeviceLinuxPulse::WaitForOperationCompletion(
2267     pa_operation* paOperation) const
2271         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2272                      "paOperation NULL in WaitForOperationCompletion");
2276     while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
2278         LATE(pa_threaded_mainloop_wait)(_paMainloop);
2281     LATE(pa_operation_unref)(paOperation);
2284 // ============================================================================
2286 // ============================================================================
// (Re)arms the playout write callback. If the stream is already READY and
// already has writable space, no write event would ever fire for that space,
// so we record the space and dispatch the play event ourselves before
// registering the callback.
2288 void AudioDeviceLinuxPulse::EnableWriteCallback()
2290     if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
2292         // May already have available space. Must check.
2293         _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
2294         if (_tempBufferSpace > 0)
2296             // Yup, there is already space available, so if we register a write
2297             // callback then it will not receive any event. So dispatch one ourself
2299             _timeEventPlay.Set();
2304     LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
// Unregisters the playout write callback (PA would otherwise keep firing it
// until the writable space is consumed).
2308 void AudioDeviceLinuxPulse::DisableWriteCallback()
2310     LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
// Static trampoline: playout write callback; forwards the writable byte count
// to the member handler on the object recovered from |pThis|.
2313 void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
2314                                                   size_t buffer_space,
2317     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
// Records how many bytes PA can accept, disables further write callbacks
// (the data is produced asynchronously by PlayThreadProcess, and PA would
// otherwise invoke us continuously), and wakes the play thread.
2321 void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
2323     _tempBufferSpace = bufferSpace;
2325     // Since we write the data asynchronously on a different thread, we have
2326     // to temporarily disable the write callback or else Pulse will call it
2327     // continuously until we write the data. We re-enable it below.
2328     DisableWriteCallback();
2329     _timeEventPlay.Set();
// Static trampoline: playout underflow callback; forwards to the member
// handler on the object recovered from |pThis|.
2332 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
2335     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamUnderflowCallbackHandler();
// Reacts to a playout underflow by growing the stream's target latency:
// computes a new latency (previous + a fixed increment converted from ms to
// bytes via the stream's sample spec), rebuilds _playBufferAttr around it,
// and asks PA to apply the new buffer attributes asynchronously (completion
// is not awaited). If we never configured latency ourselves
// (WEBRTC_PA_NO_LATENCY_REQUIREMENTS), we deliberately do nothing.
2338 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
2340     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2341                  " Playout underflow");
2343     if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
2345         // We didn't configure a pa_buffer_attr before, so switching to one now
2346         // would be questionable.
2350     // Otherwise reconfigure the stream with a higher target latency.
2352     const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
2355         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2356                      " pa_stream_get_sample_spec()");
2360     size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
2361     uint32_t newLatency = _configuredLatencyPlay + bytesPerSec
2362         * WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / WEBRTC_PA_MSECS_PER_SEC;
2364     // Set the play buffer attributes
2365     _playBufferAttr.maxlength = newLatency;
2366     _playBufferAttr.tlength = newLatency;
2367     _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
2368     _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
2370     pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
2371                                                        &_playBufferAttr, NULL,
2375         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2376                      " pa_stream_set_buffer_attr()");
2380     // Don't need to wait for this to complete.
2381     LATE(pa_operation_unref)(op);
2383     // Save the new latency in case we underflow again.
2384     _configuredLatencyPlay = newLatency;
// Arms the record stream's read callback (fires when captured data is ready).
2387 void AudioDeviceLinuxPulse::EnableReadCallback()
2389     LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
// Unregisters the record stream's read callback.
2392 void AudioDeviceLinuxPulse::DisableReadCallback()
2394     LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
// Static trampoline: capture read callback; forwards to the member handler on
// the object recovered from |pThis|.
2397 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
2401     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamReadCallbackHandler();
// Peeks the captured fragment (pointer + size cached in _tempSampleData /
// _tempSampleDataSize so the worker thread avoids an extra lock round-trip),
// disables further read callbacks, and wakes the record thread, which will
// consume the data and pa_stream_drop it.
2404 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
2406     // We get the data pointer and size now in order to save one Lock/Unlock
2407     // in the worker thread
2408     if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, &_tempSampleDataSize)
2411         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2412                      " Can't read data!");
2416     // Since we consume the data asynchronously on a different thread, we have
2417     // to temporarily disable the read callback or else Pulse will call it
2418     // continuously until we consume the data. We re-enable it below
2419     DisableReadCallback();
2420     _timeEventRec.Set();
// Static trampoline: capture overflow callback; forwards to the member
// handler on the object recovered from |pThis|.
2423 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
2426     static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamOverflowCallbackHandler();
// A capture-side overflow is only logged; no recovery action is taken here.
2429 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
2431     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2432                  " Recording overflow");
// Returns the stream latency in microseconds, or a safe fallback value when
// latency reporting is disabled or pa_stream_get_latency fails (we prefer a
// slightly wrong delay estimate over halting playout/capture). When PA
// reports a negative latency (possible for monitor streams whose captured
// samples have not yet been played), |latency| holds the magnitude, so the
// value is negated and clamped to non-negative.
2435 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
2437     if (!WEBRTC_PA_REPORT_LATENCY)
2449     if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
2451         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2452                      " Can't query latency");
2453         // We'd rather continue playout/capture with an incorrect delay than stop
2454         // it altogether, so return a valid value.
2460         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2461                      " warning: pa_stream_get_latency reported negative delay");
2463         // The delay can be negative for monitoring streams if the captured
2464         // samples haven't been played yet. In such a case, "latency" contains the
2465         // magnitude, so we must negate it to get the real value.
2466         int32_t tmpLatency = (int32_t) -latency;
2469         // Make sure that we don't use a negative delay
2476     return (int32_t) latency;
// Chops an arbitrary-sized captured fragment into full 10 ms blocks for
// VoiceEngine. Any partial block left in _recBuffer from the previous call is
// completed first, then whole _recordBufferSize chunks are delivered straight
// from |bufferData|, and any trailing remainder is stashed in _recBuffer for
// next time. recDelay combines the PA stream latency with the amount of
// already-buffered audio (10 ms per buffered block). Returns -1 when
// ProcessRecordedData reports that recording has stopped.
2480 int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData,
2483     size_t size = bufferSize;
     // 16-bit samples: 2 bytes per sample per channel.
2484     uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);
2486     // Account for the peeked data and the used data
2487     uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
2488         / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));
2490     _sndCardRecDelay = recDelay;
2494         // Get the playout delay
2495         _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
2498     if (_recordBufferUsed > 0)
2500         // Have to copy to the buffer until it is full
2501         size_t copy = _recordBufferSize - _recordBufferUsed;
2507         memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
2508         _recordBufferUsed += copy;
2509         bufferData = static_cast<const char *> (bufferData) + copy;
2512         if (_recordBufferUsed != _recordBufferSize)
2514             // Not enough data yet to pass to VoE
2518         // Provide data to VoiceEngine
2519         if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
2521             // We have stopped recording
2525         _recordBufferUsed = 0;
2528     // Now process full 10ms sample sets directly from the input
2529     while (size >= _recordBufferSize)
2531         // Provide data to VoiceEngine
2532         if (ProcessRecordedData(
2533             static_cast<int8_t *> (const_cast<void *> (bufferData)),
2534             numRecSamples, recDelay) == -1)
2536             // We have stopped recording
2540         bufferData = static_cast<const char *> (bufferData) + _recordBufferSize;
2541         size -= _recordBufferSize;
2543         // We have consumed 10ms of data
2547     // Now save any leftovers for later.
2550         memcpy(_recBuffer, bufferData, size);
2551         _recordBufferUsed = size;
// Delivers one 10 ms block of recorded audio to VoiceEngine via the
// AudioDeviceBuffer: stores the samples, attaches the current mic level (for
// AGC), VQE delay data and typing status, then calls DeliverRecordedData().
// Afterwards, if the observer requested a new (non-zero) mic level, it is
// applied via SetMicrophoneVolume. Annotated EXCLUSIVE_LOCKS_REQUIRED: the
// caller holds _critSect; the comments at 2595 indicate the lock is dropped
// around the delivery callback and the state must be re-checked after.
2557 int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
2559     uint32_t bufferSizeInSamples,
2560     uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
2562     uint32_t currentMicLevel(0);
2563     uint32_t newMicLevel(0);
2565     _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);
2569         // Store current mic level in the audio buffer if AGC is enabled
2570         if (MicrophoneVolume(currentMicLevel) == 0)
2572             // This call does not affect the actual microphone volume
2573             _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
2577     const uint32_t clockDrift(0);
2578     // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
2579     // near-end signals at the AEC for PulseAudio. I think the system delay is
2580     // being correctly calculated here, but for legacy reasons we add +10 ms to
2581     // the value in the AEC. The real fix will be part of a larger investigation
2582     // into managing system delay in the AEC.
2587     _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
2588     _ptrAudioBuffer->SetTypingStatus(KeyPressed());
2589     // Deliver recorded samples at specified sample rate,
2590     // mic level etc. to the observer using callback
2592     _ptrAudioBuffer->DeliverRecordedData();
2595     // We have been unlocked - check the flag again
2603     newMicLevel = _ptrAudioBuffer->NewMicLevel();
2604     if (newMicLevel != 0)
2606         // The VQE will only deliver non-zero microphone levels when a
2607         // change is needed.
2608         // Set this new mic level (received from the observer as return
2609         // value in the callback).
2610         WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
2611                      " AGC change of volume: old=%u => new=%u",
2612                      currentMicLevel, newMicLevel);
2613         if (SetMicrophoneVolume(newMicLevel) == -1)
2615             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2617                          " the required modification of the microphone "
// ThreadWrapper entry point for the playout thread; dispatches to the member
// loop body. Returning true keeps the thread running.
2626 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
2628     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
// ThreadWrapper entry point for the record thread; dispatches to the member
// loop body. Returning true keeps the thread running.
2631 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
2633     return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
// One iteration of the playout worker loop. Waits (<=1 s) for _timeEventPlay,
// which is set either by the PA write callback (space available) or by
// EnableWriteCallback's self-dispatch. Handles two phases:
//  1) Startup (_startPlay): resolves the device name for a non-default output
//     device, optionally starts the stream muted / with a saved volume
//     (protocol >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION), connects the
//     playback stream, waits for PA_STREAM_READY, enables write callbacks and
//     signals _playStartEvent.
//  2) Steady state: updates _sndCardPlayDelay, flushes any leftover bytes in
//     _playBuffer (tracked by _playbackBufferUnused), then pulls fresh 10 ms
//     blocks from the AudioDeviceBuffer while PA still has space, counting
//     consecutive pa_stream_write failures in _writeErrors and raising
//     _playError after 10. Re-arms the write callback when PA space is
//     exhausted.
// NOTE(review): this listing is gappy — lock/unlock pairs (PaLock/PaUnLock),
// loop/return statements and several guards fall between the visible lines.
2636 bool AudioDeviceLinuxPulse::PlayThreadProcess()
2638     switch (_timeEventPlay.Wait(1000))
2640         case kEventSignaled:
2641             _timeEventPlay.Reset();
2644             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2645                          "EventWrapper::Wait() failed");
2655         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2656                      "_startPlay true, performing initial actions");
2659         _playDeviceName = NULL;
2661         // Set if not default device
2662         if (_outputDeviceIndex > 0)
2664             // Get the playout device name
2665             _playDeviceName = new char[kAdmMaxDeviceNameSize];
2666             _deviceIndex = _outputDeviceIndex;
2670         // Start muted only supported on 0.9.11 and up
2671         if (LATE(pa_context_get_protocol_version)(_paContext)
2672             >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
2674             // Get the currently saved speaker mute status
2675             // and set the initial mute status accordingly
2676             bool enabled(false);
2677             _mixerManager.SpeakerMute(enabled);
2680                 _playStreamFlags |= PA_STREAM_START_MUTED;
2684         // Get the currently saved speaker volume
2685         uint32_t volume = 0;
2686         if (update_speaker_volume_at_startup_)
2687           _mixerManager.SpeakerVolume(volume);
2691         // NULL gives PA the choice of startup volume.
2692         pa_cvolume* ptr_cvolume = NULL;
2693         if (update_speaker_volume_at_startup_) {
2694           pa_cvolume cVolumes;
2695           ptr_cvolume = &cVolumes;
2697           // Set the same volume for all channels
2698           const pa_sample_spec *spec =
2699               LATE(pa_stream_get_sample_spec)(_playStream);
2700           LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
2701           update_speaker_volume_at_startup_ = false;
2704         // Connect the stream to a sink
2705         if (LATE(pa_stream_connect_playback)(
2709             (pa_stream_flags_t) _playStreamFlags,
2710             ptr_cvolume, NULL) != PA_OK)
2712             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2713                          " failed to connect play stream, err=%d",
2714                          LATE(pa_context_errno)(_paContext));
2717         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2718                      " play stream connected");
2720         // Wait for state change
2721         while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
2723             LATE(pa_threaded_mainloop_wait)(_paMainloop);
2726         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2727                      " play stream ready");
2729         // We can now handle write callbacks
2730         EnableWriteCallback();
2734         // Clear device name
2735         if (_playDeviceName)
2737             delete [] _playDeviceName;
2738             _playDeviceName = NULL;
2742         _playStartEvent.Set();
2752         // Update the playout delay
2753         _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
2757         if (_playbackBufferUnused < _playbackBufferSize)
2760             size_t write = _playbackBufferSize - _playbackBufferUnused;
2761             if (_tempBufferSpace < write)
2763                 write = _tempBufferSpace;
2767             if (LATE(pa_stream_write)(
2769                 (void *) &_playBuffer[_playbackBufferUnused],
2770                 write, NULL, (int64_t) 0,
2771                 PA_SEEK_RELATIVE) != PA_OK)
2774                 if (_writeErrors > 10)
2776                     if (_playError == 1)
2778                         WEBRTC_TRACE(kTraceWarning,
2780                                      " pending playout error exists");
2782                     _playError = 1; // Triggers callback from module process thread
2787                                  " kPlayoutError message posted: "
2788                                  "_writeErrors=%u, error=%d",
2790                                  LATE(pa_context_errno)(_paContext));
2796             _playbackBufferUnused += write;
2797             _tempBufferSpace -= write;
2800         uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
2801         if (_tempBufferSpace > 0) // Might have been reduced to zero by the above
2803             // Ask for new PCM data to be played out using the AudioDeviceBuffer
2804             // ensure that this callback is executed without taking the
2805             // audio-thread lock
2807             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2808                          " requesting data");
2810             _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
2813             // We have been unlocked - check the flag again
2820             nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
2821             if (nSamples != numPlaySamples)
2823                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
2824                              _id, " invalid number of output samples(%d)",
2828             size_t write = _playbackBufferSize;
2829             if (_tempBufferSpace < write)
2831                 write = _tempBufferSpace;
2834             WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2837             if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
2838                                       write, NULL, (int64_t) 0,
2839                                       PA_SEEK_RELATIVE) != PA_OK)
2842                 if (_writeErrors > 10)
2844                     if (_playError == 1)
2846                         WEBRTC_TRACE(kTraceWarning,
2848                                      " pending playout error exists");
2850                     _playError = 1; // triggers callback from module process thread
2855                                  " kPlayoutError message posted: "
2856                                  "_writeErrors=%u, error=%d",
2858                                  LATE(pa_context_errno)(_paContext));
            // Remember how much of the fetched block PA did not accept;
            // it is flushed at the top of the next iteration.
2864             _playbackBufferUnused = write;
2867         _tempBufferSpace = 0;
2869         EnableWriteCallback();
// One iteration of the record worker loop. Waits (<=1 s) for _timeEventRec,
// set by the PA read callback. Two phases:
//  1) Startup (_startRec): resolves the device name for a non-default input
//     device, connects the record stream, waits for PA_STREAM_READY, enables
//     read callbacks and signals _recStartEvent.
//  2) Steady state: consumes the fragment cached by
//     PaStreamReadCallbackHandler (_tempSampleData/_tempSampleDataSize) via
//     ReadRecordedData, acks it with pa_stream_drop, then keeps peeking and
//     delivering further fragments while pa_stream_readable_size reports
//     data; finally re-arms the read callback.
// NOTE(review): this listing is gappy — PaLock/PaUnLock pairs, loop headers
// and return statements fall between the visible lines.
2878 bool AudioDeviceLinuxPulse::RecThreadProcess()
2880     switch (_timeEventRec.Wait(1000))
2882         case kEventSignaled:
2883             _timeEventRec.Reset();
2886             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
2887                          "EventWrapper::Wait() failed");
2897         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
2898                      "_startRec true, performing initial actions");
2900         _recDeviceName = NULL;
2902         // Set if not default device
2903         if (_inputDeviceIndex > 0)
2905             // Get the recording device name
2906             _recDeviceName = new char[kAdmMaxDeviceNameSize];
2907             _deviceIndex = _inputDeviceIndex;
2913         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2914                      " connecting stream");
2916         // Connect the stream to a source
2917         if (LATE(pa_stream_connect_record)(_recStream, _recDeviceName,
2919                                            (pa_stream_flags_t) _recStreamFlags)
2922             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2923                          " failed to connect rec stream, err=%d",
2924                          LATE(pa_context_errno)(_paContext));
2927         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2930         // Wait for state change
2931         while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
2933             LATE(pa_threaded_mainloop_wait)(_paMainloop);
2936         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2939         // We can now handle read callbacks
2940         EnableReadCallback();
2944         // Clear device name
2947             delete [] _recDeviceName;
2948             _recDeviceName = NULL;
2953         _recStartEvent.Set();
2961         // Read data and provide it to VoiceEngine
2962         if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
2968         _tempSampleData = NULL;
2969         _tempSampleDataSize = 0;
2974             // Ack the last thing we read
2975             if (LATE(pa_stream_drop)(_recStream) != 0)
2977                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
2978                              _id, " failed to drop, err=%d\n",
2979                              LATE(pa_context_errno)(_paContext));
2982             if (LATE(pa_stream_readable_size)(_recStream) <= 0)
2984                 // Then that was all the data
2989             const void *sampleData;
2990             size_t sampleDataSize;
2992             if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
2995                 _recError = 1; // triggers callback from module process thread
2996                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
2997                              _id, " RECORD_ERROR message posted, error = %d",
2998                              LATE(pa_context_errno)(_paContext));
3002             _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
3005             // Drop lock for sigslot dispatch, which could take a while.
3007             // Read data and provide it to VoiceEngine
3008             if (ReadRecordedData(sampleData, sampleDataSize) == -1)
3015             // Return to top of loop for the ack and the check for more data.
3018         EnableReadCallback();
// Detects a keyboard key press between calls by diffing the X11 keymap
// bitmap: a bit that is newly set versus _oldKeyState means a key went down.
// The current keymap is then saved for the next comparison.
// NOTE(review): declarations of szKey/i/state fall in a gap of this listing;
// presumably szKey is the 32-byte XQueryKeymap buffer — confirm in the full
// file.
3027 bool AudioDeviceLinuxPulse::KeyPressed() const{
3036     // Check key map status
3037     XQueryKeymap(_XDisplay, szKey);
3039     // A bit change in keymap means a key is pressed
3040     for (i = 0; i < sizeof(szKey); i++)
3041         state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
3044     memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
3045     return (state != 0);