2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
11 #include <AudioToolbox/AudioServices.h> // AudioSession
13 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
15 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
16 #include "webrtc/system_wrappers/interface/trace.h"
19 AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
21 _ptrAudioBuffer(NULL),
22 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
23 _captureWorkerThread(NULL),
24 _captureWorkerThreadId(0),
26 _auVoiceProcessing(NULL),
31 _recIsInitialized(false),
32 _playIsInitialized(false),
33 _recordingDeviceIsSpecified(false),
34 _playoutDeviceIsSpecified(false),
35 _micIsInitialized(false),
36 _speakerIsInitialized(false),
41 _playoutDelayMeasurementCounter(9999),
42 _recordingDelayHWAndOS(0),
43 _recordingDelayMeasurementCounter(9999),
48 _playoutBufferUsed(0),
49 _recordingCurrentSeq(0),
50 _recordingBufferTotalSize(0) {
51 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
52 "%s created", __FUNCTION__);
54 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
55 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
56 memset(_recordingLength, 0, sizeof(_recordingLength));
57 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
60 AudioDeviceIPhone::~AudioDeviceIPhone() {
61 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
62 "%s destroyed", __FUNCTION__);
70 // ============================================================================
72 // ============================================================================
74 void AudioDeviceIPhone::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
75 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
78 CriticalSectionScoped lock(&_critSect);
80 _ptrAudioBuffer = audioBuffer;
82 // inform the AudioBuffer about default settings for this implementation
83 _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
84 _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
85 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
86 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
89 int32_t AudioDeviceIPhone::ActiveAudioLayer(
90 AudioDeviceModule::AudioLayer& audioLayer) const {
91 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
93 audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
97 int32_t AudioDeviceIPhone::Init() {
98 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
101 CriticalSectionScoped lock(&_critSect);
109 // Create and start capture thread
110 if (_captureWorkerThread == NULL) {
112 = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority,
113 "CaptureWorkerThread");
115 if (_captureWorkerThread == NULL) {
116 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
117 _id, "CreateThread() error");
121 unsigned int threadID(0);
122 bool res = _captureWorkerThread->Start(threadID);
123 _captureWorkerThreadId = static_cast<uint32_t>(threadID);
124 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
125 _id, "CaptureWorkerThread started (res=%d)", res);
127 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
128 _id, "Thread already created");
140 int32_t AudioDeviceIPhone::Terminate() {
141 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
149 // Stop capture thread
150 if (_captureWorkerThread != NULL) {
151 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
152 _id, "Stopping CaptureWorkerThread");
153 bool res = _captureWorkerThread->Stop();
154 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
155 _id, "CaptureWorkerThread stopped (res=%d)", res);
156 delete _captureWorkerThread;
157 _captureWorkerThread = NULL;
160 // Shut down Audio Unit
161 ShutdownPlayOrRecord();
164 _initialized = false;
165 _speakerIsInitialized = false;
166 _micIsInitialized = false;
167 _playoutDeviceIsSpecified = false;
168 _recordingDeviceIsSpecified = false;
172 bool AudioDeviceIPhone::Initialized() const {
173 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
175 return (_initialized);
178 int32_t AudioDeviceIPhone::InitSpeaker() {
179 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
182 CriticalSectionScoped lock(&_critSect);
185 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
186 _id, " Not initialized");
191 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
192 _id, " Cannot init speaker when playing");
196 if (!_playoutDeviceIsSpecified) {
197 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
198 _id, " Playout device is not specified");
203 _speakerIsInitialized = true;
208 int32_t AudioDeviceIPhone::InitMicrophone() {
209 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
212 CriticalSectionScoped lock(&_critSect);
215 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
216 _id, " Not initialized");
221 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
222 _id, " Cannot init mic when recording");
226 if (!_recordingDeviceIsSpecified) {
227 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
228 _id, " Recording device is not specified");
234 _micIsInitialized = true;
239 bool AudioDeviceIPhone::SpeakerIsInitialized() const {
240 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
242 return _speakerIsInitialized;
245 bool AudioDeviceIPhone::MicrophoneIsInitialized() const {
246 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
248 return _micIsInitialized;
251 int32_t AudioDeviceIPhone::SpeakerVolumeIsAvailable(bool& available) {
252 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
255 available = false; // Speaker volume not supported on iOS
260 int32_t AudioDeviceIPhone::SetSpeakerVolume(uint32_t volume) {
261 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
262 "AudioDeviceIPhone::SetSpeakerVolume(volume=%u)", volume);
264 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
265 " API call not supported on this platform");
269 int32_t AudioDeviceIPhone::SpeakerVolume(uint32_t& volume) const {
270 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
273 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
274 " API call not supported on this platform");
279 AudioDeviceIPhone::SetWaveOutVolume(uint16_t volumeLeft,
280 uint16_t volumeRight) {
285 "AudioDeviceIPhone::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
286 volumeLeft, volumeRight);
288 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
289 " API call not supported on this platform");
295 AudioDeviceIPhone::WaveOutVolume(uint16_t& /*volumeLeft*/,
296 uint16_t& /*volumeRight*/) const {
297 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
300 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
301 " API call not supported on this platform");
306 AudioDeviceIPhone::MaxSpeakerVolume(uint32_t& maxVolume) const {
307 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
310 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
311 " API call not supported on this platform");
315 int32_t AudioDeviceIPhone::MinSpeakerVolume(
316 uint32_t& minVolume) const {
317 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
320 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
321 " API call not supported on this platform");
326 AudioDeviceIPhone::SpeakerVolumeStepSize(uint16_t& stepSize) const {
327 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
330 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
331 " API call not supported on this platform");
335 int32_t AudioDeviceIPhone::SpeakerMuteIsAvailable(bool& available) {
336 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
339 available = false; // Speaker mute not supported on iOS
344 int32_t AudioDeviceIPhone::SetSpeakerMute(bool enable) {
345 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
348 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
349 " API call not supported on this platform");
353 int32_t AudioDeviceIPhone::SpeakerMute(bool& enabled) const {
354 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
357 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
358 " API call not supported on this platform");
362 int32_t AudioDeviceIPhone::MicrophoneMuteIsAvailable(bool& available) {
363 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
366 available = false; // Mic mute not supported on iOS
371 int32_t AudioDeviceIPhone::SetMicrophoneMute(bool enable) {
372 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
375 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
376 " API call not supported on this platform");
380 int32_t AudioDeviceIPhone::MicrophoneMute(bool& enabled) const {
381 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
384 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
385 " API call not supported on this platform");
389 int32_t AudioDeviceIPhone::MicrophoneBoostIsAvailable(bool& available) {
390 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
393 available = false; // Mic boost not supported on iOS
398 int32_t AudioDeviceIPhone::SetMicrophoneBoost(bool enable) {
399 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
400 "AudioDeviceIPhone::SetMicrophoneBoost(enable=%u)", enable);
402 if (!_micIsInitialized) {
403 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
404 " Microphone not initialized");
409 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
410 " SetMicrophoneBoost cannot be enabled on this platform");
417 int32_t AudioDeviceIPhone::MicrophoneBoost(bool& enabled) const {
418 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
420 if (!_micIsInitialized) {
421 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
422 " Microphone not initialized");
431 int32_t AudioDeviceIPhone::StereoRecordingIsAvailable(bool& available) {
432 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
435 available = false; // Stereo recording not supported on iOS
440 int32_t AudioDeviceIPhone::SetStereoRecording(bool enable) {
441 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
442 "AudioDeviceIPhone::SetStereoRecording(enable=%u)", enable);
445 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
446 " Stereo recording is not supported on this platform");
452 int32_t AudioDeviceIPhone::StereoRecording(bool& enabled) const {
453 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
460 int32_t AudioDeviceIPhone::StereoPlayoutIsAvailable(bool& available) {
461 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
464 available = false; // Stereo playout not supported on iOS
469 int32_t AudioDeviceIPhone::SetStereoPlayout(bool enable) {
470 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
471 "AudioDeviceIPhone::SetStereoPlayout(enable=%u)", enable);
474 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
475 " Stereo playout is not supported on this platform");
481 int32_t AudioDeviceIPhone::StereoPlayout(bool& enabled) const {
482 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
489 int32_t AudioDeviceIPhone::SetAGC(bool enable) {
490 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
491 "AudioDeviceIPhone::SetAGC(enable=%d)", enable);
498 bool AudioDeviceIPhone::AGC() const {
499 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
505 int32_t AudioDeviceIPhone::MicrophoneVolumeIsAvailable(bool& available) {
506 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
509 available = false; // Mic volume not supported on IOS
514 int32_t AudioDeviceIPhone::SetMicrophoneVolume(uint32_t volume) {
515 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
516 "AudioDeviceIPhone::SetMicrophoneVolume(volume=%u)", volume);
518 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
519 " API call not supported on this platform");
524 AudioDeviceIPhone::MicrophoneVolume(uint32_t& volume) const {
525 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
528 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
529 " API call not supported on this platform");
534 AudioDeviceIPhone::MaxMicrophoneVolume(uint32_t& maxVolume) const {
535 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
538 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
539 " API call not supported on this platform");
544 AudioDeviceIPhone::MinMicrophoneVolume(uint32_t& minVolume) const {
545 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
548 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
549 " API call not supported on this platform");
554 AudioDeviceIPhone::MicrophoneVolumeStepSize(
555 uint16_t& stepSize) const {
556 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
559 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
560 " API call not supported on this platform");
564 int16_t AudioDeviceIPhone::PlayoutDevices() {
565 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
571 int32_t AudioDeviceIPhone::SetPlayoutDevice(uint16_t index) {
572 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
573 "AudioDeviceIPhone::SetPlayoutDevice(index=%u)", index);
575 if (_playIsInitialized) {
576 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
577 " Playout already initialized");
582 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
583 " SetPlayoutDevice invalid index");
586 _playoutDeviceIsSpecified = true;
592 AudioDeviceIPhone::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
593 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
594 "WindowsDeviceType not supported");
599 AudioDeviceIPhone::PlayoutDeviceName(uint16_t index,
600 char name[kAdmMaxDeviceNameSize],
601 char guid[kAdmMaxGuidSize]) {
602 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
603 "AudioDeviceIPhone::PlayoutDeviceName(index=%u)", index);
608 // return empty strings
609 memset(name, 0, kAdmMaxDeviceNameSize);
611 memset(guid, 0, kAdmMaxGuidSize);
618 AudioDeviceIPhone::RecordingDeviceName(uint16_t index,
619 char name[kAdmMaxDeviceNameSize],
620 char guid[kAdmMaxGuidSize]) {
621 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
622 "AudioDeviceIPhone::RecordingDeviceName(index=%u)", index);
627 // return empty strings
628 memset(name, 0, kAdmMaxDeviceNameSize);
630 memset(guid, 0, kAdmMaxGuidSize);
636 int16_t AudioDeviceIPhone::RecordingDevices() {
637 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
642 int32_t AudioDeviceIPhone::SetRecordingDevice(uint16_t index) {
643 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
644 "AudioDeviceIPhone::SetRecordingDevice(index=%u)", index);
646 if (_recIsInitialized) {
647 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
648 " Recording already initialized");
653 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
654 " SetRecordingDevice invalid index");
658 _recordingDeviceIsSpecified = true;
664 AudioDeviceIPhone::SetRecordingDevice(
665 AudioDeviceModule::WindowsDeviceType) {
666 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
667 "WindowsDeviceType not supported");
671 // ----------------------------------------------------------------------------
672 // SetLoudspeakerStatus
674 // Overrides the receiver playout route to speaker instead. See
675 // kAudioSessionProperty_OverrideCategoryDefaultToSpeaker in CoreAudio
677 // ----------------------------------------------------------------------------
679 int32_t AudioDeviceIPhone::SetLoudspeakerStatus(bool enable) {
680 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
681 "AudioDeviceIPhone::SetLoudspeakerStatus(enable=%d)", enable);
683 UInt32 doChangeDefaultRoute = enable ? 1 : 0;
684 OSStatus err = AudioSessionSetProperty(
685 kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
686 sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
689 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
690 "Error changing default output route " \
691 "(only available on iOS 3.1 or later)");
698 int32_t AudioDeviceIPhone::GetLoudspeakerStatus(bool &enabled) const {
699 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
700 "AudioDeviceIPhone::SetLoudspeakerStatus(enabled=?)");
703 UInt32 size = sizeof(route);
704 OSStatus err = AudioSessionGetProperty(
705 kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
708 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
709 "Error changing default output route " \
710 "(only available on iOS 3.1 or later)");
714 enabled = route == 1 ? true: false;
719 int32_t AudioDeviceIPhone::PlayoutIsAvailable(bool& available) {
720 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
724 // Try to initialize the playout side
725 int32_t res = InitPlayout();
727 // Cancel effect of initialization
737 int32_t AudioDeviceIPhone::RecordingIsAvailable(bool& available) {
738 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
742 // Try to initialize the recording side
743 int32_t res = InitRecording();
745 // Cancel effect of initialization
755 int32_t AudioDeviceIPhone::InitPlayout() {
756 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
758 CriticalSectionScoped lock(&_critSect);
761 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized");
766 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
767 " Playout already started");
771 if (_playIsInitialized) {
772 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
773 " Playout already initialized");
777 if (!_playoutDeviceIsSpecified) {
778 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
779 " Playout device is not specified");
783 // Initialize the speaker
784 if (InitSpeaker() == -1) {
785 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
786 " InitSpeaker() failed");
789 _playIsInitialized = true;
791 if (!_recIsInitialized) {
793 if (InitPlayOrRecord() == -1) {
794 // todo: Handle error
795 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
796 " InitPlayOrRecord() failed");
799 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
800 " Recording already initialized - InitPlayOrRecord() not called");
806 bool AudioDeviceIPhone::PlayoutIsInitialized() const {
807 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
808 return (_playIsInitialized);
811 int32_t AudioDeviceIPhone::InitRecording() {
812 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
814 CriticalSectionScoped lock(&_critSect);
817 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
823 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
824 " Recording already started");
828 if (_recIsInitialized) {
829 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
830 " Recording already initialized");
834 if (!_recordingDeviceIsSpecified) {
835 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
836 " Recording device is not specified");
840 // Initialize the microphone
841 if (InitMicrophone() == -1) {
842 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
843 " InitMicrophone() failed");
846 _recIsInitialized = true;
848 if (!_playIsInitialized) {
850 if (InitPlayOrRecord() == -1) {
851 // todo: Handle error
852 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
853 " InitPlayOrRecord() failed");
856 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
857 " Playout already initialized - InitPlayOrRecord() " \
864 bool AudioDeviceIPhone::RecordingIsInitialized() const {
865 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
866 return (_recIsInitialized);
869 int32_t AudioDeviceIPhone::StartRecording() {
870 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
872 CriticalSectionScoped lock(&_critSect);
874 if (!_recIsInitialized) {
875 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
876 " Recording not initialized");
881 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
882 " Recording already started");
886 // Reset recording buffer
887 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
888 memset(_recordingLength, 0, sizeof(_recordingLength));
889 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
890 _recordingCurrentSeq = 0;
891 _recordingBufferTotalSize = 0;
893 _recordingDelayHWAndOS = 0;
894 // Make sure first call to update delay function will update delay
895 _recordingDelayMeasurementCounter = 9999;
901 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
902 " Starting Audio Unit");
903 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
905 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
906 " Error starting Audio Unit (result=%d)", result);
916 int32_t AudioDeviceIPhone::StopRecording() {
917 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
919 CriticalSectionScoped lock(&_critSect);
921 if (!_recIsInitialized) {
922 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
923 " Recording is not initialized");
930 // Both playout and recording has stopped, shutdown the device
931 ShutdownPlayOrRecord();
934 _recIsInitialized = false;
935 _micIsInitialized = false;
940 bool AudioDeviceIPhone::Recording() const {
941 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
945 int32_t AudioDeviceIPhone::StartPlayout() {
946 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
948 // This lock is (among other things) needed to avoid concurrency issues
949 // with capture thread
950 // shutting down Audio Unit
951 CriticalSectionScoped lock(&_critSect);
953 if (!_playIsInitialized) {
954 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
955 " Playout not initialized");
960 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
961 " Playing already started");
965 // Reset playout buffer
966 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
967 _playoutBufferUsed = 0;
969 // Make sure first call to update delay function will update delay
970 _playoutDelayMeasurementCounter = 9999;
976 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
977 " Starting Audio Unit");
978 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
980 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
981 " Error starting Audio Unit (result=%d)", result);
991 int32_t AudioDeviceIPhone::StopPlayout() {
992 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
994 CriticalSectionScoped lock(&_critSect);
996 if (!_playIsInitialized) {
997 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
998 " Playout is not initialized");
1005 // Both playout and recording has stopped, signal shutdown the device
1006 ShutdownPlayOrRecord();
1009 _playIsInitialized = false;
1010 _speakerIsInitialized = false;
1015 bool AudioDeviceIPhone::Playing() const {
1016 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1017 "%s", __FUNCTION__);
1021 // ----------------------------------------------------------------------------
1024 // Disable playout and recording, signal to capture thread to shutdown,
1025 // and set enable states after shutdown to same as current.
1026 // In capture thread audio device will be shutdown, then started again.
1027 // ----------------------------------------------------------------------------
1028 int32_t AudioDeviceIPhone::ResetAudioDevice() {
1029 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1031 CriticalSectionScoped lock(&_critSect);
1033 if (!_playIsInitialized && !_recIsInitialized) {
1034 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1035 " Playout or recording not initialized, doing nothing");
1036 return 0; // Nothing to reset
1039 // Store the states we have before stopping to restart below
1040 bool initPlay = _playIsInitialized;
1041 bool play = _playing;
1042 bool initRec = _recIsInitialized;
1043 bool rec = _recording;
1047 // Stop playout and recording
1048 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1049 " Stopping playout and recording");
1050 res += StopPlayout();
1051 res += StopRecording();
1054 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1055 " Restarting playout and recording (%d, %d, %d, %d)",
1056 initPlay, play, initRec, rec);
1057 if (initPlay) res += InitPlayout();
1058 if (initRec) res += InitRecording();
1059 if (play) res += StartPlayout();
1060 if (rec) res += StartRecording();
1063 // Logging is done in init/start/stop calls above
1070 int32_t AudioDeviceIPhone::PlayoutDelay(uint16_t& delayMS) const {
1071 delayMS = _playoutDelay;
1075 int32_t AudioDeviceIPhone::RecordingDelay(uint16_t& delayMS) const {
1076 delayMS = _recordingDelay;
1081 AudioDeviceIPhone::SetPlayoutBuffer(
1082 const AudioDeviceModule::BufferType type,
1084 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1085 "AudioDeviceIPhone::SetPlayoutBuffer(type=%u, sizeMS=%u)",
1088 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1089 " API call not supported on this platform");
1094 AudioDeviceIPhone::PlayoutBuffer(AudioDeviceModule::BufferType& type,
1095 uint16_t& sizeMS) const {
1096 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1098 type = AudioDeviceModule::kAdaptiveBufferSize;
1100 sizeMS = _playoutDelay;
1105 int32_t AudioDeviceIPhone::CPULoad(uint16_t& /*load*/) const {
1106 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1108 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1109 " API call not supported on this platform");
1113 bool AudioDeviceIPhone::PlayoutWarning() const {
1114 return (_playWarning > 0);
1117 bool AudioDeviceIPhone::PlayoutError() const {
1118 return (_playError > 0);
1121 bool AudioDeviceIPhone::RecordingWarning() const {
1122 return (_recWarning > 0);
1125 bool AudioDeviceIPhone::RecordingError() const {
1126 return (_recError > 0);
1129 void AudioDeviceIPhone::ClearPlayoutWarning() {
1133 void AudioDeviceIPhone::ClearPlayoutError() {
1137 void AudioDeviceIPhone::ClearRecordingWarning() {
1141 void AudioDeviceIPhone::ClearRecordingError() {
1145 // ============================================================================
1147 // ============================================================================
1149 int32_t AudioDeviceIPhone::InitPlayOrRecord() {
1150 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1152 OSStatus result = -1;
1154 // Check if already initialized
1155 if (NULL != _auVoiceProcessing) {
1156 // We already have initialized before and created any of the audio unit,
1157 // check that all exist
1158 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1159 " Already initialized");
1160 // todo: Call AudioUnitReset() here and empty all buffers?
1164 // Create Voice Processing Audio Unit
1165 AudioComponentDescription desc;
1166 AudioComponent comp;
1168 desc.componentType = kAudioUnitType_Output;
1169 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
1170 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1171 desc.componentFlags = 0;
1172 desc.componentFlagsMask = 0;
1174 comp = AudioComponentFindNext(NULL, &desc);
1176 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1177 " Could not find audio component for Audio Unit");
1181 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
1183 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1184 " Could not create Audio Unit instance (result=%d)",
1189 // Set preferred hardware sample rate to 16 kHz
1190 Float64 sampleRate(16000.0);
1191 result = AudioSessionSetProperty(
1192 kAudioSessionProperty_PreferredHardwareSampleRate,
1193 sizeof(sampleRate), &sampleRate);
1195 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1196 "Could not set preferred sample rate (result=%d)", result);
1199 uint32_t voiceChat = kAudioSessionMode_VoiceChat;
1200 AudioSessionSetProperty(kAudioSessionProperty_Mode,
1201 sizeof(voiceChat), &voiceChat);
1203 //////////////////////
1204 // Setup Voice Processing Audio Unit
1206 // Note: For Signal Processing AU element 0 is output bus, element 1 is
1207 // input bus for global scope element is irrelevant (always use
1210 // Enable IO on both elements
1212 // todo: Below we just log and continue upon error. We might want
1213 // to close AU and return error for some cases.
1214 // todo: Log info about setup.
1216 UInt32 enableIO = 1;
1217 result = AudioUnitSetProperty(_auVoiceProcessing,
1218 kAudioOutputUnitProperty_EnableIO,
1219 kAudioUnitScope_Input,
1224 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1225 " Could not enable IO on input (result=%d)", result);
1228 result = AudioUnitSetProperty(_auVoiceProcessing,
1229 kAudioOutputUnitProperty_EnableIO,
1230 kAudioUnitScope_Output,
1235 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1236 " Could not enable IO on output (result=%d)", result);
1239 // Disable AU buffer allocation for the recorder, we allocate our own
1241 result = AudioUnitSetProperty(
1242 _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
1243 kAudioUnitScope_Output, 1, &flag, sizeof(flag));
1245 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1246 " Could not disable AU buffer allocation (result=%d)",
1248 // Should work anyway
1251 // Set recording callback
1252 AURenderCallbackStruct auCbS;
1253 memset(&auCbS, 0, sizeof(auCbS));
1254 auCbS.inputProc = RecordProcess;
1255 auCbS.inputProcRefCon = this;
1256 result = AudioUnitSetProperty(_auVoiceProcessing,
1257 kAudioOutputUnitProperty_SetInputCallback,
1258 kAudioUnitScope_Global, 1,
1259 &auCbS, sizeof(auCbS));
1261 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1262 " Could not set record callback for Audio Unit (result=%d)",
1266 // Set playout callback
1267 memset(&auCbS, 0, sizeof(auCbS));
1268 auCbS.inputProc = PlayoutProcess;
1269 auCbS.inputProcRefCon = this;
1270 result = AudioUnitSetProperty(_auVoiceProcessing,
1271 kAudioUnitProperty_SetRenderCallback,
1272 kAudioUnitScope_Global, 0,
1273 &auCbS, sizeof(auCbS));
1275 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1276 " Could not set play callback for Audio Unit (result=%d)",
1280 // Get stream format for out/0
1281 AudioStreamBasicDescription playoutDesc;
1282 UInt32 size = sizeof(playoutDesc);
1283 result = AudioUnitGetProperty(_auVoiceProcessing,
1284 kAudioUnitProperty_StreamFormat,
1285 kAudioUnitScope_Output, 0, &playoutDesc,
1288 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1289 " Could not get stream format Audio Unit out/0 (result=%d)",
1292 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1293 " Audio Unit playout opened in sampling rate %f",
1294 playoutDesc.mSampleRate);
1296 playoutDesc.mSampleRate = sampleRate;
1298 // Store the sampling frequency to use towards the Audio Device Buffer
1299 // todo: Add 48 kHz (increase buffer sizes). Other fs?
1300 if ((playoutDesc.mSampleRate > 44090.0)
1301 && (playoutDesc.mSampleRate < 44110.0)) {
1302 _adbSampFreq = 44100;
1303 } else if ((playoutDesc.mSampleRate > 15990.0)
1304 && (playoutDesc.mSampleRate < 16010.0)) {
1305 _adbSampFreq = 16000;
1306 } else if ((playoutDesc.mSampleRate > 7990.0)
1307 && (playoutDesc.mSampleRate < 8010.0)) {
1308 _adbSampFreq = 8000;
1311 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1312 " Audio Unit out/0 opened in unknown sampling rate (%f)",
1313 playoutDesc.mSampleRate);
1314 // todo: We should bail out here.
1317 // Set the audio device buffer sampling rate,
1318 // we assume we get the same for play and record
1319 if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
1320 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1321 " Could not set audio device buffer recording sampling rate (%d)",
1325 if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
1326 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1327 " Could not set audio device buffer playout sampling rate (%d)",
1331 // Set stream format for in/0 (use same sampling frequency as for out/0)
1332 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1333 | kLinearPCMFormatFlagIsPacked
1334 | kLinearPCMFormatFlagIsNonInterleaved;
1335 playoutDesc.mBytesPerPacket = 2;
1336 playoutDesc.mFramesPerPacket = 1;
1337 playoutDesc.mBytesPerFrame = 2;
1338 playoutDesc.mChannelsPerFrame = 1;
1339 playoutDesc.mBitsPerChannel = 16;
1340 result = AudioUnitSetProperty(_auVoiceProcessing,
1341 kAudioUnitProperty_StreamFormat,
1342 kAudioUnitScope_Input, 0, &playoutDesc, size);
1344 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1345 " Could not set stream format Audio Unit in/0 (result=%d)",
1349 // Get stream format for in/1
1350 AudioStreamBasicDescription recordingDesc;
1351 size = sizeof(recordingDesc);
1352 result = AudioUnitGetProperty(_auVoiceProcessing,
1353 kAudioUnitProperty_StreamFormat,
1354 kAudioUnitScope_Input, 1, &recordingDesc,
1357 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1358 " Could not get stream format Audio Unit in/1 (result=%d)",
1361 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1362 " Audio Unit recording opened in sampling rate %f",
1363 recordingDesc.mSampleRate);
1365 recordingDesc.mSampleRate = sampleRate;
1367 // Set stream format for out/1 (use same sampling frequency as for in/1)
1368 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1369 | kLinearPCMFormatFlagIsPacked
1370 | kLinearPCMFormatFlagIsNonInterleaved;
1372 recordingDesc.mBytesPerPacket = 2;
1373 recordingDesc.mFramesPerPacket = 1;
1374 recordingDesc.mBytesPerFrame = 2;
1375 recordingDesc.mChannelsPerFrame = 1;
1376 recordingDesc.mBitsPerChannel = 16;
1377 result = AudioUnitSetProperty(_auVoiceProcessing,
1378 kAudioUnitProperty_StreamFormat,
1379 kAudioUnitScope_Output, 1, &recordingDesc,
1382 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1383 " Could not set stream format Audio Unit out/1 (result=%d)",
1387 // Initialize here already to be able to get/set stream properties.
1388 result = AudioUnitInitialize(_auVoiceProcessing);
1390 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1391 " Could not init Audio Unit (result=%d)", result);
1394 // Get hardware sample rate for logging (see if we get what we asked for)
1395 Float64 hardwareSampleRate = 0.0;
1396 size = sizeof(hardwareSampleRate);
1397 result = AudioSessionGetProperty(
1398 kAudioSessionProperty_CurrentHardwareSampleRate, &size,
1399 &hardwareSampleRate);
1401 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1402 " Could not get current HW sample rate (result=%d)", result);
1404 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1405 " Current HW sample rate is %f, ADB sample rate is %d",
1406 hardwareSampleRate, _adbSampFreq);
// Tears down the VoiceProcessingIO Audio Unit created during initialization:
// stops output, disposes the component instance, and clears the handle so a
// later re-init starts from a clean state. Failures are logged as warnings
// only; teardown proceeds regardless.
// NOTE(review): this listing elides lines (gaps in the embedded numbering) —
// the "if (0 != result)" guards around each WEBRTC_TRACE and the trailing
// return/closing brace are not visible here; confirm against the full file.
1411 int32_t AudioDeviceIPhone::ShutdownPlayOrRecord() {
1412 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1414 // Close and delete AU
1415 OSStatus result = -1;
1416 if (NULL != _auVoiceProcessing) {
// Stop the I/O unit before disposing of it.
1417 result = AudioOutputUnitStop(_auVoiceProcessing);
1419 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1420 " Error stopping Audio Unit (result=%d)", result);
// Release the Audio Unit instance itself.
1422 result = AudioComponentInstanceDispose(_auVoiceProcessing);
1424 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1425 " Error disposing Audio Unit (result=%d)", result);
// Null the handle so other paths see the unit as released.
1427 _auVoiceProcessing = NULL;
1433 // ============================================================================
1435 // ============================================================================
// Static C callback registered with the Audio Unit for the input (mic) bus.
// Recovers the AudioDeviceIPhone instance from the inRefCon cookie and
// forwards to the member implementation RecordProcessImpl.
// NOTE(review): the listing elides lines here — the original line carrying
// the return type (presumably OSStatus) and the remaining forwarded
// arguments after ioActionFlags are not visible; confirm against full file.
1438 AudioDeviceIPhone::RecordProcess(void *inRefCon,
1439 AudioUnitRenderActionFlags *ioActionFlags,
1440 const AudioTimeStamp *inTimeStamp,
1442 UInt32 inNumberFrames,
1443 AudioBufferList *ioData) {
1444 AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
1446 return ptrThis->RecordProcessImpl(ioActionFlags,
// Real-time input callback body: pulls one burst of 16-bit mono mic samples
// from the VoiceProcessingIO unit via AudioUnitRender, then distributes them
// into the fixed pool of N_REC_BUFFERS 10-ms recording buffers that
// CaptureWorkerThread drains. Runs on the CoreAudio I/O thread — no locks
// are taken; ordering of the member-variable writes is the synchronization.
1454 AudioDeviceIPhone::RecordProcessImpl(
1455 AudioUnitRenderActionFlags *ioActionFlags,
1456 const AudioTimeStamp *inTimeStamp,
1457 uint32_t inBusNumber,
1458 uint32_t inNumberFrames) {
1459 // Setup some basic stuff
1460 // Use temp buffer not to lock up recording buffer more than necessary
1461 // todo: Make dataTmp a member variable with static size that holds
1462 // max possible frames?
// Scratch render target sized for this callback's frame count
// (2 bytes per sample, mono).
1463 int16_t* dataTmp = new int16_t[inNumberFrames];
1464 memset(dataTmp, 0, 2*inNumberFrames);
1466 AudioBufferList abList;
1467 abList.mNumberBuffers = 1;
1468 abList.mBuffers[0].mData = dataTmp;
1469 abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample
1470 abList.mBuffers[0].mNumberChannels = 1;
1472 // Get data from mic
1473 OSStatus res = AudioUnitRender(_auVoiceProcessing,
1474 ioActionFlags, inTimeStamp,
1475 inBusNumber, inNumberFrames, &abList);
// On render failure, raise the pending-warning flag (read elsewhere by the
// module); if one is already pending it is only logged, not stacked.
1477 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1478 " Error getting rec data, error = %d", res);
1480 if (_recWarning > 0) {
1481 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1482 " Pending rec warning exists");
1491 // Insert all data in temp buffer into recording buffers
1492 // There is zero or one buffer partially full at any given time,
1493 // all others are full or empty
1494 // Full means filled with noSamp10ms samples.
1496 const unsigned int noSamp10ms = _adbSampFreq / 100;
1497 unsigned int dataPos = 0;
1498 uint16_t bufPos = 0;
1499 int16_t insertPos = -1;
1500 unsigned int nCopy = 0; // Number of samples to copy
1502 while (dataPos < inNumberFrames) {
1503 // Loop over all recording buffers or
1504 // until we find the partially full buffer
1505 // First choice is to insert into partially full buffer,
1506 // second choice is to insert into empty buffer
1510 while (bufPos < N_REC_BUFFERS) {
1511 if ((_recordingLength[bufPos] > 0)
1512 && (_recordingLength[bufPos] < noSamp10ms)) {
1513 // Found the partially full buffer
1514 insertPos = static_cast<int16_t>(bufPos);
1515 // Don't need to search more, quit loop
1516 bufPos = N_REC_BUFFERS;
1517 } else if ((-1 == insertPos)
1518 && (0 == _recordingLength[bufPos])) {
1519 // Found an empty buffer
1520 insertPos = static_cast<int16_t>(bufPos);
1525 // Insert data into buffer
1526 if (insertPos > -1) {
1527 // We found a non-full buffer, copy data to it
1528 unsigned int dataToCopy = inNumberFrames - dataPos;
1529 unsigned int currentRecLen = _recordingLength[insertPos];
1530 unsigned int roomInBuffer = noSamp10ms - currentRecLen;
1531 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
1533 memcpy(&_recordingBuffer[insertPos][currentRecLen],
1534 &dataTmp[dataPos], nCopy*sizeof(int16_t));
// First samples into a previously empty buffer: stamp it with the next
// sequence number so the capture thread drains buffers in arrival order.
1535 if (0 == currentRecLen) {
1536 _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
1537 ++_recordingCurrentSeq;
1539 _recordingBufferTotalSize += nCopy;
1540 // Has to be done last to avoid interrupt problems
// _recordingLength is the publish signal to the consumer thread, so it is
// incremented only after the sample data and sequence number are in place.
1542 _recordingLength[insertPos] += nCopy;
1545 // Didn't find a non-full buffer
1546 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1547 " Could not insert into recording buffer");
1548 if (_recWarning > 0) {
1549 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1550 " Pending rec warning exists");
// Overflow: all buffers full — drop the remaining samples this callback.
1553 dataPos = inNumberFrames; // Don't try to insert more
// NOTE(review): the matching "delete [] dataTmp" and the function's return
// statement are elided from this listing (numbering gap after 1553);
// verify in the full file that dataTmp is freed on every exit path.
// Static C render callback registered with the Audio Unit for the output
// (speaker) bus. Recovers the device instance from inRefCon and forwards
// the frame count and output buffer list to PlayoutProcessImpl.
// NOTE(review): the listing elides the original line carrying the return
// type and any null-check of ptrThis; confirm against the full file.
1564 AudioDeviceIPhone::PlayoutProcess(void *inRefCon,
1565 AudioUnitRenderActionFlags *ioActionFlags,
1566 const AudioTimeStamp *inTimeStamp,
1568 UInt32 inNumberFrames,
1569 AudioBufferList *ioData) {
1570 AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
1572 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
// Real-time render callback body: fills the Audio Unit's output buffer with
// 16-bit mono PCM. Leftover samples from the previous callback (stored in
// _playoutBuffer) are copied first, then the AudioDeviceBuffer is polled in
// 10-ms chunks until the hardware buffer is full; any surplus from the last
// chunk is stashed back into _playoutBuffer for the next callback.
1576 AudioDeviceIPhone::PlayoutProcessImpl(uint32_t inNumberFrames,
1577 AudioBufferList *ioData) {
1578 // Setup some basic stuff
1579 // assert(sizeof(short) == 2); // Assumption for implementation
// NOTE(review): the "int16_t* data =" declaration that receives this cast
// is on an elided line (numbering gap before 1582); confirm in full file.
1582 static_cast<int16_t*>(ioData->mBuffers[0].mData);
1583 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
1584 unsigned int dataSize = dataSizeBytes/2; // Number of samples
1585 if (dataSize != inNumberFrames) { // Should always be the same
1586 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1587 "dataSize (%u) != inNumberFrames (%u)",
1588 dataSize, (unsigned int)inNumberFrames);
1589 if (_playWarning > 0) {
1590 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1591 " Pending play warning exists");
// Zero-fill so any samples we fail to produce play out as silence.
1595 memset(data, 0, dataSizeBytes); // Start with empty buffer
1598 // Get playout data from Audio Device Buffer
1601 unsigned int noSamp10ms = _adbSampFreq / 100;
1602 // todo: Member variable and allocate when samp freq is determined
1603 int16_t* dataTmp = new int16_t[noSamp10ms];
1604 memset(dataTmp, 0, 2*noSamp10ms);
1605 unsigned int dataPos = 0;
1606 int noSamplesOut = 0;
1607 unsigned int nCopy = 0;
1609 // First insert data from playout buffer if any
1610 if (_playoutBufferUsed > 0) {
1611 nCopy = (dataSize < _playoutBufferUsed) ?
1612 dataSize : _playoutBufferUsed;
1613 if (nCopy != _playoutBufferUsed) {
1614 // todo: If dataSize < _playoutBufferUsed
1615 // (should normally never be)
1616 // we must move the remaining data
1617 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1618 "nCopy (%u) != _playoutBufferUsed (%u)",
1619 nCopy, _playoutBufferUsed);
1620 if (_playWarning > 0) {
1621 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1622 " Pending play warning exists");
// Drain the leftover buffer into the start of the hardware buffer.
1626 memcpy(data, _playoutBuffer, 2*nCopy);
1628 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
1629 _playoutBufferUsed = 0;
1632 // Now get the rest from Audio Device Buffer
1633 while (dataPos < dataSize) {
1634 // Update playout delay
1635 UpdatePlayoutDelay();
1637 // Ask for new PCM data to be played out using the AudioDeviceBuffer
1638 noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);
1640 // Get data from Audio Device Buffer
1642 _ptrAudioBuffer->GetPlayoutData(
1643 reinterpret_cast<int8_t*>(dataTmp));
1644 // Cast OK since only equality comparison
1645 if (noSamp10ms != (unsigned int)noSamplesOut) {
1646 // Should never happen
1647 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1648 "noSamp10ms (%u) != noSamplesOut (%d)",
1649 noSamp10ms, noSamplesOut);
1651 if (_playWarning > 0) {
1652 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1653 " Pending play warning exists");
1658 // Insert as much as fits in data buffer
1659 nCopy = (dataSize-dataPos) > noSamp10ms ?
1660 noSamp10ms : (dataSize-dataPos);
1661 memcpy(&data[dataPos], dataTmp, 2*nCopy);
1663 // Save rest in playout buffer if any
1664 if (nCopy < noSamp10ms) {
1665 memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
1666 _playoutBufferUsed = noSamp10ms - nCopy;
1669 // Update loop/index counter, if we copied less than noSamp10ms
1670 // samples we shall quit loop anyway
1671 dataPos += noSamp10ms;
// NOTE(review): "delete [] dataTmp" and the return statement are elided
// from this listing (numbering gap after 1671); verify dataTmp is freed.
1680 void AudioDeviceIPhone::UpdatePlayoutDelay() {
1681 ++_playoutDelayMeasurementCounter;
1683 if (_playoutDelayMeasurementCounter >= 100) {
1684 // Update HW and OS delay every second, unlikely to change
1686 // Since this is eventually rounded to integral ms, add 0.5ms
1687 // here to get round-to-nearest-int behavior instead of
1689 float totalDelaySeconds = 0.0005;
1691 // HW output latency
1693 UInt32 size = sizeof(f32);
1694 OSStatus result = AudioSessionGetProperty(
1695 kAudioSessionProperty_CurrentHardwareOutputLatency, &size, &f32);
1697 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1698 "error HW latency (result=%d)", result);
1701 totalDelaySeconds += f32;
1703 // HW buffer duration
1705 result = AudioSessionGetProperty(
1706 kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
1708 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1709 "error HW buffer duration (result=%d)", result);
1712 totalDelaySeconds += f32;
1717 result = AudioUnitGetProperty(_auVoiceProcessing,
1718 kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &f64, &size);
1720 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1721 "error AU latency (result=%d)", result);
1724 totalDelaySeconds += f64;
1727 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000);
1730 _playoutDelayMeasurementCounter = 0;
1733 // todo: Add playout buffer?
1736 void AudioDeviceIPhone::UpdateRecordingDelay() {
1737 ++_recordingDelayMeasurementCounter;
1739 if (_recordingDelayMeasurementCounter >= 100) {
1740 // Update HW and OS delay every second, unlikely to change
1742 // Since this is eventually rounded to integral ms, add 0.5ms
1743 // here to get round-to-nearest-int behavior instead of
1745 float totalDelaySeconds = 0.0005;
1749 UInt32 size = sizeof(f32);
1750 OSStatus result = AudioSessionGetProperty(
1751 kAudioSessionProperty_CurrentHardwareInputLatency, &size, &f32);
1753 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1754 "error HW latency (result=%d)", result);
1757 totalDelaySeconds += f32;
1759 // HW buffer duration
1761 result = AudioSessionGetProperty(
1762 kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
1764 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1765 "error HW buffer duration (result=%d)", result);
1768 totalDelaySeconds += f32;
1773 result = AudioUnitGetProperty(_auVoiceProcessing,
1774 kAudioUnitProperty_Latency,
1775 kAudioUnitScope_Global, 0, &f64, &size);
1777 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1778 "error AU latency (result=%d)", result);
1781 totalDelaySeconds += f64;
1784 _recordingDelayHWAndOS =
1785 static_cast<uint32_t>(totalDelaySeconds / 1000);
1788 _recordingDelayMeasurementCounter = 0;
1791 _recordingDelay = _recordingDelayHWAndOS;
1793 // ADB recording buffer size, update every time
1794 // Don't count the one next 10 ms to be sent, then convert samples => ms
1795 const uint32_t noSamp10ms = _adbSampFreq / 100;
1796 if (_recordingBufferTotalSize > noSamp10ms) {
1798 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
// Thread entry trampoline (signature required by ThreadWrapper): casts the
// opaque context pointer back to the device instance and runs one iteration
// of the capture worker loop. The return value tells the thread wrapper
// whether to keep calling.
1802 bool AudioDeviceIPhone::RunCapture(void* ptrThis) {
1803 return static_cast<AudioDeviceIPhone*>(ptrThis)->CaptureWorkerThread();
// Capture worker loop body: drains full 10-ms recording buffers (filled by
// RecordProcessImpl on the audio I/O thread) into the AudioDeviceBuffer in
// sequence-number order, then sleeps 5 ms. No locks are used; the ordering
// of writes to _recordingLength is the cross-thread handshake.
// NOTE(review): this listing elides several lines (numbering gaps), e.g. the
// declaration of bufPos, the "while (foundBuf)" loop header, the timespec
// declaration for the sleep, and the final return; confirm in full file.
1806 bool AudioDeviceIPhone::CaptureWorkerThread() {
1809 unsigned int lowestSeq = 0;
1810 int lowestSeqBufPos = 0;
1811 bool foundBuf = true;
1812 const unsigned int noSamp10ms = _adbSampFreq / 100;
1815 // Check if we have any buffer with data to insert
1816 // into the Audio Device Buffer,
1817 // and find the one with the lowest seq number
1819 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
// Only completely full buffers (exactly noSamp10ms samples) are eligible.
1820 if (noSamp10ms == _recordingLength[bufPos]) {
1822 lowestSeq = _recordingSeqNumber[bufPos];
1823 lowestSeqBufPos = bufPos;
1825 } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
1826 lowestSeq = _recordingSeqNumber[bufPos];
1827 lowestSeqBufPos = bufPos;
1832 // Insert data into the Audio Device Buffer if found any
1834 // Update recording delay
1835 UpdateRecordingDelay();
1837 // Set the recorded buffer
1838 _ptrAudioBuffer->SetRecordedBuffer(
1839 reinterpret_cast<int8_t*>(
1840 _recordingBuffer[lowestSeqBufPos]),
1841 _recordingLength[lowestSeqBufPos]);
1843 // Don't need to set the current mic level in ADB since we only
1844 // support digital AGC,
1845 // and besides we cannot get or set the IOS mic level anyway.
1847 // Set VQE info, use clockdrift == 0
1848 _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);
1850 // Deliver recorded samples at specified sample rate, mic level
1851 // etc. to the observer using callback
1852 _ptrAudioBuffer->DeliverRecordedData();
1854 // Make buffer available
1855 _recordingSeqNumber[lowestSeqBufPos] = 0;
1856 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
1857 // Must be done last to avoid interrupt problems between threads
// Clearing _recordingLength is what hands the buffer back to the audio
// thread, so it happens only after the data has been fully consumed.
1858 _recordingLength[lowestSeqBufPos] = 0;
1860 } // while (foundBuf)
1861 } // if (_recording)
1865 // Sleep thread (5ms) to let other threads get to work
1866 // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
1870 t.tv_nsec = 5*1000*1000;
1871 nanosleep(&t, NULL);
1877 } // namespace webrtc