1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "media/audio/mac/audio_unified_mac.h"
7 #include <CoreServices/CoreServices.h>
9 #include "base/basictypes.h"
10 #include "base/logging.h"
11 #include "base/mac/mac_logging.h"
12 #include "media/audio/mac/audio_manager_mac.h"
16 // TODO(crogers): support more than hard-coded stereo input.
17 // Ideally we would like to receive this value as a constructor argument.
// Number of input channels delivered to the client (fixed at stereo).
18 static const int kDefaultInputChannels = 2;
// Constructs a stream that does synchronized input and output on a single
// CoreAudio device.  Output format follows |params|; input is fixed at
// kDefaultInputChannels (see TODO above).  The device itself is not
// selected until Open().
20 AudioHardwareUnifiedStream::AudioHardwareUnifiedStream(
21 AudioManagerMac* manager, const AudioParameters& params)
// Input channel count the client will consume in Render().
24 client_input_channels_(kDefaultInputChannels),
// Hardware channel layout; filled in by Open() from the device's
// stream configuration (0 = unknown until then).
28 input_channels_per_frame_(0),
29 output_channels_per_frame_(0),
// No device bound yet; Open() assigns it, Close() resets it.
31 device_(kAudioObjectUnknown),
35 // A frame is one sample across all channels. In interleaved audio the per
36 // frame fields identify the set of n |channels|. In uncompressed audio, a
37 // packet is always one frame.
38 format_.mSampleRate = params.sample_rate();
39 format_.mFormatID = kAudioFormatLinearPCM;
40 format_.mFormatFlags = kLinearPCMFormatFlagIsPacked |
41 kLinearPCMFormatFlagIsSignedInteger;
42 format_.mBitsPerChannel = params.bits_per_sample();
43 format_.mChannelsPerFrame = params.channels();
44 format_.mFramesPerPacket = 1;
// Interleaved: one packet (== one frame) holds all channels.
45 format_.mBytesPerPacket = (format_.mBitsPerChannel * params.channels()) / 8;
46 format_.mBytesPerFrame = format_.mBytesPerPacket;
47 format_.mReserved = 0;
49 // Calculate the number of sample frames per callback.
50 number_of_frames_ = params.GetBytesPerBuffer() / format_.mBytesPerPacket;
// Planar (deinterleaved) buses exchanged with the client in Render().
52 input_bus_ = AudioBus::Create(client_input_channels_,
53 params.frames_per_buffer());
54 output_bus_ = AudioBus::Create(params);
// Destructor.  Close() must already have run: it resets |device_| to
// kAudioObjectUnknown, which is what this DCHECK verifies.
57 AudioHardwareUnifiedStream::~AudioHardwareUnifiedStream() {
58 DCHECK_EQ(device_, kAudioObjectUnknown);
// Binds this stream to the user's default output device and prepares it
// for unified I/O: verifies the hardware sample rate matches the requested
// one, sets the input/output buffer frame sizes, discovers the hardware
// channel layout, and installs the RenderProc IOProc.  Returns whether the
// device was successfully configured.
61 bool AudioHardwareUnifiedStream::Open() {
62 // Obtain the current output device selected by the user.
63 AudioObjectPropertyAddress pa;
64 pa.mSelector = kAudioHardwarePropertyDefaultOutputDevice;
65 pa.mScope = kAudioObjectPropertyScopeGlobal;
66 pa.mElement = kAudioObjectPropertyElementMaster;
68 UInt32 size = sizeof(device_);
70 OSStatus result = AudioObjectGetPropertyData(
71 kAudioObjectSystemObject,
78 if ((result != kAudioHardwareNoError) || (device_ == kAudioDeviceUnknown)) {
79 LOG(ERROR) << "Cannot open unified AudioDevice.";
83 // The requested sample-rate must match the hardware sample-rate.
84 Float64 sample_rate = 0.0;
85 size = sizeof(sample_rate);
87 pa.mSelector = kAudioDevicePropertyNominalSampleRate;
88 pa.mScope = kAudioObjectPropertyScopeWildcard;
89 pa.mElement = kAudioObjectPropertyElementMaster;
91 result = AudioObjectGetPropertyData(
// No sample-rate conversion is done here, so a mismatch is fatal.
99 if (result != noErr || sample_rate != format_.mSampleRate) {
100 LOG(ERROR) << "Requested sample-rate: " << format_.mSampleRate
101 << " must match the hardware sample-rate: " << sample_rate;
105 // Configure buffer frame size.
// The same frame size is applied to both scopes so input and output
// callbacks stay in lock-step.
106 UInt32 frame_size = number_of_frames_;
108 pa.mSelector = kAudioDevicePropertyBufferFrameSize;
109 pa.mScope = kAudioDevicePropertyScopeInput;
110 pa.mElement = kAudioObjectPropertyElementMaster;
111 result = AudioObjectSetPropertyData(
119 if (result != noErr) {
120 LOG(ERROR) << "Unable to set input buffer frame size: " << frame_size;
// Repeat for the output scope; |pa| is reused with only the scope changed.
124 pa.mScope = kAudioDevicePropertyScopeOutput;
125 result = AudioObjectSetPropertyData(
133 if (result != noErr) {
134 LOG(ERROR) << "Unable to set output buffer frame size: " << frame_size;
138 DVLOG(1) << "Sample rate: " << sample_rate;
139 DVLOG(1) << "Frame size: " << frame_size;
141 // Determine the number of input and output channels.
142 // We handle both the interleaved and non-interleaved cases.
144 // Get input stream configuration.
145 pa.mSelector = kAudioDevicePropertyStreamConfiguration;
146 pa.mScope = kAudioDevicePropertyScopeInput;
147 pa.mElement = kAudioObjectPropertyElementMaster;
// The AudioBufferList is variable-length, so query its size first and
// allocate raw storage for it.
149 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
150 OSSTATUS_DCHECK(result == noErr, result);
152 if (result == noErr && size > 0) {
154 scoped_ptr<uint8[]> input_list_storage(new uint8[size]);
155 AudioBufferList& input_list =
156 *reinterpret_cast<AudioBufferList*>(input_list_storage.get());
158 result = AudioObjectGetPropertyData(
165 OSSTATUS_DCHECK(result == noErr, result);
167 if (result == noErr) {
168 // Determine number of input channels.
169 input_channels_per_frame_ = input_list.mNumberBuffers > 0 ?
170 input_list.mBuffers[0].mNumberChannels : 0;
// One mono buffer per channel implies a non-interleaved (planar) layout;
// otherwise the first buffer carries all interleaved channels.
171 if (input_channels_per_frame_ == 1 && input_list.mNumberBuffers > 1) {
173 input_channels_ = input_list.mNumberBuffers;
176 input_channels_ = input_channels_per_frame_;
181 DVLOG(1) << "Input channels: " << input_channels_;
182 DVLOG(1) << "Input channels per frame: " << input_channels_per_frame_;
184 // The hardware must have at least the requested input channels.
185 if (result != noErr || client_input_channels_ > input_channels_) {
186 LOG(ERROR) << "AudioDevice does not support requested input channels.";
190 // Get output stream configuration.
191 pa.mSelector = kAudioDevicePropertyStreamConfiguration;
192 pa.mScope = kAudioDevicePropertyScopeOutput;
193 pa.mElement = kAudioObjectPropertyElementMaster;
195 result = AudioObjectGetPropertyDataSize(device_, &pa, 0, 0, &size);
196 OSSTATUS_DCHECK(result == noErr, result);
198 if (result == noErr && size > 0) {
200 scoped_ptr<uint8[]> output_list_storage(new uint8[size]);
201 AudioBufferList& output_list =
202 *reinterpret_cast<AudioBufferList*>(output_list_storage.get());
204 result = AudioObjectGetPropertyData(
211 OSSTATUS_DCHECK(result == noErr, result);
213 if (result == noErr) {
214 // Determine number of output channels.
// NOTE(review): unlike the input path above, mNumberBuffers > 0 is not
// checked before indexing mBuffers[0] — confirm a successful query
// guarantees at least one buffer.
215 output_channels_per_frame_ = output_list.mBuffers[0].mNumberChannels;
// Same planar-vs-interleaved heuristic as the input side.
216 if (output_channels_per_frame_ == 1 && output_list.mNumberBuffers > 1) {
218 output_channels_ = output_list.mNumberBuffers;
221 output_channels_ = output_channels_per_frame_;
226 DVLOG(1) << "Output channels: " << output_channels_;
227 DVLOG(1) << "Output channels per frame: " << output_channels_per_frame_;
229 // The hardware must have at least the requested output channels.
230 if (result != noErr ||
231 output_channels_ < static_cast<int>(format_.mChannelsPerFrame)) {
232 LOG(ERROR) << "AudioDevice does not support requested output channels.";
236 // Setup the I/O proc.
// RenderProc trampolines back into this object via the |this| context.
237 result = AudioDeviceCreateIOProcID(device_, RenderProc, this, &io_proc_id_);
238 if (result != noErr) {
239 LOG(ERROR) << "Error creating IOProc.";
// Tears down the IOProc created in Open() and hands this stream back to
// the manager.  Stop() must have been called first (|is_playing_| false).
246 void AudioHardwareUnifiedStream::Close() {
247 DCHECK(!is_playing_);
249 OSStatus result = AudioDeviceDestroyIOProcID(device_, io_proc_id_);
250 OSSTATUS_DCHECK(result == noErr, result);
// Satisfies the DCHECK in the destructor: the device is now released.
253 device_ = kAudioObjectUnknown;
255 // Inform the audio manager that we have been closed. This can cause our
// destruction, so no member state may be touched after this call.
257 manager_->ReleaseOutputStream(this);
// Starts hardware I/O on the device; |callback| is the source/sink the
// IOProc will service.  Guards against starting when the stream is not
// open or is already playing (DCHECKs in debug, early-out otherwise).
260 void AudioHardwareUnifiedStream::Start(AudioSourceCallback* callback) {
262 DCHECK_NE(device_, kAudioObjectUnknown);
263 DCHECK(!is_playing_);
264 if (device_ == kAudioObjectUnknown || is_playing_)
269 OSStatus result = AudioDeviceStart(device_, io_proc_id_);
270 OSSTATUS_DCHECK(result == noErr, result);
// Halts hardware I/O.  Safe to call when no device is open — the stop is
// skipped entirely in that case.
276 void AudioHardwareUnifiedStream::Stop() {
280 if (device_ != kAudioObjectUnknown) {
281 OSStatus result = AudioDeviceStop(device_, io_proc_id_);
282 OSSTATUS_DCHECK(result == noErr, result);
// Caches the requested volume; it is not yet forwarded to the hardware
// (see TODO below), so it has no audible effect on the device itself.
289 void AudioHardwareUnifiedStream::SetVolume(double volume) {
290 volume_ = static_cast<float>(volume);
291 // TODO(crogers): set volume property
294 void AudioHardwareUnifiedStream::GetVolume(double* volume) {
298 // Pulls on our provider with optional input, asking it to render output.
299 // Note to future hackers of this function: Do not add locks here because this
300 // is running on a real-time thread (for low-latency).
//
// NOTE(review): the device buffers are accessed as Float32 here even
// though |format_| requests signed-integer PCM — HAL IOProc streams are
// typically Float32 regardless; confirm against the stream's virtual
// format.
301 OSStatus AudioHardwareUnifiedStream::Render(
302 AudioDeviceID device,
303 const AudioTimeStamp* now,
304 const AudioBufferList* input_data,
305 const AudioTimeStamp* input_time,
306 AudioBufferList* output_data,
307 const AudioTimeStamp* output_time) {
308 // Convert the input data accounting for possible interleaving.
309 // TODO(crogers): it's better to simply memcpy() if source is already planar.
// Only deinterleave when the hardware supplies at least as many channels
// as the client asked for.
310 if (input_channels_ >= client_input_channels_) {
311 for (int channel_index = 0; channel_index < client_input_channels_;
315 int source_channel_index = channel_index;
317 if (input_channels_per_frame_ > 1) {
// Interleaved: channel N starts at offset N in buffer 0 and advances by
// the per-frame stride below.
319 source = static_cast<float*>(input_data->mBuffers[0].mData) +
320 source_channel_index;
// Planar: each channel lives in its own buffer.
323 source = static_cast<float*>(
324 input_data->mBuffers[source_channel_index].mData);
327 float* p = input_bus_->channel(channel_index);
328 for (int i = 0; i < number_of_frames_; ++i) {
330 source += input_channels_per_frame_;
333 } else if (input_channels_) {
337 // Give the client optional input data and have it render the output data.
338 source_->OnMoreIOData(input_bus_.get(),
340 AudioBuffersState(0, 0));
342 // TODO(crogers): handle final Core Audio 5.1 layout for 5.1 audio.
344 // Handle interleaving as necessary.
345 // TODO(crogers): it's better to simply memcpy() if dest is already planar.
// Mirror of the input path above: scatter each rendered planar channel
// into the device's interleaved or planar output buffers.
347 for (int channel_index = 0;
348 channel_index < static_cast<int>(format_.mChannelsPerFrame);
352 int dest_channel_index = channel_index;
354 if (output_channels_per_frame_ > 1) {
356 dest = static_cast<float*>(output_data->mBuffers[0].mData) +
360 dest = static_cast<float*>(
361 output_data->mBuffers[dest_channel_index].mData);
364 float* p = output_bus_->channel(channel_index);
365 for (int i = 0; i < number_of_frames_; ++i) {
367 dest += output_channels_per_frame_;
374 OSStatus AudioHardwareUnifiedStream::RenderProc(
375 AudioDeviceID device,
376 const AudioTimeStamp* now,
377 const AudioBufferList* input_data,
378 const AudioTimeStamp* input_time,
379 AudioBufferList* output_data,
380 const AudioTimeStamp* output_time,
382 AudioHardwareUnifiedStream* audio_output =
383 static_cast<AudioHardwareUnifiedStream*>(user_data);
384 DCHECK(audio_output);
388 return audio_output->Render(