capture: Implement per-stream volume control for capture streams.
[platform/upstream/pulseaudio.git] / src/modules/alsa/alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core-error.h>
38 #include <pulsecore/core.h>
39 #include <pulsecore/module.h>
40 #include <pulsecore/memchunk.h>
41 #include <pulsecore/sink.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/core-rtclock.h>
44 #include <pulsecore/core-util.h>
45 #include <pulsecore/sample-util.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/thread.h>
49 #include <pulsecore/core-error.h>
50 #include <pulsecore/thread-mq.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/time-smoother.h>
53
54 #include <modules/reserve-wrap.h>
55
56 #include "alsa-util.h"
57 #include "alsa-source.h"
58
59 /* #define DEBUG_TIMING */
60
61 #define DEFAULT_DEVICE "default"
62
63 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
64 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
65
66 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
67 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
68 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
69 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
70 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
71 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
72
73 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
74 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
75
76 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
77 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
78
79 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
80 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
81
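/* If the software correction left over after setting the hardware volume is
 * within 1% of PA_VOLUME_NORM we consider it accurate enough and skip it
 * (see the accurate_enough checks below). */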
82 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
83
84 struct userdata {
85     pa_core *core;
86     pa_module *module;
87     pa_source *source;
88
89     pa_thread *thread;
90     pa_thread_mq thread_mq;
91     pa_rtpoll *rtpoll;
92
93     snd_pcm_t *pcm_handle;
94
95     pa_alsa_fdlist *mixer_fdl;
96     pa_alsa_mixer_pdata *mixer_pd;
97     snd_mixer_t *mixer_handle;
98     pa_alsa_path_set *mixer_path_set;
99     pa_alsa_path *mixer_path;
100
101     pa_cvolume hardware_volume;
102
103     size_t
104         frame_size,
105         fragment_size,
106         hwbuf_size,
107         tsched_watermark,
108         hwbuf_unused,
109         min_sleep,
110         min_wakeup,
111         watermark_inc_step,
112         watermark_dec_step,
113         watermark_inc_threshold,
114         watermark_dec_threshold;
115
116     pa_usec_t watermark_dec_not_before;
117
118     char *device_name;  /* name of the PCM device */
119     char *control_device; /* name of the control device */
120
121     pa_bool_t use_mmap:1, use_tsched:1;
122
123     pa_bool_t first;
124
125     pa_rtpoll_item *alsa_rtpoll_item;
126
127     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
128
129     pa_smoother *smoother;
130     uint64_t read_count;
131     pa_usec_t smoother_interval;
132     pa_usec_t last_smoother_update;
133
134     pa_reserve_wrapper *reserve;
135     pa_hook_slot *reserve_slot;
136     pa_reserve_monitor_wrapper *monitor;
137     pa_hook_slot *monitor_slot;
138 };
139
140 static void userdata_free(struct userdata *u);
141
142 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
143     pa_assert(r);
144     pa_assert(u);
145
146     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
147         return PA_HOOK_CANCEL;
148
149     return PA_HOOK_OK;
150 }
151
152 static void reserve_done(struct userdata *u) {
153     pa_assert(u);
154
155     if (u->reserve_slot) {
156         pa_hook_slot_free(u->reserve_slot);
157         u->reserve_slot = NULL;
158     }
159
160     if (u->reserve) {
161         pa_reserve_wrapper_unref(u->reserve);
162         u->reserve = NULL;
163     }
164 }
165
166 static void reserve_update(struct userdata *u) {
167     const char *description;
168     pa_assert(u);
169
170     if (!u->source || !u->reserve)
171         return;
172
173     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
174         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
175 }
176
177 static int reserve_init(struct userdata *u, const char *dname) {
178     char *rname;
179
180     pa_assert(u);
181     pa_assert(dname);
182
183     if (u->reserve)
184         return 0;
185
186     if (pa_in_system_mode())
187         return 0;
188
189     if (!(rname = pa_alsa_get_reserve_name(dname)))
190         return 0;
191
192     /* We are resuming, try to lock the device */
193     u->reserve = pa_reserve_wrapper_get(u->core, rname);
194     pa_xfree(rname);
195
196     if (!(u->reserve))
197         return -1;
198
199     reserve_update(u);
200
201     pa_assert(!u->reserve_slot);
202     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
203
204     return 0;
205 }
206
207 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
208     pa_bool_t b;
209
210     pa_assert(w);
211     pa_assert(u);
212
213     b = PA_PTR_TO_UINT(busy) && !u->reserve;
214
215     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
216     return PA_HOOK_OK;
217 }
218
219 static void monitor_done(struct userdata *u) {
220     pa_assert(u);
221
222     if (u->monitor_slot) {
223         pa_hook_slot_free(u->monitor_slot);
224         u->monitor_slot = NULL;
225     }
226
227     if (u->monitor) {
228         pa_reserve_monitor_wrapper_unref(u->monitor);
229         u->monitor = NULL;
230     }
231 }
232
233 static int reserve_monitor_init(struct userdata *u, const char *dname) {
234     char *rname;
235
236     pa_assert(u);
237     pa_assert(dname);
238
239     if (pa_in_system_mode())
240         return 0;
241
242     if (!(rname = pa_alsa_get_reserve_name(dname)))
243         return 0;
244
245     /* We are resuming, try to lock the device */
246     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
247     pa_xfree(rname);
248
249     if (!(u->monitor))
250         return -1;
251
252     pa_assert(!u->monitor_slot);
253     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
254
255     return 0;
256 }
257
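/* Derive the minimal sleep and wakeup margins (in bytes) from
 * TSCHED_MIN_SLEEP_USEC/TSCHED_MIN_WAKEUP_USEC, clamped between one frame
 * and half of the usable hardware buffer. */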
258 static void fix_min_sleep_wakeup(struct userdata *u) {
259     size_t max_use, max_use_2;
260
261     pa_assert(u);
262     pa_assert(u->use_tsched);
263
264     max_use = u->hwbuf_size - u->hwbuf_unused;
265     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
266
267     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
268     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
269
270     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
271     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
272 }
273
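/* Keep the wakeup watermark inside its valid range: no larger than
 * max_use - min_sleep and no smaller than min_wakeup. */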
274 static void fix_tsched_watermark(struct userdata *u) {
275     size_t max_use;
276     pa_assert(u);
277     pa_assert(u->use_tsched);
278
279     max_use = u->hwbuf_size - u->hwbuf_unused;
280
281     if (u->tsched_watermark > max_use - u->min_sleep)
282         u->tsched_watermark = max_use - u->min_sleep;
283
284     if (u->tsched_watermark < u->min_wakeup)
285         u->tsched_watermark = u->min_wakeup;
286 }
287
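/* Called when we woke up too late or hit an overrun: grow the watermark by
 * doubling it, but by no more than watermark_inc_step (roughly
 * 20ms -> 30ms -> 40ms -> ... with the default 20ms watermark and 10ms step).
 * If the watermark is already at its maximum, raise the minimal latency
 * instead. */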
288 static void increase_watermark(struct userdata *u) {
289     size_t old_watermark;
290     pa_usec_t old_min_latency, new_min_latency;
291
292     pa_assert(u);
293     pa_assert(u->use_tsched);
294
295     /* First, just try to increase the watermark */
296     old_watermark = u->tsched_watermark;
297     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
298     fix_tsched_watermark(u);
299
300     if (old_watermark != u->tsched_watermark) {
301         pa_log_info("Increasing wakeup watermark to %0.2f ms",
302                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
303         return;
304     }
305
306     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
307     old_min_latency = u->source->thread_info.min_latency;
308     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
309     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
310
311     if (old_min_latency != new_min_latency) {
312         pa_log_info("Increasing minimal latency to %0.2f ms",
313                     (double) new_min_latency / PA_USEC_PER_MSEC);
314
315         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
316     }
317
318     /* When we reach this we're officially fucked! */
319 }
320
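/* Shrink the watermark again, but only after things have looked good for
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC (tracked via watermark_dec_not_before),
 * and never by more than watermark_dec_step or below half the current
 * value. */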
321 static void decrease_watermark(struct userdata *u) {
322     size_t old_watermark;
323     pa_usec_t now;
324
325     pa_assert(u);
326     pa_assert(u->use_tsched);
327
328     now = pa_rtclock_now();
329
330     if (u->watermark_dec_not_before <= 0)
331         goto restart;
332
333     if (u->watermark_dec_not_before > now)
334         return;
335
336     old_watermark = u->tsched_watermark;
337
338     if (u->tsched_watermark < u->watermark_dec_step)
339         u->tsched_watermark = u->tsched_watermark / 2;
340     else
341         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
342
343     fix_tsched_watermark(u);
344
345     if (old_watermark != u->tsched_watermark)
346         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
347                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
348
349     /* We don't change the latency range */
350
351 restart:
352     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
353 }
354
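/* Split the requested latency into the time we may sleep and the time we
 * reserve for processing: the wakeup watermark is the processing budget,
 * capped at half the total buffer time. */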
355 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
356     pa_usec_t wm, usec;
357
358     pa_assert(sleep_usec);
359     pa_assert(process_usec);
360
361     pa_assert(u);
362     pa_assert(u->use_tsched);
363
364     usec = pa_source_get_requested_latency_within_thread(u->source);
365
366     if (usec == (pa_usec_t) -1)
367         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
368
369     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
370
371     if (wm > usec)
372         wm = usec/2;
373
374     *sleep_usec = usec - wm;
375     *process_usec = wm;
376
377 #ifdef DEBUG_TIMING
378     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
379                  (unsigned long) (usec / PA_USEC_PER_MSEC),
380                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
381                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
382 #endif
383 }
384
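/* Central PCM error handler: -EPIPE is an overrun, -ESTRPIPE means the
 * system was suspended. Both are passed to snd_pcm_recover() and the stream
 * is marked to be restarted (u->first) afterwards. */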
385 static int try_recover(struct userdata *u, const char *call, int err) {
386     pa_assert(u);
387     pa_assert(call);
388     pa_assert(err < 0);
389
390     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
391
392     pa_assert(err != -EAGAIN);
393
394     if (err == -EPIPE)
395         pa_log_debug("%s: Buffer overrun!", call);
396
397     if (err == -ESTRPIPE)
398         pa_log_debug("%s: System suspended!", call);
399
400     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
401         pa_log("%s: %s", call, pa_alsa_strerror(err));
402         return -1;
403     }
404
405     u->first = TRUE;
406     return 0;
407 }
408
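/* Given how many bytes ALSA reports as available, compute how much room is
 * left before the hardware buffer overruns and adjust the watermark: grow it
 * on (near) overruns, shrink it when timeouts consistently find plenty of
 * room left. */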
409 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
410     size_t left_to_record;
411     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
412     pa_bool_t overrun = FALSE;
413
414     /* We use <= instead of < for this check here because an overrun
415      * only happens after the last sample was processed, not already when
416      * it is removed from the buffer. This is particularly important
417      * when block transfer is used. */
418
419     if (n_bytes <= rec_space)
420         left_to_record = rec_space - n_bytes;
421     else {
422
423         /* We got a dropout. What a mess! */
424         left_to_record = 0;
425         overrun = TRUE;
426
427 #ifdef DEBUG_TIMING
428         PA_DEBUG_TRAP;
429 #endif
430
431         if (pa_log_ratelimit(PA_LOG_INFO))
432             pa_log_info("Overrun!");
433     }
434
435 #ifdef DEBUG_TIMING
436     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
437 #endif
438
439     if (u->use_tsched) {
440         pa_bool_t reset_not_before = TRUE;
441
442         if (overrun || left_to_record < u->watermark_inc_threshold)
443             increase_watermark(u);
444         else if (left_to_record > u->watermark_dec_threshold) {
445             reset_not_before = FALSE;
446
447             /* We decrease the watermark only if we have actually
448              * been woken up by a timeout. If something else woke
449              * us up it's too easy to fulfill the deadlines... */
450
451             if (on_timeout)
452                 decrease_watermark(u);
453         }
454
455         if (reset_not_before)
456             u->watermark_dec_not_before = 0;
457     }
458
459     return left_to_record;
460 }
461
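/* mmap capture path: loop over snd_pcm_avail()/mmap_begin()/mmap_commit()
 * and post the mapped areas to the source as fixed (zero-copy) memblocks
 * until the available data is consumed or the iteration limit is hit.
 * Returns 1 if data was posted, 0 if not, negative on unrecoverable error. */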
462 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
463     pa_bool_t work_done = FALSE;
464     pa_usec_t max_sleep_usec = 0, process_usec = 0;
465     size_t left_to_record;
466     unsigned j = 0;
467
468     pa_assert(u);
469     pa_source_assert_ref(u->source);
470
471     if (u->use_tsched)
472         hw_sleep_time(u, &max_sleep_usec, &process_usec);
473
474     for (;;) {
475         snd_pcm_sframes_t n;
476         size_t n_bytes;
477         int r;
478         pa_bool_t after_avail = TRUE;
479
480         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
481
482             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
483                 continue;
484
485             return r;
486         }
487
488         n_bytes = (size_t) n * u->frame_size;
489
490 #ifdef DEBUG_TIMING
491         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
492 #endif
493
494         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
495         on_timeout = FALSE;
496
497         if (u->use_tsched)
498             if (!polled &&
499                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
500 #ifdef DEBUG_TIMING
501                 pa_log_debug("Not reading, because too early.");
502 #endif
503                 break;
504             }
505
506         if (PA_UNLIKELY(n_bytes <= 0)) {
507
508             if (polled)
509                 PA_ONCE_BEGIN {
510                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
511                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
512                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
513                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
514                            pa_strnull(dn));
515                     pa_xfree(dn);
516                 } PA_ONCE_END;
517
518 #ifdef DEBUG_TIMING
519             pa_log_debug("Not reading, because not necessary.");
520 #endif
521             break;
522         }
523
524
525         if (++j > 10) {
526 #ifdef DEBUG_TIMING
527             pa_log_debug("Not filling up, because already too many iterations.");
528 #endif
529
530             break;
531         }
532
533         polled = FALSE;
534
535 #ifdef DEBUG_TIMING
536         pa_log_debug("Reading");
537 #endif
538
539         for (;;) {
540             pa_memchunk chunk;
541             void *p;
542             int err;
543             const snd_pcm_channel_area_t *areas;
544             snd_pcm_uframes_t offset, frames;
545             snd_pcm_sframes_t sframes;
546
547             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
548 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
549
550             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
551
552                 if (!after_avail && err == -EAGAIN)
553                     break;
554
555                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
556                     continue;
557
558                 return r;
559             }
560
561             /* Make sure that if these memblocks need to be copied they will fit into one slot */
562             if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
563                 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
564
565             if (!after_avail && frames == 0)
566                 break;
567
568             pa_assert(frames > 0);
569             after_avail = FALSE;
570
571             /* Check these are multiples of 8 bits */
572             pa_assert((areas[0].first & 7) == 0);
573             pa_assert((areas[0].step & 7) == 0);
574
575             /* We assume a single interleaved memory buffer */
576             pa_assert((areas[0].first >> 3) == 0);
577             pa_assert((areas[0].step >> 3) == u->frame_size);
578
579             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
580
581             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
582             chunk.length = pa_memblock_get_length(chunk.memblock);
583             chunk.index = 0;
584
585             pa_source_post(u->source, &chunk);
586             pa_memblock_unref_fixed(chunk.memblock);
587
588             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
589
590                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
591                     continue;
592
593                 return r;
594             }
595
596             work_done = TRUE;
597
598             u->read_count += frames * u->frame_size;
599
600 #ifdef DEBUG_TIMING
601             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
602 #endif
603
604             if ((size_t) frames * u->frame_size >= n_bytes)
605                 break;
606
607             n_bytes -= (size_t) frames * u->frame_size;
608         }
609     }
610
611     if (u->use_tsched) {
612         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
613
614         if (*sleep_usec > process_usec)
615             *sleep_usec -= process_usec;
616         else
617             *sleep_usec = 0;
618     }
619
620     return work_done ? 1 : 0;
621 }
622
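/* Non-mmap capture path: read into freshly allocated memblocks with
 * snd_pcm_readi() and post them to the source. Same return convention as
 * mmap_read(). */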
623 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
624     int work_done = FALSE;
625     pa_usec_t max_sleep_usec = 0, process_usec = 0;
626     size_t left_to_record;
627     unsigned j = 0;
628
629     pa_assert(u);
630     pa_source_assert_ref(u->source);
631
632     if (u->use_tsched)
633         hw_sleep_time(u, &max_sleep_usec, &process_usec);
634
635     for (;;) {
636         snd_pcm_sframes_t n;
637         size_t n_bytes;
638         int r;
639         pa_bool_t after_avail = TRUE;
640
641         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
642
643             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
644                 continue;
645
646             return r;
647         }
648
649         n_bytes = (size_t) n * u->frame_size;
650         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
651         on_timeout = FALSE;
652
653         if (u->use_tsched)
654             if (!polled &&
655                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
656                 break;
657
658         if (PA_UNLIKELY(n_bytes <= 0)) {
659
660             if (polled)
661                 PA_ONCE_BEGIN {
662                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
663                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
664                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
665                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
666                            pa_strnull(dn));
667                     pa_xfree(dn);
668                 } PA_ONCE_END;
669
670             break;
671         }
672
673         if (++j > 10) {
674 #ifdef DEBUG_TIMING
675             pa_log_debug("Not filling up, because already too many iterations.");
676 #endif
677
678             break;
679         }
680
681         polled = FALSE;
682
683         for (;;) {
684             void *p;
685             snd_pcm_sframes_t frames;
686             pa_memchunk chunk;
687
688             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
689
690             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
691
692             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
693                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
694
695 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
696
697             p = pa_memblock_acquire(chunk.memblock);
698             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
699             pa_memblock_release(chunk.memblock);
700
701             if (PA_UNLIKELY(frames < 0)) {
702                 pa_memblock_unref(chunk.memblock);
703
704                 if (!after_avail && (int) frames == -EAGAIN)
705                     break;
706
707                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
708                     continue;
709
710                 return r;
711             }
712
713             if (!after_avail && frames == 0) {
714                 pa_memblock_unref(chunk.memblock);
715                 break;
716             }
717
718             pa_assert(frames > 0);
719             after_avail = FALSE;
720
721             chunk.index = 0;
722             chunk.length = (size_t) frames * u->frame_size;
723
724             pa_source_post(u->source, &chunk);
725             pa_memblock_unref(chunk.memblock);
726
727             work_done = TRUE;
728
729             u->read_count += frames * u->frame_size;
730
731 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
732
733             if ((size_t) frames * u->frame_size >= n_bytes)
734                 break;
735
736             n_bytes -= (size_t) frames * u->frame_size;
737         }
738     }
739
740     if (u->use_tsched) {
741         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
742
743         if (*sleep_usec > process_usec)
744             *sleep_usec -= process_usec;
745         else
746             *sleep_usec = 0;
747     }
748
749     return work_done ? 1 : 0;
750 }
751
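/* Feed the time smoother with a new (system time, sound card time) pair;
 * the card time is derived from the byte read counter plus the current
 * capture delay. The update interval backs off exponentially up to
 * SMOOTHER_MAX_INTERVAL. */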
752 static void update_smoother(struct userdata *u) {
753     snd_pcm_sframes_t delay = 0;
754     uint64_t position;
755     int err;
756     pa_usec_t now1 = 0, now2;
757     snd_pcm_status_t *status;
758
759     snd_pcm_status_alloca(&status);
760
761     pa_assert(u);
762     pa_assert(u->pcm_handle);
763
764     /* Let's update the time smoother */
765
766     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
767         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
768         return;
769     }
770
771     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
772         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
773     else {
774         snd_htimestamp_t htstamp = { 0, 0 };
775         snd_pcm_status_get_htstamp(status, &htstamp);
776         now1 = pa_timespec_load(&htstamp);
777     }
778
779     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
780     if (now1 <= 0)
781         now1 = pa_rtclock_now();
782
783     /* check if the time since the last update is bigger than the interval */
784     if (u->last_smoother_update > 0)
785         if (u->last_smoother_update + u->smoother_interval > now1)
786             return;
787
788     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
789     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
790
791     pa_smoother_put(u->smoother, now1, now2);
792
793     u->last_smoother_update = now1;
794     /* exponentially increase the update interval up to the MAX limit */
795     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
796 }
797
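/* Estimate the capture latency as the smoothed sound card time minus the
 * amount of data already read, i.e. roughly how much audio is still sitting
 * in the hardware buffer. */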
798 static pa_usec_t source_get_latency(struct userdata *u) {
799     int64_t delay;
800     pa_usec_t now1, now2;
801
802     pa_assert(u);
803
804     now1 = pa_rtclock_now();
805     now2 = pa_smoother_get(u->smoother, now1);
806
807     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
808
809     return delay >= 0 ? (pa_usec_t) delay : 0;
810 }
811
812 static int build_pollfd(struct userdata *u) {
813     pa_assert(u);
814     pa_assert(u->pcm_handle);
815
816     if (u->alsa_rtpoll_item)
817         pa_rtpoll_item_free(u->alsa_rtpoll_item);
818
819     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
820         return -1;
821
822     return 0;
823 }
824
825 /* Called from IO context */
826 static int suspend(struct userdata *u) {
827     pa_assert(u);
828     pa_assert(u->pcm_handle);
829
830     pa_smoother_pause(u->smoother, pa_rtclock_now());
831
832     /* Let's suspend */
833     snd_pcm_close(u->pcm_handle);
834     u->pcm_handle = NULL;
835
836     if (u->alsa_rtpoll_item) {
837         pa_rtpoll_item_free(u->alsa_rtpoll_item);
838         u->alsa_rtpoll_item = NULL;
839     }
840
841     pa_log_info("Device suspended...");
842
843     return 0;
844 }
845
846 /* Called from IO context */
847 static int update_sw_params(struct userdata *u) {
848     snd_pcm_uframes_t avail_min;
849     int err;
850
851     pa_assert(u);
852
853     /* Use the full buffer if no one asked us for anything specific */
854     u->hwbuf_unused = 0;
855
856     if (u->use_tsched) {
857         pa_usec_t latency;
858
859         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
860             size_t b;
861
862             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
863
864             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
865
866             /* We need at least one frame in our buffer */
867
868             if (PA_UNLIKELY(b < u->frame_size))
869                 b = u->frame_size;
870
871             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
872         }
873
874         fix_min_sleep_wakeup(u);
875         fix_tsched_watermark(u);
876     }
877
878     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
879
880     avail_min = 1;
881
882     if (u->use_tsched) {
883         pa_usec_t sleep_usec, process_usec;
884
885         hw_sleep_time(u, &sleep_usec, &process_usec);
886         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
887     }
888
889     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
890
891     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
892         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
893         return err;
894     }
895
896     return 0;
897 }
898
899 /* Called from IO context */
900 static int unsuspend(struct userdata *u) {
901     pa_sample_spec ss;
902     int err;
903     pa_bool_t b, d;
904     snd_pcm_uframes_t period_size, buffer_size;
905
906     pa_assert(u);
907     pa_assert(!u->pcm_handle);
908
909     pa_log_info("Trying resume...");
910
911     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
912                             SND_PCM_NONBLOCK|
913                             SND_PCM_NO_AUTO_RESAMPLE|
914                             SND_PCM_NO_AUTO_CHANNELS|
915                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
916         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
917         goto fail;
918     }
919
920     ss = u->source->sample_spec;
921     period_size = u->fragment_size / u->frame_size;
922     buffer_size = u->hwbuf_size / u->frame_size;
923     b = u->use_mmap;
924     d = u->use_tsched;
925
926     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
927         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
928         goto fail;
929     }
930
931     if (b != u->use_mmap || d != u->use_tsched) {
932         pa_log_warn("Resume failed, couldn't get original access mode.");
933         goto fail;
934     }
935
936     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
937         pa_log_warn("Resume failed, couldn't restore original sample settings.");
938         goto fail;
939     }
940
941     if (period_size*u->frame_size != u->fragment_size ||
942         buffer_size*u->frame_size != u->hwbuf_size) {
943         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
944                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
945                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
946         goto fail;
947     }
948
949     if (update_sw_params(u) < 0)
950         goto fail;
951
952     if (build_pollfd(u) < 0)
953         goto fail;
954
955     /* FIXME: We need to reload the volume somehow */
956
957     u->read_count = 0;
958     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
959     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
960     u->last_smoother_update = 0;
961
962     u->first = TRUE;
963
964     pa_log_info("Resumed successfully...");
965
966     return 0;
967
968 fail:
969     if (u->pcm_handle) {
970         snd_pcm_close(u->pcm_handle);
971         u->pcm_handle = NULL;
972     }
973
974     return -PA_ERR_IO;
975 }
976
977 /* Called from IO context */
978 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
979     struct userdata *u = PA_SOURCE(o)->userdata;
980
981     switch (code) {
982
983         case PA_SOURCE_MESSAGE_GET_LATENCY: {
984             pa_usec_t r = 0;
985
986             if (u->pcm_handle)
987                 r = source_get_latency(u);
988
989             *((pa_usec_t*) data) = r;
990
991             return 0;
992         }
993
994         case PA_SOURCE_MESSAGE_SET_STATE:
995
996             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
997
998                 case PA_SOURCE_SUSPENDED: {
999                     int r;
1000
1001                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1002
1003                     if ((r = suspend(u)) < 0)
1004                         return r;
1005
1006                     break;
1007                 }
1008
1009                 case PA_SOURCE_IDLE:
1010                 case PA_SOURCE_RUNNING: {
1011                     int r;
1012
1013                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1014                         if (build_pollfd(u) < 0)
1015                             return -PA_ERR_IO;
1016                     }
1017
1018                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1019                         if ((r = unsuspend(u)) < 0)
1020                             return r;
1021                     }
1022
1023                     break;
1024                 }
1025
1026                 case PA_SOURCE_UNLINKED:
1027                 case PA_SOURCE_INIT:
1028                 case PA_SOURCE_INVALID_STATE:
1029                     ;
1030             }
1031
1032             break;
1033     }
1034
1035     return pa_source_process_msg(o, code, data, offset, chunk);
1036 }
1037
1038 /* Called from main context */
1039 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1040     pa_source_state_t old_state;
1041     struct userdata *u;
1042
1043     pa_source_assert_ref(s);
1044     pa_assert_se(u = s->userdata);
1045
1046     old_state = pa_source_get_state(u->source);
1047
1048     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1049         reserve_done(u);
1050     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1051         if (reserve_init(u, u->device_name) < 0)
1052             return -PA_ERR_BUSY;
1053
1054     return 0;
1055 }
1056
1057 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1058     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1059
1060     pa_assert(u);
1061     pa_assert(u->mixer_handle);
1062
1063     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1064         return 0;
1065
1066     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1067         return 0;
1068
1069     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1070         pa_source_get_volume(u->source, TRUE);
1071         pa_source_get_mute(u->source, TRUE);
1072     }
1073
1074     return 0;
1075 }
1076
1077 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1078     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1079
1080     pa_assert(u);
1081     pa_assert(u->mixer_handle);
1082
1083     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1084         return 0;
1085
1086     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1087         return 0;
1088
1089     if (mask & SND_CTL_EVENT_MASK_VALUE)
1090         pa_source_update_volume_and_mute(u->source);
1091
1092     return 0;
1093 }
1094
1095 static void source_get_volume_cb(pa_source *s) {
1096     struct userdata *u = s->userdata;
1097     pa_cvolume r;
1098     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1099
1100     pa_assert(u);
1101     pa_assert(u->mixer_path);
1102     pa_assert(u->mixer_handle);
1103
1104     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1105         return;
1106
1107     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1108     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1109
1110     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1111
1112     if (u->mixer_path->has_dB) {
1113         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1114
1115         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1116     }
1117
1118     if (pa_cvolume_equal(&u->hardware_volume, &r))
1119         return;
1120
1121     s->real_volume = u->hardware_volume = r;
1122
1123     /* Hmm, so the hardware volume changed, let's reset our software volume */
1124     if (u->mixer_path->has_dB)
1125         pa_source_set_soft_volume(s, NULL);
1126 }
1127
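/* Apply s->real_volume to the hardware as closely as possible. With
 * PA_SOURCE_SYNC_VOLUME the mixer write is deferred to the IO thread
 * (write_to_hw is FALSE here); on dB-capable paths any remainder the
 * hardware could not express becomes the software volume, unless it is
 * within VOLUME_ACCURACY. */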
1128 static void source_set_volume_cb(pa_source *s) {
1129     struct userdata *u = s->userdata;
1130     pa_cvolume r;
1131     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1132     pa_bool_t write_to_hw = (s->flags & PA_SOURCE_SYNC_VOLUME) ? FALSE : TRUE;
1133
1134     pa_assert(u);
1135     pa_assert(u->mixer_path);
1136     pa_assert(u->mixer_handle);
1137
1138     /* Shift up by the base volume */
1139     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1140
1141     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, write_to_hw) < 0)
1142         return;
1143
1144     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1145     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1146
1147     u->hardware_volume = r;
1148
1149     if (u->mixer_path->has_dB) {
1150         pa_cvolume new_soft_volume;
1151         pa_bool_t accurate_enough;
1152         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1153
1154         /* Match exactly what the user requested by software */
1155         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1156
1157         /* If the adjustment to do in software is only minimal we
1158          * can skip it. That saves us CPU at the expense of a bit of
1159          * accuracy */
1160         accurate_enough =
1161             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1162             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1163
1164         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1165         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1166         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1167         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1168         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1169                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1170                      pa_yes_no(accurate_enough));
1171         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1172
1173         if (!accurate_enough)
1174             s->soft_volume = new_soft_volume;
1175
1176     } else {
1177         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1178
1179         /* We can't match exactly what the user requested, hence let's
1180          * at least tell the user about it */
1181
1182         s->real_volume = r;
1183     }
1184 }
1185
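/* IO-thread half of the deferred (sync) volume handling: write
 * thread_info.current_hw_volume to the mixer and log if what ended up in
 * hardware differs from the request by more than VOLUME_ACCURACY. */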
1186 static void source_write_volume_cb(pa_source *s) {
1187     struct userdata *u = s->userdata;
1188     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1189
1190     pa_assert(u);
1191     pa_assert(u->mixer_path);
1192     pa_assert(u->mixer_handle);
1193     pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);
1194
1195     /* Shift up by the base volume */
1196     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1197
1198     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE) < 0)
1199         pa_log_error("Writing HW volume failed");
1200     else {
1201         pa_cvolume tmp_vol;
1202         pa_bool_t accurate_enough;
1203
1204         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1205         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1206
1207         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1208         accurate_enough =
1209             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1210             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1211
1212         if (!accurate_enough) {
1213             union {
1214                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1215                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1216             } vol;
1217
1218             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1219                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1220                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1221             pa_log_debug("                                           in dB: %s (request) != %s",
1222                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1223                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1224         }
1225     }
1226 }
1227
1228 static void source_get_mute_cb(pa_source *s) {
1229     struct userdata *u = s->userdata;
1230     pa_bool_t b;
1231
1232     pa_assert(u);
1233     pa_assert(u->mixer_path);
1234     pa_assert(u->mixer_handle);
1235
1236     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1237         return;
1238
1239     s->muted = b;
1240 }
1241
1242 static void source_set_mute_cb(pa_source *s) {
1243     struct userdata *u = s->userdata;
1244
1245     pa_assert(u);
1246     pa_assert(u->mixer_path);
1247     pa_assert(u->mixer_handle);
1248
1249     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1250 }
1251
1252 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1253     struct userdata *u = s->userdata;
1254     pa_alsa_port_data *data;
1255
1256     pa_assert(u);
1257     pa_assert(p);
1258     pa_assert(u->mixer_handle);
1259
1260     data = PA_DEVICE_PORT_DATA(p);
1261
1262     pa_assert_se(u->mixer_path = data->path);
1263     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1264
1265     if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1266         s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1267         s->n_volume_steps = PA_VOLUME_NORM+1;
1268
1269         pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1270     } else {
1271         s->base_volume = PA_VOLUME_NORM;
1272         s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1273     }
1274
1275     if (data->setting)
1276         pa_alsa_setting_select(data->setting, u->mixer_handle);
1277
1278     if (s->set_mute)
1279         s->set_mute(s);
1280     if (s->set_volume)
1281         s->set_volume(s);
1282
1283     return 0;
1284 }
1285
1286 static void source_update_requested_latency_cb(pa_source *s) {
1287     struct userdata *u = s->userdata;
1288     pa_assert(u);
1289     pa_assert(u->use_tsched); /* we can adjust the latency
1290                                * dynamically only when timer
1291                                * scheduling is used */
1292
1293     if (!u->pcm_handle)
1294         return;
1295
1296     update_sw_params(u);
1297 }
1298
1299 static void thread_func(void *userdata) {
1300     struct userdata *u = userdata;
1301     unsigned short revents = 0;
1302
1303     pa_assert(u);
1304
1305     pa_log_debug("Thread starting up");
1306
1307     if (u->core->realtime_scheduling)
1308         pa_make_realtime(u->core->realtime_priority);
1309
1310     pa_thread_mq_install(&u->thread_mq);
1311
1312     for (;;) {
1313         int ret;
1314         pa_usec_t rtpoll_sleep = 0;
1315
1316 #ifdef DEBUG_TIMING
1317         pa_log_debug("Loop");
1318 #endif
1319
1320         /* Read some data and pass it to the sources */
1321         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1322             int work_done;
1323             pa_usec_t sleep_usec = 0;
1324             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1325
1326             if (u->first) {
1327                 pa_log_info("Starting capture.");
1328                 snd_pcm_start(u->pcm_handle);
1329
1330                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1331
1332                 u->first = FALSE;
1333             }
1334
1335             if (u->use_mmap)
1336                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1337             else
1338                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1339
1340             if (work_done < 0)
1341                 goto fail;
1342
1343 /*             pa_log_debug("work_done = %i", work_done); */
1344
1345             if (work_done)
1346                 update_smoother(u);
1347
1348             if (u->use_tsched) {
1349                 pa_usec_t cusec;
1350
1351                 /* OK, the capture buffer is now empty, let's
1352                  * calculate when to wake up next */
1353
1354 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1355
1356                 /* Convert from the sound card time domain to the
1357                  * system time domain */
1358                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1359
1360 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1361
1362                 /* We don't trust the conversion, so we wake up at whichever comes first */
1363                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1364             }
1365         }
1366
1367         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1368             pa_usec_t volume_sleep;
1369             pa_source_volume_change_apply(u->source, &volume_sleep);
1370             if (volume_sleep > 0)
1371                 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1372         }
1373
1374         if (rtpoll_sleep > 0)
1375             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1376         else
1377             pa_rtpoll_set_timer_disabled(u->rtpoll);
1378
1379         /* Hmm, nothing to do. Let's sleep */
1380         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1381             goto fail;
1382
1383         if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1384             pa_source_volume_change_apply(u->source, NULL);
1385
1386         if (ret == 0)
1387             goto finish;
1388
1389         /* Tell ALSA about this and process its response */
1390         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1391             struct pollfd *pollfd;
1392             int err;
1393             unsigned n;
1394
1395             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1396
1397             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1398                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1399                 goto fail;
1400             }
1401
1402             if (revents & ~POLLIN) {
1403                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1404                     goto fail;
1405
1406                 u->first = TRUE;
1407             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1408                 pa_log_debug("Wakeup from ALSA!");
1409
1410         } else
1411             revents = 0;
1412     }
1413
1414 fail:
1415     /* If this was not a regular exit from the loop we have to continue
1416      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1417     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1418     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1419
1420 finish:
1421     pa_log_debug("Thread shutting down");
1422 }
1423
1424 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1425     const char *n;
1426     char *t;
1427
1428     pa_assert(data);
1429     pa_assert(ma);
1430     pa_assert(device_name);
1431
1432     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1433         pa_source_new_data_set_name(data, n);
1434         data->namereg_fail = TRUE;
1435         return;
1436     }
1437
1438     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1439         data->namereg_fail = TRUE;
1440     else {
1441         n = device_id ? device_id : device_name;
1442         data->namereg_fail = FALSE;
1443     }
1444
1445     if (mapping)
1446         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1447     else
1448         t = pa_sprintf_malloc("alsa_input.%s", n);
1449
1450     pa_source_new_data_set_name(data, t);
1451     pa_xfree(t);
1452 }
1453
1454 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1455
1456     if (!mapping && !element)
1457         return;
1458
1459     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1460         pa_log_info("Failed to find a working mixer device.");
1461         return;
1462     }
1463
1464     if (element) {
1465
1466         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1467             goto fail;
1468
1469         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1470             goto fail;
1471
1472         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1473         pa_alsa_path_dump(u->mixer_path);
1474     } else {
1475
1476         if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1477             goto fail;
1478
1479         pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1480
1481         pa_log_debug("Probed mixer paths:");
1482         pa_alsa_path_set_dump(u->mixer_path_set);
1483     }
1484
1485     return;
1486
1487 fail:
1488
1489     if (u->mixer_path_set) {
1490         pa_alsa_path_set_free(u->mixer_path_set);
1491         u->mixer_path_set = NULL;
1492     } else if (u->mixer_path) {
1493         pa_alsa_path_free(u->mixer_path);
1494         u->mixer_path = NULL;
1495     }
1496
1497     if (u->mixer_handle) {
1498         snd_mixer_close(u->mixer_handle);
1499         u->mixer_handle = NULL;
1500     }
1501 }
1502
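/* Hook the probed mixer path up to the source: select the active port/path,
 * install hardware volume/mute callbacks where the driver supports them
 * (optionally with dB scale and synchronous volume) and register mixer event
 * monitoring, either on the IO rtpoll (sync volume) or on the main loop. */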
1503 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1504     pa_assert(u);
1505
1506     if (!u->mixer_handle)
1507         return 0;
1508
1509     if (u->source->active_port) {
1510         pa_alsa_port_data *data;
1511
1512         /* We have a list of supported paths, so let's activate the
1513          * one that has been chosen as active */
1514
1515         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1516         u->mixer_path = data->path;
1517
1518         pa_alsa_path_select(data->path, u->mixer_handle);
1519
1520         if (data->setting)
1521             pa_alsa_setting_select(data->setting, u->mixer_handle);
1522
1523     } else {
1524
1525         if (!u->mixer_path && u->mixer_path_set)
1526             u->mixer_path = u->mixer_path_set->paths;
1527
1528         if (u->mixer_path) {
1529             /* Hmm, we have only a single path, so let's activate it */
1530
1531             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1532
1533             if (u->mixer_path->settings)
1534                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1535         } else
1536             return 0;
1537     }
1538
1539     if (!u->mixer_path->has_volume)
1540         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1541     else {
1542
1543         if (u->mixer_path->has_dB) {
1544             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1545
1546             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1547             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1548
1549             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1550
1551         } else {
1552             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1553             u->source->base_volume = PA_VOLUME_NORM;
1554             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1555         }
1556
1557         u->source->get_volume = source_get_volume_cb;
1558         u->source->set_volume = source_set_volume_cb;
1559         u->source->write_volume = source_write_volume_cb;
1560
1561         u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL;
1562         if (u->mixer_path->has_dB) {
1563             u->source->flags |= PA_SOURCE_DECIBEL_VOLUME;
1564             if (sync_volume) {
1565                 u->source->flags |= PA_SOURCE_SYNC_VOLUME;
1566                 pa_log_info("Successfully enabled synchronous volume.");
1567             }
1568         }
1569
1570         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1571     }
1572
1573     if (!u->mixer_path->has_mute) {
1574         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1575     } else {
1576         u->source->get_mute = source_get_mute_cb;
1577         u->source->set_mute = source_set_mute_cb;
1578         u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1579         pa_log_info("Using hardware mute control.");
1580     }
1581
1582     if (u->source->flags & (PA_SOURCE_HW_VOLUME_CTRL|PA_SOURCE_HW_MUTE_CTRL)) {
1583         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1584         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1585             u->mixer_pd = pa_alsa_mixer_pdata_new();
1586             mixer_callback = io_mixer_callback;
1587
1588             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1589                 pa_log("Failed to initialize file descriptor monitoring");
1590                 return -1;
1591             }
1592         } else {
1593             u->mixer_fdl = pa_alsa_fdlist_new();
1594             mixer_callback = ctl_mixer_callback;
1595
1596             if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1597                 pa_log("Failed to initialize file descriptor monitoring");
1598                 return -1;
1599             }
1600         }
1601
1602         if (u->mixer_path_set)
1603             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1604         else
1605             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1606     }
1607
1608     return 0;
1609 }
1610
1611 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1612
1613     struct userdata *u = NULL;
1614     const char *dev_id = NULL;
1615     pa_sample_spec ss, requested_ss;
1616     pa_channel_map map;
1617     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1618     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1619     size_t frame_size;
1620     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1621     pa_source_new_data data;
1622     pa_alsa_profile_set *profile_set = NULL;
1623
1624     pa_assert(m);
1625     pa_assert(ma);
1626
1627     ss = m->core->default_sample_spec;
1628     map = m->core->default_channel_map;
1629     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1630         pa_log("Failed to parse sample specification and channel map");
1631         goto fail;
1632     }
1633
1634     requested_ss = ss;
1635     frame_size = pa_frame_size(&ss);
1636
1637     nfrags = m->core->default_n_fragments;
1638     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1639     if (frag_size == 0) /* frag_size is unsigned, so only the zero case needs fixing up */
1640         frag_size = (uint32_t) frame_size;
1641     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1642     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1643
1644     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1645         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1646         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1647         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1648         pa_log("Failed to parse buffer metrics");
1649         goto fail;
1650     }
1651
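    /* Translate the byte sizes above into ALSA frame counts; the driver may
     * still adjust these when the hardware parameters are actually set. */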
1652     buffer_size = nfrags * frag_size;
1653
1654     period_frames = frag_size/frame_size;
1655     buffer_frames = buffer_size/frame_size;
1656     tsched_frames = tsched_size/frame_size;
1657
1658     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1659         pa_log("Failed to parse mmap argument.");
1660         goto fail;
1661     }
1662
1663     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1664         pa_log("Failed to parse tsched argument.");
1665         goto fail;
1666     }
1667
1668     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1669         pa_log("Failed to parse ignore_dB argument.");
1670         goto fail;
1671     }
1672
1673     sync_volume = m->core->sync_volume;
1674     if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
1675         pa_log("Failed to parse sync_volume argument.");
1676         goto fail;
1677     }
1678
1679     use_tsched = pa_alsa_may_tsched(use_tsched);
1680
1681     u = pa_xnew0(struct userdata, 1);
1682     u->core = m->core;
1683     u->module = m;
1684     u->use_mmap = use_mmap;
1685     u->use_tsched = use_tsched;
1686     u->first = TRUE;
1687     u->rtpoll = pa_rtpoll_new();
1688     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1689
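    /* The smoother interpolates the capture latency between ALSA timing
     * updates by mapping the sound card's clock onto the system clock; it is
     * paused and resumed elsewhere in this file as the stream stops and
     * starts. */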
1690     u->smoother = pa_smoother_new(
1691             SMOOTHER_ADJUST_USEC,
1692             SMOOTHER_WINDOW_USEC,
1693             TRUE,
1694             TRUE,
1695             5,
1696             pa_rtclock_now(),
1697             TRUE);
1698     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1699
1700     dev_id = pa_modargs_get_value(
1701             ma, "device_id",
1702             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1703
1704     if (reserve_init(u, dev_id) < 0)
1705         goto fail;
1706
1707     if (reserve_monitor_init(u, dev_id) < 0)
1708         goto fail;
1709
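    /* b and d are in/out flags: they carry our mmap/tsched wishes into the
     * open helpers below, which clear them if the device cannot satisfy them. */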
1710     b = use_mmap;
1711     d = use_tsched;
1712
1713     if (mapping) {
1714
1715         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1716             pa_log("device_id= not set");
1717             goto fail;
1718         }
1719
1720         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1721                       dev_id,
1722                       &u->device_name,
1723                       &ss, &map,
1724                       SND_PCM_STREAM_CAPTURE,
1725                       &period_frames, &buffer_frames, tsched_frames,
1726                       &b, &d, mapping)))
1727             goto fail;
1728
1729     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1730
1731         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1732             goto fail;
1733
1734         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1735                       dev_id,
1736                       &u->device_name,
1737                       &ss, &map,
1738                       SND_PCM_STREAM_CAPTURE,
1739                       &period_frames, &buffer_frames, tsched_frames,
1740                       &b, &d, profile_set, &mapping)))
1741             goto fail;
1742
1743     } else {
1744
1745         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1746                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1747                       &u->device_name,
1748                       &ss, &map,
1749                       SND_PCM_STREAM_CAPTURE,
1750                       &period_frames, &buffer_frames, tsched_frames,
1751                       &b, &d, FALSE)))
1752             goto fail;
1753     }
1754
1755     pa_assert(u->device_name);
1756     pa_log_info("Successfully opened device %s.", u->device_name);
1757
1758     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1759         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1760         goto fail;
1761     }
1762
1763     if (mapping)
1764         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1765
1766     if (use_mmap && !b) {
1767         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1768         u->use_mmap = use_mmap = FALSE;
1769     }
1770
1771     if (use_tsched && (!b || !d)) {
1772         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1773         u->use_tsched = use_tsched = FALSE;
1774     }
1775
1776     if (u->use_mmap)
1777         pa_log_info("Successfully enabled mmap() mode.");
1778
1779     if (u->use_tsched)
1780         pa_log_info("Successfully enabled timer-based scheduling mode.");
1781
1782     /* ALSA might tweak the sample spec, so recalculate the frame size */
1783     frame_size = pa_frame_size(&ss);
1784
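    /* Probe the ALSA mixer for a volume/mute path (or path set) matching this
     * device and mapping; setup_mixer() below decides how to use the result. */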
1785     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1786
1787     pa_source_new_data_init(&data);
1788     data.driver = driver;
1789     data.module = m;
1790     data.card = card;
1791     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1792
1793     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1794      * variable instead of using &data.namereg_fail directly, because
1795      * data.namereg_fail is a bitfield and taking the address of a bitfield
1796      * variable is impossible. */
1797     namereg_fail = data.namereg_fail;
1798     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1799         pa_log("Failed to parse boolean argument namereg_fail.");
1800         pa_source_new_data_done(&data);
1801         goto fail;
1802     }
1803     data.namereg_fail = namereg_fail;
1804
1805     pa_source_new_data_set_sample_spec(&data, &ss);
1806     pa_source_new_data_set_channel_map(&data, &map);
1807
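    /* Fill the source's property list with information about the underlying
     * ALSA device so clients and other modules can identify it. */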
1808     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1809     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1810     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1811     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1812     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1813
1814     if (mapping) {
1815         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1816         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1817     }
1818
1819     pa_alsa_init_description(data.proplist);
1820
1821     if (u->control_device)
1822         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1823
1824     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1825         pa_log("Invalid properties");
1826         pa_source_new_data_done(&data);
1827         goto fail;
1828     }
1829
1830     if (u->mixer_path_set)
1831         pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1832
1833     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1834     pa_source_new_data_done(&data);
1835
1836     if (!u->source) {
1837         pa_log("Failed to create source object");
1838         goto fail;
1839     }
1840
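    /* Deferred-volume tuning knobs: the safety margin and extra delay control
     * when hardware volume changes are applied relative to the samples they
     * are meant to affect. */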
1841     if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
1842                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1843         pa_log("Failed to parse sync_volume_safety_margin parameter");
1844         goto fail;
1845     }
1846
1847     if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
1848                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1849         pa_log("Failed to parse sync_volume_extra_delay parameter");
1850         goto fail;
1851     }
1852
1853     u->source->parent.process_msg = source_process_msg;
1854     if (u->use_tsched)
1855         u->source->update_requested_latency = source_update_requested_latency_cb;
1856     u->source->set_state = source_set_state_cb;
1857     u->source->set_port = source_set_port_cb;
1858     u->source->userdata = u;
1859
1860     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1861     pa_source_set_rtpoll(u->source, u->rtpoll);
1862
1863     u->frame_size = frame_size;
1864     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1865     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1866     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1867
1868     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1869                 (double) u->hwbuf_size / (double) u->fragment_size,
1870                 (long unsigned) u->fragment_size,
1871                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1872                 (long unsigned) u->hwbuf_size,
1873                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1874
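    /* With timer-based scheduling, convert the watermark and its adjustment
     * steps/thresholds from time to bytes in the final sample spec and allow
     * dynamic latency up to the full hardware buffer; otherwise the latency is
     * fixed at the buffer size. */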
1875     if (u->use_tsched) {
1876         u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1877
1878         u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1879         u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1880
1881         u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1882         u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1883
1884         fix_min_sleep_wakeup(u);
1885         fix_tsched_watermark(u);
1886
1887         pa_source_set_latency_range(u->source,
1888                                     0,
1889                                     pa_bytes_to_usec(u->hwbuf_size, &ss));
1890
1891         pa_log_info("Time scheduling watermark is %0.2fms",
1892                     (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1893     } else
1894         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1895
1896     reserve_update(u);
1897
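    /* Program the ALSA software parameters and finish mixer setup before
     * starting the IO thread that does the actual capturing. */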
1898     if (update_sw_params(u) < 0)
1899         goto fail;
1900
1901     if (setup_mixer(u, ignore_dB, sync_volume) < 0)
1902         goto fail;
1903
1904     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1905
1906     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1907         pa_log("Failed to create thread.");
1908         goto fail;
1909     }
1910
1911     /* Get initial mixer settings: if a volume/mute was already requested (e.g. by a restore hook), push it to the hardware, otherwise read back the current hardware state */
1912     if (data.volume_is_set) {
1913         if (u->source->set_volume)
1914             u->source->set_volume(u->source);
1915     } else {
1916         if (u->source->get_volume)
1917             u->source->get_volume(u->source);
1918     }
1919
1920     if (data.muted_is_set) {
1921         if (u->source->set_mute)
1922             u->source->set_mute(u->source);
1923     } else {
1924         if (u->source->get_mute)
1925             u->source->get_mute(u->source);
1926     }
1927
1928     pa_source_put(u->source);
1929
1930     if (profile_set)
1931         pa_alsa_profile_set_free(profile_set);
1932
1933     return u->source;
1934
1935 fail:
1936
1937     if (u)
1938         userdata_free(u);
1939
1940     if (profile_set)
1941         pa_alsa_profile_set_free(profile_set);
1942
1943     return NULL;
1944 }
1945
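/* Tear down everything pa_alsa_source_new() set up: unlink the source, stop
 * the IO thread, close the PCM and mixer, and release the device reservation. */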
1946 static void userdata_free(struct userdata *u) {
1947     pa_assert(u);
1948
1949     if (u->source)
1950         pa_source_unlink(u->source);
1951
1952     if (u->thread) {
1953         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1954         pa_thread_free(u->thread);
1955     }
1956
1957     pa_thread_mq_done(&u->thread_mq);
1958
1959     if (u->source)
1960         pa_source_unref(u->source);
1961
1962     if (u->mixer_pd)
1963         pa_alsa_mixer_pdata_free(u->mixer_pd);
1964
1965     if (u->alsa_rtpoll_item)
1966         pa_rtpoll_item_free(u->alsa_rtpoll_item);
1967
1968     if (u->rtpoll)
1969         pa_rtpoll_free(u->rtpoll);
1970
1971     if (u->pcm_handle) {
1972         snd_pcm_drop(u->pcm_handle);
1973         snd_pcm_close(u->pcm_handle);
1974     }
1975
1976     if (u->mixer_fdl)
1977         pa_alsa_fdlist_free(u->mixer_fdl);
1978
1979     if (u->mixer_path_set)
1980         pa_alsa_path_set_free(u->mixer_path_set);
1981     else if (u->mixer_path)
1982         pa_alsa_path_free(u->mixer_path);
1983
1984     if (u->mixer_handle)
1985         snd_mixer_close(u->mixer_handle);
1986
1987     if (u->smoother)
1988         pa_smoother_free(u->smoother);
1989
1990     reserve_done(u);
1991     monitor_done(u);
1992
1993     pa_xfree(u->device_name);
1994     pa_xfree(u->control_device);
1995     pa_xfree(u);
1996 }
1997
1998 void pa_alsa_source_free(pa_source *s) {
1999     struct userdata *u;
2000
2001     pa_source_assert_ref(s);
2002     pa_assert_se(u = s->userdata);
2003
2004     userdata_free(u);
2005 }