alsa: No need to go via sink/source to get the core.
[profile/ivi/pulseaudio-panda.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     pa_alsa_fdlist *mixer_fdl;
94     pa_alsa_mixer_pdata *mixer_pd;
95     snd_mixer_t *mixer_handle;
96     pa_alsa_path_set *mixer_path_set;
97     pa_alsa_path *mixer_path;
98
99     pa_cvolume hardware_volume;
100
101     size_t
102         frame_size,
103         fragment_size,
104         hwbuf_size,
105         tsched_watermark,
106         hwbuf_unused,
107         min_sleep,
108         min_wakeup,
109         watermark_inc_step,
110         watermark_dec_step,
111         watermark_inc_threshold,
112         watermark_dec_threshold;
113
114     pa_usec_t watermark_dec_not_before;
115
116     char *device_name;  /* name of the PCM device */
117     char *control_device; /* name of the control device */
118
119     pa_bool_t use_mmap:1, use_tsched:1, sync_volume:1;
120
121     pa_bool_t first;
122
123     pa_rtpoll_item *alsa_rtpoll_item;
124
125     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
126
127     pa_smoother *smoother;
128     uint64_t read_count;
129     pa_usec_t smoother_interval;
130     pa_usec_t last_smoother_update;
131
132     pa_reserve_wrapper *reserve;
133     pa_hook_slot *reserve_slot;
134     pa_reserve_monitor_wrapper *monitor;
135     pa_hook_slot *monitor_slot;
136 };
137
138 static void userdata_free(struct userdata *u);
139
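/* Device reservation hook: another application has asked for the device,
 * so suspend the source and hand it over. Returning PA_HOOK_CANCEL
 * signals that suspending (and hence releasing the device) failed. */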
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
141     pa_assert(r);
142     pa_assert(u);
143
144     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145         return PA_HOOK_CANCEL;
146
147     return PA_HOOK_OK;
148 }
149
150 static void reserve_done(struct userdata *u) {
151     pa_assert(u);
152
153     if (u->reserve_slot) {
154         pa_hook_slot_free(u->reserve_slot);
155         u->reserve_slot = NULL;
156     }
157
158     if (u->reserve) {
159         pa_reserve_wrapper_unref(u->reserve);
160         u->reserve = NULL;
161     }
162 }
163
164 static void reserve_update(struct userdata *u) {
165     const char *description;
166     pa_assert(u);
167
168     if (!u->source || !u->reserve)
169         return;
170
171     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
173 }
174
175 static int reserve_init(struct userdata *u, const char *dname) {
176     char *rname;
177
178     pa_assert(u);
179     pa_assert(dname);
180
181     if (u->reserve)
182         return 0;
183
184     if (pa_in_system_mode())
185         return 0;
186
187     if (!(rname = pa_alsa_get_reserve_name(dname)))
188         return 0;
189
190     /* We are resuming, try to lock the device */
191     u->reserve = pa_reserve_wrapper_get(u->core, rname);
192     pa_xfree(rname);
193
194     if (!(u->reserve))
195         return -1;
196
197     reserve_update(u);
198
199     pa_assert(!u->reserve_slot);
200     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
201
202     return 0;
203 }
204
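/* Reservation monitor hook: suspend the source while another application
 * holds the device and resume it when the device becomes free again,
 * unless we hold the reservation ourselves. */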
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
206     pa_bool_t b;
207
208     pa_assert(w);
209     pa_assert(u);
210
211     b = PA_PTR_TO_UINT(busy) && !u->reserve;
212
213     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
214     return PA_HOOK_OK;
215 }
216
217 static void monitor_done(struct userdata *u) {
218     pa_assert(u);
219
220     if (u->monitor_slot) {
221         pa_hook_slot_free(u->monitor_slot);
222         u->monitor_slot = NULL;
223     }
224
225     if (u->monitor) {
226         pa_reserve_monitor_wrapper_unref(u->monitor);
227         u->monitor = NULL;
228     }
229 }
230
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
232     char *rname;
233
234     pa_assert(u);
235     pa_assert(dname);
236
237     if (pa_in_system_mode())
238         return 0;
239
240     if (!(rname = pa_alsa_get_reserve_name(dname)))
241         return 0;
242
243     /* Set up a monitor that tells us when somebody else grabs or releases the device */
244     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
245     pa_xfree(rname);
246
247     if (!(u->monitor))
248         return -1;
249
250     pa_assert(!u->monitor_slot);
251     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
252
253     return 0;
254 }
255
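/* Clamp the minimal sleep and wakeup margins (derived from
 * TSCHED_MIN_SLEEP_USEC/TSCHED_MIN_WAKEUP_USEC) to the usable buffer size. */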
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257     size_t max_use, max_use_2;
258
259     pa_assert(u);
260     pa_assert(u->use_tsched);
261
262     max_use = u->hwbuf_size - u->hwbuf_unused;
263     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
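/* Keep the timer-scheduling watermark within [min_wakeup, max_use - min_sleep]. */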
272 static void fix_tsched_watermark(struct userdata *u) {
273     size_t max_use;
274     pa_assert(u);
275     pa_assert(u->use_tsched);
276
277     max_use = u->hwbuf_size - u->hwbuf_unused;
278
279     if (u->tsched_watermark > max_use - u->min_sleep)
280         u->tsched_watermark = max_use - u->min_sleep;
281
282     if (u->tsched_watermark < u->min_wakeup)
283         u->tsched_watermark = u->min_wakeup;
284 }
285
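/* Called when we overran (or got dangerously close to it): first try to
 * raise the wakeup watermark, and once that is maxed out raise the source's
 * minimal latency instead. */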
286 static void increase_watermark(struct userdata *u) {
287     size_t old_watermark;
288     pa_usec_t old_min_latency, new_min_latency;
289
290     pa_assert(u);
291     pa_assert(u->use_tsched);
292
293     /* First, just try to increase the watermark */
294     old_watermark = u->tsched_watermark;
295     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296     fix_tsched_watermark(u);
297
298     if (old_watermark != u->tsched_watermark) {
299         pa_log_info("Increasing wakeup watermark to %0.2f ms",
300                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
301         return;
302     }
303
304     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305     old_min_latency = u->source->thread_info.min_latency;
306     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
308
309     if (old_min_latency != new_min_latency) {
310         pa_log_info("Increasing minimal latency to %0.2f ms",
311                     (double) new_min_latency / PA_USEC_PER_MSEC);
312
313         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
314     }
315
316     /* When we reach this we're officially fucked! */
317 }
318
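/* Lower the watermark again, but only after things have been stable for at
 * least TSCHED_WATERMARK_VERIFY_AFTER_USEC. */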
319 static void decrease_watermark(struct userdata *u) {
320     size_t old_watermark;
321     pa_usec_t now;
322
323     pa_assert(u);
324     pa_assert(u->use_tsched);
325
326     now = pa_rtclock_now();
327
328     if (u->watermark_dec_not_before <= 0)
329         goto restart;
330
331     if (u->watermark_dec_not_before > now)
332         return;
333
334     old_watermark = u->tsched_watermark;
335
336     if (u->tsched_watermark < u->watermark_dec_step)
337         u->tsched_watermark = u->tsched_watermark / 2;
338     else
339         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
340
341     fix_tsched_watermark(u);
342
343     if (old_watermark != u->tsched_watermark)
344         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
346
347     /* We don't change the latency range */
348
349 restart:
350     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
351 }
352
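/* Split the currently requested latency into the time we may sleep and the
 * time reserved for processing (the watermark). */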
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
354     pa_usec_t wm, usec;
355
356     pa_assert(sleep_usec);
357     pa_assert(process_usec);
358
359     pa_assert(u);
360     pa_assert(u->use_tsched);
361
362     usec = pa_source_get_requested_latency_within_thread(u->source);
363
364     if (usec == (pa_usec_t) -1)
365         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369     if (wm > usec)
370         wm = usec/2;
371
372     *sleep_usec = usec - wm;
373     *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377                  (unsigned long) (usec / PA_USEC_PER_MSEC),
378                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381 }
382
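/* Try to recover from an ALSA error (e.g. overrun or system suspend) via
 * snd_pcm_recover(); on success the stream is restarted on the next loop
 * iteration because u->first is set. */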
383 static int try_recover(struct userdata *u, const char *call, int err) {
384     pa_assert(u);
385     pa_assert(call);
386     pa_assert(err < 0);
387
388     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
389
390     pa_assert(err != -EAGAIN);
391
392     if (err == -EPIPE)
393         pa_log_debug("%s: Buffer overrun!", call);
394
395     if (err == -ESTRPIPE)
396         pa_log_debug("%s: System suspended!", call);
397
398     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399         pa_log("%s: %s", call, pa_alsa_strerror(err));
400         return -1;
401     }
402
403     u->first = TRUE;
404     return 0;
405 }
406
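/* Compute how much room is left in the hardware buffer before an overrun
 * and adjust the watermark: grow it on (near-)overruns, shrink it when a
 * timer wakeup found plenty of headroom. */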
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
408     size_t left_to_record;
409     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410     pa_bool_t overrun = FALSE;
411
412     /* We use <= instead of < for this check here because an overrun
413      * only happens after the last sample was processed, not merely when
414      * it is removed from the buffer. This is particularly important
415      * when block transfer is used. */
416
417     if (n_bytes <= rec_space)
418         left_to_record = rec_space - n_bytes;
419     else {
420
421         /* We got a dropout. What a mess! */
422         left_to_record = 0;
423         overrun = TRUE;
424
425 #ifdef DEBUG_TIMING
426         PA_DEBUG_TRAP;
427 #endif
428
429         if (pa_log_ratelimit(PA_LOG_INFO))
430             pa_log_info("Overrun!");
431     }
432
433 #ifdef DEBUG_TIMING
434     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
435 #endif
436
437     if (u->use_tsched) {
438         pa_bool_t reset_not_before = TRUE;
439
440         if (overrun || left_to_record < u->watermark_inc_threshold)
441             increase_watermark(u);
442         else if (left_to_record > u->watermark_dec_threshold) {
443             reset_not_before = FALSE;
444
445             /* We decrease the watermark only if we have actually
446              * been woken up by a timeout. If something else woke
447              * us up it's too easy to fulfill the deadlines... */
448
449             if (on_timeout)
450                 decrease_watermark(u);
451         }
452
453         if (reset_not_before)
454             u->watermark_dec_not_before = 0;
455     }
456
457     return left_to_record;
458 }
459
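/* Read as much as possible from the device using mmap access, posting the
 * captured data to the source without copying, and work out how long we may
 * sleep when timer-based scheduling is used. */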
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
461     pa_bool_t work_done = FALSE;
462     pa_usec_t max_sleep_usec = 0, process_usec = 0;
463     size_t left_to_record;
464     unsigned j = 0;
465
466     pa_assert(u);
467     pa_source_assert_ref(u->source);
468
469     if (u->use_tsched)
470         hw_sleep_time(u, &max_sleep_usec, &process_usec);
471
472     for (;;) {
473         snd_pcm_sframes_t n;
474         size_t n_bytes;
475         int r;
476         pa_bool_t after_avail = TRUE;
477
478         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
479
480             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
481                 continue;
482
483             return r;
484         }
485
486         n_bytes = (size_t) n * u->frame_size;
487
488 #ifdef DEBUG_TIMING
489         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
490 #endif
491
492         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
493         on_timeout = FALSE;
494
495         if (u->use_tsched)
496             if (!polled &&
497                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
498 #ifdef DEBUG_TIMING
499                 pa_log_debug("Not reading, because too early.");
500 #endif
501                 break;
502             }
503
504         if (PA_UNLIKELY(n_bytes <= 0)) {
505
506             if (polled)
507                 PA_ONCE_BEGIN {
508                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
512                            pa_strnull(dn));
513                     pa_xfree(dn);
514                 } PA_ONCE_END;
515
516 #ifdef DEBUG_TIMING
517             pa_log_debug("Not reading, because not necessary.");
518 #endif
519             break;
520         }
521
522
523         if (++j > 10) {
524 #ifdef DEBUG_TIMING
525             pa_log_debug("Not filling up, because already too many iterations.");
526 #endif
527
528             break;
529         }
530
531         polled = FALSE;
532
533 #ifdef DEBUG_TIMING
534         pa_log_debug("Reading");
535 #endif
536
537         for (;;) {
538             pa_memchunk chunk;
539             void *p;
540             int err;
541             const snd_pcm_channel_area_t *areas;
542             snd_pcm_uframes_t offset, frames;
543             snd_pcm_sframes_t sframes;
544
545             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
547
548             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
549
550                 if (!after_avail && err == -EAGAIN)
551                     break;
552
553                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
554                     continue;
555
556                 return r;
557             }
558
559             /* Make sure that if these memblocks need to be copied they will fit into one slot */
560             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
561                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
562
563             if (!after_avail && frames == 0)
564                 break;
565
566             pa_assert(frames > 0);
567             after_avail = FALSE;
568
569             /* Check that these are multiples of 8 bits */
570             pa_assert((areas[0].first & 7) == 0);
571             pa_assert((areas[0].step & 7) == 0);
572
573             /* We assume a single interleaved memory buffer */
574             pa_assert((areas[0].first >> 3) == 0);
575             pa_assert((areas[0].step >> 3) == u->frame_size);
576
577             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
578
579             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580             chunk.length = pa_memblock_get_length(chunk.memblock);
581             chunk.index = 0;
582
583             pa_source_post(u->source, &chunk);
584             pa_memblock_unref_fixed(chunk.memblock);
585
586             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
587
588                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
589                     continue;
590
591                 return r;
592             }
593
594             work_done = TRUE;
595
596             u->read_count += frames * u->frame_size;
597
598 #ifdef DEBUG_TIMING
599             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
600 #endif
601
602             if ((size_t) frames * u->frame_size >= n_bytes)
603                 break;
604
605             n_bytes -= (size_t) frames * u->frame_size;
606         }
607     }
608
609     if (u->use_tsched) {
610         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
611
612         if (*sleep_usec > process_usec)
613             *sleep_usec -= process_usec;
614         else
615             *sleep_usec = 0;
616     }
617
618     return work_done ? 1 : 0;
619 }
620
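/* Like mmap_read(), but for devices without mmap support: data is read with
 * snd_pcm_readi() into newly allocated memblocks. */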
621 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
622     int work_done = FALSE;
623     pa_usec_t max_sleep_usec = 0, process_usec = 0;
624     size_t left_to_record;
625     unsigned j = 0;
626
627     pa_assert(u);
628     pa_source_assert_ref(u->source);
629
630     if (u->use_tsched)
631         hw_sleep_time(u, &max_sleep_usec, &process_usec);
632
633     for (;;) {
634         snd_pcm_sframes_t n;
635         size_t n_bytes;
636         int r;
637         pa_bool_t after_avail = TRUE;
638
639         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
640
641             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
642                 continue;
643
644             return r;
645         }
646
647         n_bytes = (size_t) n * u->frame_size;
648         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
649         on_timeout = FALSE;
650
651         if (u->use_tsched)
652             if (!polled &&
653                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
654                 break;
655
656         if (PA_UNLIKELY(n_bytes <= 0)) {
657
658             if (polled)
659                 PA_ONCE_BEGIN {
660                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
661                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
662                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
663                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
664                            pa_strnull(dn));
665                     pa_xfree(dn);
666                 } PA_ONCE_END;
667
668             break;
669         }
670
671         if (++j > 10) {
672 #ifdef DEBUG_TIMING
673             pa_log_debug("Not filling up, because already too many iterations.");
674 #endif
675
676             break;
677         }
678
679         polled = FALSE;
680
681         for (;;) {
682             void *p;
683             snd_pcm_sframes_t frames;
684             pa_memchunk chunk;
685
686             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
687
688             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
689
690             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
691                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
692
693 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
694
695             p = pa_memblock_acquire(chunk.memblock);
696             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
697             pa_memblock_release(chunk.memblock);
698
699             if (PA_UNLIKELY(frames < 0)) {
700                 pa_memblock_unref(chunk.memblock);
701
702                 if (!after_avail && (int) frames == -EAGAIN)
703                     break;
704
705                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
706                     continue;
707
708                 return r;
709             }
710
711             if (!after_avail && frames == 0) {
712                 pa_memblock_unref(chunk.memblock);
713                 break;
714             }
715
716             pa_assert(frames > 0);
717             after_avail = FALSE;
718
719             chunk.index = 0;
720             chunk.length = (size_t) frames * u->frame_size;
721
722             pa_source_post(u->source, &chunk);
723             pa_memblock_unref(chunk.memblock);
724
725             work_done = TRUE;
726
727             u->read_count += frames * u->frame_size;
728
729 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
730
731             if ((size_t) frames * u->frame_size >= n_bytes)
732                 break;
733
734             n_bytes -= (size_t) frames * u->frame_size;
735         }
736     }
737
738     if (u->use_tsched) {
739         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
740
741         if (*sleep_usec > process_usec)
742             *sleep_usec -= process_usec;
743         else
744             *sleep_usec = 0;
745     }
746
747     return work_done ? 1 : 0;
748 }
749
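/* Feed the current device position (read counter plus driver-reported delay)
 * into the time smoother, rate-limited by an exponentially growing interval. */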
750 static void update_smoother(struct userdata *u) {
751     snd_pcm_sframes_t delay = 0;
752     uint64_t position;
753     int err;
754     pa_usec_t now1 = 0, now2;
755     snd_pcm_status_t *status;
756
757     snd_pcm_status_alloca(&status);
758
759     pa_assert(u);
760     pa_assert(u->pcm_handle);
761
762     /* Let's update the time smoother */
763
764     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
765         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
766         return;
767     }
768
769     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
770         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
771     else {
772         snd_htimestamp_t htstamp = { 0, 0 };
773         snd_pcm_status_get_htstamp(status, &htstamp);
774         now1 = pa_timespec_load(&htstamp);
775     }
776
777     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
778     if (now1 <= 0)
779         now1 = pa_rtclock_now();
780
781     /* Check whether the time since the last update is longer than the interval */
782     if (u->last_smoother_update > 0)
783         if (u->last_smoother_update + u->smoother_interval > now1)
784             return;
785
786     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
787     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
788
789     pa_smoother_put(u->smoother, now1, now2);
790
791     u->last_smoother_update = now1;
792     /* exponentially increase the update interval up to the MAX limit */
793     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
794 }
795
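/* Estimate the current capture latency from the smoothed device time versus
 * the amount of data actually read so far. */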
796 static pa_usec_t source_get_latency(struct userdata *u) {
797     int64_t delay;
798     pa_usec_t now1, now2;
799
800     pa_assert(u);
801
802     now1 = pa_rtclock_now();
803     now2 = pa_smoother_get(u->smoother, now1);
804
805     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
806
807     return delay >= 0 ? (pa_usec_t) delay : 0;
808 }
809
810 static int build_pollfd(struct userdata *u) {
811     pa_assert(u);
812     pa_assert(u->pcm_handle);
813
814     if (u->alsa_rtpoll_item)
815         pa_rtpoll_item_free(u->alsa_rtpoll_item);
816
817     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
818         return -1;
819
820     return 0;
821 }
822
823 /* Called from IO context */
824 static int suspend(struct userdata *u) {
825     pa_assert(u);
826     pa_assert(u->pcm_handle);
827
828     pa_smoother_pause(u->smoother, pa_rtclock_now());
829
830     /* Let's suspend */
831     snd_pcm_close(u->pcm_handle);
832     u->pcm_handle = NULL;
833
834     if (u->alsa_rtpoll_item) {
835         pa_rtpoll_item_free(u->alsa_rtpoll_item);
836         u->alsa_rtpoll_item = NULL;
837     }
838
839     pa_log_info("Device suspended...");
840
841     return 0;
842 }
843
844 /* Called from IO context */
845 static int update_sw_params(struct userdata *u) {
846     snd_pcm_uframes_t avail_min;
847     int err;
848
849     pa_assert(u);
850
851     /* Use the full buffer if no one asked us for anything specific */
852     u->hwbuf_unused = 0;
853
854     if (u->use_tsched) {
855         pa_usec_t latency;
856
857         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
858             size_t b;
859
860             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
861
862             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
863
864             /* We need at least one sample in our buffer */
865
866             if (PA_UNLIKELY(b < u->frame_size))
867                 b = u->frame_size;
868
869             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
870         }
871
872         fix_min_sleep_wakeup(u);
873         fix_tsched_watermark(u);
874     }
875
876     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
877
878     avail_min = 1;
879
880     if (u->use_tsched) {
881         pa_usec_t sleep_usec, process_usec;
882
883         hw_sleep_time(u, &sleep_usec, &process_usec);
884         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
885     }
886
887     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
888
889     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
890         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
891         return err;
892     }
893
894     return 0;
895 }
896
897 /* Called from IO context */
898 static int unsuspend(struct userdata *u) {
899     pa_sample_spec ss;
900     int err;
901     pa_bool_t b, d;
902     snd_pcm_uframes_t period_size, buffer_size;
903
904     pa_assert(u);
905     pa_assert(!u->pcm_handle);
906
907     pa_log_info("Trying resume...");
908
909     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
910                             SND_PCM_NONBLOCK|
911                             SND_PCM_NO_AUTO_RESAMPLE|
912                             SND_PCM_NO_AUTO_CHANNELS|
913                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
914         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
915         goto fail;
916     }
917
918     ss = u->source->sample_spec;
919     period_size = u->fragment_size / u->frame_size;
920     buffer_size = u->hwbuf_size / u->frame_size;
921     b = u->use_mmap;
922     d = u->use_tsched;
923
924     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
925         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
926         goto fail;
927     }
928
929     if (b != u->use_mmap || d != u->use_tsched) {
930         pa_log_warn("Resume failed, couldn't get original access mode.");
931         goto fail;
932     }
933
934     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
935         pa_log_warn("Resume failed, couldn't restore original sample settings.");
936         goto fail;
937     }
938
939     if (period_size*u->frame_size != u->fragment_size ||
940         buffer_size*u->frame_size != u->hwbuf_size) {
941         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New: %lu/%lu)",
942                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
943                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
944         goto fail;
945     }
946
947     if (update_sw_params(u) < 0)
948         goto fail;
949
950     if (build_pollfd(u) < 0)
951         goto fail;
952
953     /* FIXME: We need to reload the volume somehow */
954
955     u->read_count = 0;
956     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
957     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
958     u->last_smoother_update = 0;
959
960     u->first = TRUE;
961
962     pa_log_info("Resumed successfully...");
963
964     return 0;
965
966 fail:
967     if (u->pcm_handle) {
968         snd_pcm_close(u->pcm_handle);
969         u->pcm_handle = NULL;
970     }
971
972     return -PA_ERR_IO;
973 }
974
975 /* Called from IO context */
976 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
977     struct userdata *u = PA_SOURCE(o)->userdata;
978
979     switch (code) {
980
981         case PA_SOURCE_MESSAGE_GET_LATENCY: {
982             pa_usec_t r = 0;
983
984             if (u->pcm_handle)
985                 r = source_get_latency(u);
986
987             *((pa_usec_t*) data) = r;
988
989             return 0;
990         }
991
992         case PA_SOURCE_MESSAGE_SET_STATE:
993
994             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
995
996                 case PA_SOURCE_SUSPENDED: {
997                     int r;
998
999                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1000
1001                     if ((r = suspend(u)) < 0)
1002                         return r;
1003
1004                     break;
1005                 }
1006
1007                 case PA_SOURCE_IDLE:
1008                 case PA_SOURCE_RUNNING: {
1009                     int r;
1010
1011                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1012                         if (build_pollfd(u) < 0)
1013                             return -PA_ERR_IO;
1014                     }
1015
1016                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1017                         if ((r = unsuspend(u)) < 0)
1018                             return r;
1019                     }
1020
1021                     break;
1022                 }
1023
1024                 case PA_SOURCE_UNLINKED:
1025                 case PA_SOURCE_INIT:
1026                 case PA_SOURCE_INVALID_STATE:
1027                     ;
1028             }
1029
1030             break;
1031     }
1032
1033     return pa_source_process_msg(o, code, data, offset, chunk);
1034 }
1035
1036 /* Called from main context */
1037 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1038     pa_source_state_t old_state;
1039     struct userdata *u;
1040
1041     pa_source_assert_ref(s);
1042     pa_assert_se(u = s->userdata);
1043
1044     old_state = pa_source_get_state(u->source);
1045
1046     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1047         reserve_done(u);
1048     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1049         if (reserve_init(u, u->device_name) < 0)
1050             return -PA_ERR_BUSY;
1051
1052     return 0;
1053 }
1054
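/* Mixer element callback used when volume is handled from the main thread:
 * re-read volume and mute whenever the ALSA control values change. */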
1055 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1056     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1057
1058     pa_assert(u);
1059     pa_assert(u->mixer_handle);
1060
1061     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1062         return 0;
1063
1064     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1065         return 0;
1066
1067     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1068         pa_source_get_volume(u->source, TRUE);
1069         pa_source_get_mute(u->source, TRUE);
1070     }
1071
1072     return 0;
1073 }
1074
1075 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1076     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1077
1078     pa_assert(u);
1079     pa_assert(u->mixer_handle);
1080
1081     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1082         return 0;
1083
1084     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1085         return 0;
1086
1087     if (mask & SND_CTL_EVENT_MASK_VALUE)
1088         pa_source_update_volume_and_mute(u->source);
1089
1090     return 0;
1091 }
1092
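/* Read the hardware volume from the mixer path and propagate it to the
 * source, resetting the software volume if the hardware has a dB scale. */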
1093 static void source_get_volume_cb(pa_source *s) {
1094     struct userdata *u = s->userdata;
1095     pa_cvolume r;
1096     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1097
1098     pa_assert(u);
1099     pa_assert(u->mixer_path);
1100     pa_assert(u->mixer_handle);
1101
1102     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1103         return;
1104
1105     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1106     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1107
1108     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1109
1110     if (u->mixer_path->has_dB) {
1111         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1112
1113         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1114     }
1115
1116     if (pa_cvolume_equal(&u->hardware_volume, &r))
1117         return;
1118
1119     s->real_volume = u->hardware_volume = r;
1120
1121     /* Hmm, so the hardware volume changed, let's reset our software volume */
1122     if (u->mixer_path->has_dB)
1123         pa_source_set_soft_volume(s, NULL);
1124 }
1125
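/* Write the requested volume to the hardware as closely as possible and put
 * whatever difference remains (if significant) into the software volume. */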
1126 static void source_set_volume_cb(pa_source *s) {
1127     struct userdata *u = s->userdata;
1128     pa_cvolume r;
1129     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1130     pa_bool_t sync_volume = !!(s->flags & PA_SOURCE_SYNC_VOLUME);
1131
1132     pa_assert(u);
1133     pa_assert(u->mixer_path);
1134     pa_assert(u->mixer_handle);
1135
1136     /* Shift up by the base volume */
1137     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1138
1139     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
1140         return;
1141
1142     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1143     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1144
1145     u->hardware_volume = r;
1146
1147     if (u->mixer_path->has_dB) {
1148         pa_cvolume new_soft_volume;
1149         pa_bool_t accurate_enough;
1150         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1151
1152         /* Match exactly what the user requested by software */
1153         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1154
1155         /* If the adjustment to do in software is only minimal we
1156          * can skip it. That saves us CPU at the expense of a bit of
1157          * accuracy */
1158         accurate_enough =
1159             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1160             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1161
1162         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1163         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1164         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1165         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1166         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1167                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1168                      pa_yes_no(accurate_enough));
1169         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1170
1171         if (!accurate_enough)
1172             s->soft_volume = new_soft_volume;
1173
1174     } else {
1175         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1176
1177         /* We can't match exactly what the user requested, hence let's
1178          * at least tell the user about it */
1179
1180         s->real_volume = r;
1181     }
1182 }
1183
1184 static void source_write_volume_cb(pa_source *s) {
1185     struct userdata *u = s->userdata;
1186     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1187
1188     pa_assert(u);
1189     pa_assert(u->mixer_path);
1190     pa_assert(u->mixer_handle);
1191     pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);
1192
1193     /* Shift up by the base volume */
1194     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1195
1196     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1197         pa_log_error("Writing HW volume failed");
1198     else {
1199         pa_cvolume tmp_vol;
1200         pa_bool_t accurate_enough;
1201
1202         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1203         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1204
1205         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1206         accurate_enough =
1207             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1208             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1209
1210         if (!accurate_enough) {
1211             union {
1212                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1213                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1214             } vol;
1215
1216             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1217                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1218                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1219             pa_log_debug("                                           in dB: %s (request) != %s",
1220                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1221                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1222         }
1223     }
1224 }
1225
1226 static void source_get_mute_cb(pa_source *s) {
1227     struct userdata *u = s->userdata;
1228     pa_bool_t b;
1229
1230     pa_assert(u);
1231     pa_assert(u->mixer_path);
1232     pa_assert(u->mixer_handle);
1233
1234     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1235         return;
1236
1237     s->muted = b;
1238 }
1239
1240 static void source_set_mute_cb(pa_source *s) {
1241     struct userdata *u = s->userdata;
1242
1243     pa_assert(u);
1244     pa_assert(u->mixer_path);
1245     pa_assert(u->mixer_handle);
1246
1247     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1248 }
1249
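/* Choose between hardware and software volume/mute handling depending on
 * what the probed mixer path supports, and install the matching callbacks. */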
1250 static void mixer_volume_init(struct userdata *u) {
1251     pa_assert(u);
1252
1253     if (!u->mixer_path->has_volume) {
1254         pa_source_set_write_volume_callback(u->source, NULL);
1255         pa_source_set_get_volume_callback(u->source, NULL);
1256         pa_source_set_set_volume_callback(u->source, NULL);
1257
1258         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1259     } else {
1260         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1261         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1262
1263         if (u->mixer_path->has_dB && u->sync_volume) {
1264             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1265             pa_log_info("Successfully enabled synchronous volume.");
1266         } else
1267             pa_source_set_write_volume_callback(u->source, NULL);
1268
1269         if (u->mixer_path->has_dB) {
1270             pa_source_enable_decibel_volume(u->source, TRUE);
1271             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1272
1273             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1274             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1275
1276             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1277         } else {
1278             pa_source_enable_decibel_volume(u->source, FALSE);
1279             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1280
1281             u->source->base_volume = PA_VOLUME_NORM;
1282             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1283         }
1284
1285         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1286     }
1287
1288     if (!u->mixer_path->has_mute) {
1289         pa_source_set_get_mute_callback(u->source, NULL);
1290         pa_source_set_set_mute_callback(u->source, NULL);
1291         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1292     } else {
1293         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1294         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1295         pa_log_info("Using hardware mute control.");
1296     }
1297 }
1298
1299 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1300     struct userdata *u = s->userdata;
1301     pa_alsa_port_data *data;
1302
1303     pa_assert(u);
1304     pa_assert(p);
1305     pa_assert(u->mixer_handle);
1306
1307     data = PA_DEVICE_PORT_DATA(p);
1308
1309     pa_assert_se(u->mixer_path = data->path);
1310     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1311
1312     mixer_volume_init(u);
1313
1314     if (data->setting)
1315         pa_alsa_setting_select(data->setting, u->mixer_handle);
1316
1317     if (s->set_mute)
1318         s->set_mute(s);
1319     if (s->set_volume)
1320         s->set_volume(s);
1321
1322     return 0;
1323 }
1324
1325 static void source_update_requested_latency_cb(pa_source *s) {
1326     struct userdata *u = s->userdata;
1327     pa_assert(u);
1328     pa_assert(u->use_tsched); /* only when timer scheduling is used
1329                                * can we dynamically adjust the
1330                                * latency */
1331
1332     if (!u->pcm_handle)
1333         return;
1334
1335     update_sw_params(u);
1336 }
1337
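/* The IO thread: starts capture, reads data from the device, updates the
 * smoother, programs the rtpoll timer for timer-based scheduling and handles
 * poll events as well as recoverable ALSA errors. */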
1338 static void thread_func(void *userdata) {
1339     struct userdata *u = userdata;
1340     unsigned short revents = 0;
1341
1342     pa_assert(u);
1343
1344     pa_log_debug("Thread starting up");
1345
1346     if (u->core->realtime_scheduling)
1347         pa_make_realtime(u->core->realtime_priority);
1348
1349     pa_thread_mq_install(&u->thread_mq);
1350
1351     for (;;) {
1352         int ret;
1353         pa_usec_t rtpoll_sleep = 0;
1354
1355 #ifdef DEBUG_TIMING
1356         pa_log_debug("Loop");
1357 #endif
1358
1359         /* Read some data and pass it to the sources */
1360         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1361             int work_done;
1362             pa_usec_t sleep_usec = 0;
1363             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1364
1365             if (u->first) {
1366                 pa_log_info("Starting capture.");
1367                 snd_pcm_start(u->pcm_handle);
1368
1369                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1370
1371                 u->first = FALSE;
1372             }
1373
1374             if (u->use_mmap)
1375                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1376             else
1377                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1378
1379             if (work_done < 0)
1380                 goto fail;
1381
1382 /*             pa_log_debug("work_done = %i", work_done); */
1383
1384             if (work_done)
1385                 update_smoother(u);
1386
1387             if (u->use_tsched) {
1388                 pa_usec_t cusec;
1389
1390                 /* OK, the capture buffer is now empty, let's
1391                  * calculate when to wake up next */
1392
1393 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1394
1395                 /* Convert from the sound card time domain to the
1396                  * system time domain */
1397                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1398
1399 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1400
1401                 /* We don't trust the conversion, so we wake up whatever comes first */
1402                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1403             }
1404         }
1405
1406         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1407             pa_usec_t volume_sleep;
1408             pa_source_volume_change_apply(u->source, &volume_sleep);
1409             if (volume_sleep > 0)
1410                 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1411         }
1412
1413         if (rtpoll_sleep > 0)
1414             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1415         else
1416             pa_rtpoll_set_timer_disabled(u->rtpoll);
1417
1418         /* Hmm, nothing to do. Let's sleep */
1419         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1420             goto fail;
1421
1422         if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1423             pa_source_volume_change_apply(u->source, NULL);
1424
1425         if (ret == 0)
1426             goto finish;
1427
1428         /* Tell ALSA about this and process its response */
1429         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1430             struct pollfd *pollfd;
1431             int err;
1432             unsigned n;
1433
1434             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1435
1436             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1437                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1438                 goto fail;
1439             }
1440
1441             if (revents & ~POLLIN) {
1442                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1443                     goto fail;
1444
1445                 u->first = TRUE;
1446             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1447                 pa_log_debug("Wakeup from ALSA!");
1448
1449         } else
1450             revents = 0;
1451     }
1452
1453 fail:
1454     /* If this was not a regular exit from the loop we have to continue
1455      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1456     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1457     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1458
1459 finish:
1460     pa_log_debug("Thread shutting down");
1461 }
1462
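/* Derive the source name from the module arguments, falling back to an
 * automatically generated "alsa_input.<device>[.<mapping>]" name. */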
1463 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1464     const char *n;
1465     char *t;
1466
1467     pa_assert(data);
1468     pa_assert(ma);
1469     pa_assert(device_name);
1470
1471     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1472         pa_source_new_data_set_name(data, n);
1473         data->namereg_fail = TRUE;
1474         return;
1475     }
1476
1477     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1478         data->namereg_fail = TRUE;
1479     else {
1480         n = device_id ? device_id : device_name;
1481         data->namereg_fail = FALSE;
1482     }
1483
1484     if (mapping)
1485         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1486     else
1487         t = pa_sprintf_malloc("alsa_input.%s", n);
1488
1489     pa_source_new_data_set_name(data, t);
1490     pa_xfree(t);
1491 }
1492
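/* Open the mixer belonging to the PCM and probe either the explicitly
 * requested element or the path set of the given mapping. */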
1493 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1494
1495     if (!mapping && !element)
1496         return;
1497
1498     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1499         pa_log_info("Failed to find a working mixer device.");
1500         return;
1501     }
1502
1503     if (element) {
1504
1505         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1506             goto fail;
1507
1508         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1509             goto fail;
1510
1511         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1512         pa_alsa_path_dump(u->mixer_path);
1513     } else {
1514
1515         if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1516             goto fail;
1517
1518         pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1519     }
1520
1521     return;
1522
1523 fail:
1524
1525     if (u->mixer_path_set) {
1526         pa_alsa_path_set_free(u->mixer_path_set);
1527         u->mixer_path_set = NULL;
1528     } else if (u->mixer_path) {
1529         pa_alsa_path_free(u->mixer_path);
1530         u->mixer_path = NULL;
1531     }
1532
1533     if (u->mixer_handle) {
1534         snd_mixer_close(u->mixer_handle);
1535         u->mixer_handle = NULL;
1536     }
1537 }
1538
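/* Select the mixer path matching the active port (or the only path found),
 * initialize volume/mute handling and register change callbacks if needed. */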
1539 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1540     pa_bool_t need_mixer_callback = FALSE;
1541
1542     pa_assert(u);
1543
1544     if (!u->mixer_handle)
1545         return 0;
1546
1547     if (u->source->active_port) {
1548         pa_alsa_port_data *data;
1549
1550         /* We have a list of supported paths, so let's activate the
1551          * one that has been chosen as active */
1552
1553         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1554         u->mixer_path = data->path;
1555
1556         pa_alsa_path_select(data->path, u->mixer_handle);
1557
1558         if (data->setting)
1559             pa_alsa_setting_select(data->setting, u->mixer_handle);
1560
1561     } else {
1562
1563         if (!u->mixer_path && u->mixer_path_set)
1564             u->mixer_path = u->mixer_path_set->paths;
1565
1566         if (u->mixer_path) {
1567             /* Hmm, we have only a single path, so let's activate it */
1568
1569             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1570
1571             if (u->mixer_path->settings)
1572                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1573         } else
1574             return 0;
1575     }
1576
1577     mixer_volume_init(u);
1578
1579     /* Will we need to register callbacks? */
1580     if (u->mixer_path_set && u->mixer_path_set->paths) {
1581         pa_alsa_path *p;
1582
1583         PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1584             if (p->has_volume || p->has_mute)
1585                 need_mixer_callback = TRUE;
1586         }
1587     }
1588     else if (u->mixer_path)
1589         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1590
1591     if (need_mixer_callback) {
1592         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1593         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1594             u->mixer_pd = pa_alsa_mixer_pdata_new();
1595             mixer_callback = io_mixer_callback;
1596
1597             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1598                 pa_log("Failed to initialize file descriptor monitoring");
1599                 return -1;
1600             }
1601         } else {
1602             u->mixer_fdl = pa_alsa_fdlist_new();
1603             mixer_callback = ctl_mixer_callback;
1604
1605             if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1606                 pa_log("Failed to initialize file descriptor monitoring");
1607                 return -1;
1608             }
1609         }
1610
1611         if (u->mixer_path_set)
1612             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1613         else
1614             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1615     }
1616
1617     return 0;
1618 }
1619
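/* Create and set up a new ALSA source from the given module arguments,
 * card and mapping. */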
pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {

    struct userdata *u = NULL;
    const char *dev_id = NULL;
    pa_sample_spec ss, requested_ss;
    pa_channel_map map;
    uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
    snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
    size_t frame_size;
    pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
    pa_source_new_data data;
    pa_alsa_profile_set *profile_set = NULL;

    pa_assert(m);
    pa_assert(ma);

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
        pa_log("Failed to parse sample specification and channel map");
        goto fail;
    }

    requested_ss = ss;
    frame_size = pa_frame_size(&ss);

    nfrags = m->core->default_n_fragments;
    frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
    if (frag_size <= 0)
        frag_size = (uint32_t) frame_size;
    tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
    tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);

    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

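    /* Worked example (illustrative figures only): with a 44.1 kHz S16LE
     * stereo sample spec a frame is 4 bytes, so a 25 ms fragment comes to
     * roughly 4.4 kB and four such fragments give a ~17.6 kB (~100 ms)
     * hardware buffer; the tsched buffer is sized independently of this. */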
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->sync_volume = sync_volume;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

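    /* The smoother correlates the sound card's sample counter with the
     * system clock, so that latency queries can be answered smoothly in
     * between hardware position updates. */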
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
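    /* device_id= takes precedence over device=, falling back to
     * DEFAULT_DEVICE; the resulting name is what gets reserved below. */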

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

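    /* The PCM device can be opened in three ways: through an explicit card
     * mapping, by automatic mapping lookup via device_id=, or from a plain
     * ALSA device string.  On return, b and d report whether mmap and
     * timer-based scheduling could actually be used. */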
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

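    /* The ports added above (one per probed mixer path, if any) are handed
     * to pa_source_new(); switching between them later goes through
     * source_set_port_cb(), which redoes the path and setting selection
     * also performed in setup_mixer(). */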
    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

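    /* In timer-based scheduling mode the watermark determines how much
     * captured data may pile up in the hardware buffer before the I/O
     * thread is woken to read it.  As an illustrative figure, a 20 ms
     * watermark at 44.1 kHz S16LE stereo is about 3.5 kB; the value is
     * adjusted at runtime using the inc/dec steps and thresholds set up
     * below. */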
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get the initial mixer settings: if the caller supplied a volume or
     * mute state, push it to the hardware; otherwise read the current
     * hardware state back into the source. */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

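/* Tear-down happens roughly in reverse order of construction: unlink the
 * source so no new data is requested, shut down the I/O thread, and only
 * then release the ALSA PCM and mixer handles and the remaining helpers. */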
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->source)
        pa_source_unlink(u->source);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_source_free(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}