1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   Lesser General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/i18n.h>
32 #include <pulse/rtclock.h>
33 #include <pulse/timeval.h>
34 #include <pulse/volume.h>
35 #include <pulse/xmalloc.h>
36
37 #include <pulsecore/core.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
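/* Per-source instance state. The buffer-related sizes below are kept in
 * bytes; the tsched/watermark members are only used when timer-based
 * scheduling (use_tsched) is enabled. */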
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     pa_alsa_fdlist *mixer_fdl;
94     pa_alsa_mixer_pdata *mixer_pd;
95     snd_mixer_t *mixer_handle;
96     pa_alsa_path_set *mixer_path_set;
97     pa_alsa_path *mixer_path;
98
99     pa_cvolume hardware_volume;
100
101     size_t
102         frame_size,
103         fragment_size,
104         hwbuf_size,
105         tsched_watermark,
106         hwbuf_unused,
107         min_sleep,
108         min_wakeup,
109         watermark_inc_step,
110         watermark_dec_step,
111         watermark_inc_threshold,
112         watermark_dec_threshold;
113
114     pa_usec_t watermark_dec_not_before;
115
116     char *device_name;  /* name of the PCM device */
117     char *control_device; /* name of the control device */
118
119     pa_bool_t use_mmap:1, use_tsched:1;
120
121     pa_bool_t first;
122
123     pa_rtpoll_item *alsa_rtpoll_item;
124
125     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
126
127     pa_smoother *smoother;
128     uint64_t read_count;
129     pa_usec_t smoother_interval;
130     pa_usec_t last_smoother_update;
131
132     pa_reserve_wrapper *reserve;
133     pa_hook_slot *reserve_slot;
134     pa_reserve_monitor_wrapper *monitor;
135     pa_hook_slot *monitor_slot;
136 };
137
138 static void userdata_free(struct userdata *u);
139
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
141     pa_assert(r);
142     pa_assert(u);
143
144     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145         return PA_HOOK_CANCEL;
146
147     return PA_HOOK_OK;
148 }
149
150 static void reserve_done(struct userdata *u) {
151     pa_assert(u);
152
153     if (u->reserve_slot) {
154         pa_hook_slot_free(u->reserve_slot);
155         u->reserve_slot = NULL;
156     }
157
158     if (u->reserve) {
159         pa_reserve_wrapper_unref(u->reserve);
160         u->reserve = NULL;
161     }
162 }
163
164 static void reserve_update(struct userdata *u) {
165     const char *description;
166     pa_assert(u);
167
168     if (!u->source || !u->reserve)
169         return;
170
171     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
173 }
174
175 static int reserve_init(struct userdata *u, const char *dname) {
176     char *rname;
177
178     pa_assert(u);
179     pa_assert(dname);
180
181     if (u->reserve)
182         return 0;
183
184     if (pa_in_system_mode())
185         return 0;
186
187     if (!(rname = pa_alsa_get_reserve_name(dname)))
188         return 0;
189
190     /* We are resuming, try to lock the device */
191     u->reserve = pa_reserve_wrapper_get(u->core, rname);
192     pa_xfree(rname);
193
194     if (!(u->reserve))
195         return -1;
196
197     reserve_update(u);
198
199     pa_assert(!u->reserve_slot);
200     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
201
202     return 0;
203 }
204
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
206     pa_bool_t b;
207
208     pa_assert(w);
209     pa_assert(u);
210
211     b = PA_PTR_TO_UINT(busy) && !u->reserve;
212
213     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
214     return PA_HOOK_OK;
215 }
216
217 static void monitor_done(struct userdata *u) {
218     pa_assert(u);
219
220     if (u->monitor_slot) {
221         pa_hook_slot_free(u->monitor_slot);
222         u->monitor_slot = NULL;
223     }
224
225     if (u->monitor) {
226         pa_reserve_monitor_wrapper_unref(u->monitor);
227         u->monitor = NULL;
228     }
229 }
230
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
232     char *rname;
233
234     pa_assert(u);
235     pa_assert(dname);
236
237     if (pa_in_system_mode())
238         return 0;
239
240     if (!(rname = pa_alsa_get_reserve_name(dname)))
241         return 0;
242
243     /* We are resuming, try to lock the device */
244     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
245     pa_xfree(rname);
246
247     if (!(u->monitor))
248         return -1;
249
250     pa_assert(!u->monitor_slot);
251     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
252
253     return 0;
254 }
255
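/* Clamp the minimum sleep and wakeup times (in bytes) so that neither ever
 * exceeds half of the usable part of the hardware buffer. */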
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257     size_t max_use, max_use_2;
258
259     pa_assert(u);
260     pa_assert(u->use_tsched);
261
262     max_use = u->hwbuf_size - u->hwbuf_unused;
263     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
272 static void fix_tsched_watermark(struct userdata *u) {
273     size_t max_use;
274     pa_assert(u);
275     pa_assert(u->use_tsched);
276
277     max_use = u->hwbuf_size - u->hwbuf_unused;
278
279     if (u->tsched_watermark > max_use - u->min_sleep)
280         u->tsched_watermark = max_use - u->min_sleep;
281
282     if (u->tsched_watermark < u->min_wakeup)
283         u->tsched_watermark = u->min_wakeup;
284 }
285
286 static void increase_watermark(struct userdata *u) {
287     size_t old_watermark;
288     pa_usec_t old_min_latency, new_min_latency;
289
290     pa_assert(u);
291     pa_assert(u->use_tsched);
292
293     /* First, just try to increase the watermark */
294     old_watermark = u->tsched_watermark;
295     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296     fix_tsched_watermark(u);
297
298     if (old_watermark != u->tsched_watermark) {
299         pa_log_info("Increasing wakeup watermark to %0.2f ms",
300                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
301         return;
302     }
303
304     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305     old_min_latency = u->source->thread_info.min_latency;
306     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
308
309     if (old_min_latency != new_min_latency) {
310         pa_log_info("Increasing minimal latency to %0.2f ms",
311                     (double) new_min_latency / PA_USEC_PER_MSEC);
312
313         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
314     }
315
316         /* When we reach this we're officially fucked! */
317 }
318
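/* Shrink the wakeup watermark again, at most once every
 * TSCHED_WATERMARK_VERIFY_AFTER_USEC, so that it doesn't oscillate. */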
319 static void decrease_watermark(struct userdata *u) {
320     size_t old_watermark;
321     pa_usec_t now;
322
323     pa_assert(u);
324     pa_assert(u->use_tsched);
325
326     now = pa_rtclock_now();
327
328     if (u->watermark_dec_not_before <= 0)
329         goto restart;
330
331     if (u->watermark_dec_not_before > now)
332         return;
333
334     old_watermark = u->tsched_watermark;
335
336     if (u->tsched_watermark < u->watermark_dec_step)
337         u->tsched_watermark = u->tsched_watermark / 2;
338     else
339         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
340
341     fix_tsched_watermark(u);
342
343     if (old_watermark != u->tsched_watermark)
344         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
346
347     /* We don't change the latency range */
348
349 restart:
350     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
351 }
352
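/* Split the currently requested latency into the time we may sleep and the
 * time reserved for actually processing the captured data, based on the
 * current wakeup watermark. */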
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
354     pa_usec_t wm, usec;
355
356     pa_assert(sleep_usec);
357     pa_assert(process_usec);
358
359     pa_assert(u);
360     pa_assert(u->use_tsched);
361
362     usec = pa_source_get_requested_latency_within_thread(u->source);
363
364     if (usec == (pa_usec_t) -1)
365         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369     if (wm > usec)
370         wm = usec/2;
371
372     *sleep_usec = usec - wm;
373     *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377                  (unsigned long) (usec / PA_USEC_PER_MSEC),
378                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381 }
382
383 static int try_recover(struct userdata *u, const char *call, int err) {
384     pa_assert(u);
385     pa_assert(call);
386     pa_assert(err < 0);
387
388     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
389
390     pa_assert(err != -EAGAIN);
391
392     if (err == -EPIPE)
393         pa_log_debug("%s: Buffer overrun!", call);
394
395     if (err == -ESTRPIPE)
396         pa_log_debug("%s: System suspended!", call);
397
398     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399         pa_log("%s: %s", call, pa_alsa_strerror(err));
400         return -1;
401     }
402
403     u->first = TRUE;
404     return 0;
405 }
406
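/* Given how many bytes ALSA says are available, work out how much room is
 * left in the hardware buffer before an overrun, log dropouts, and adapt the
 * tsched watermark in either direction. */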
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
408     size_t left_to_record;
409     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410     pa_bool_t overrun = FALSE;
411
412     /* We use <= instead of < for this check here because an overrun
413      * only happens after the last sample was processed, not already when
414      * it is removed from the buffer. This is particularly important
415      * when block transfer is used. */
416
417     if (n_bytes <= rec_space)
418         left_to_record = rec_space - n_bytes;
419     else {
420
421         /* We got a dropout. What a mess! */
422         left_to_record = 0;
423         overrun = TRUE;
424
425 #ifdef DEBUG_TIMING
426         PA_DEBUG_TRAP;
427 #endif
428
429         if (pa_log_ratelimit(PA_LOG_INFO))
430             pa_log_info("Overrun!");
431     }
432
433 #ifdef DEBUG_TIMING
434     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
435 #endif
436
437     if (u->use_tsched) {
438         pa_bool_t reset_not_before = TRUE;
439
440         if (overrun || left_to_record < u->watermark_inc_threshold)
441             increase_watermark(u);
442         else if (left_to_record > u->watermark_dec_threshold) {
443             reset_not_before = FALSE;
444
445             /* We decrease the watermark only if we have actually
446              * been woken up by a timeout. If something else woke
447              * us up, it's too easy to fulfill the deadlines... */
448
449             if (on_timeout)
450                 decrease_watermark(u);
451         }
452
453         if (reset_not_before)
454             u->watermark_dec_not_before = 0;
455     }
456
457     return left_to_record;
458 }
459
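/* Capture path for mmap access: loop over snd_pcm_avail()/mmap_begin()/
 * mmap_commit(), wrap each mapped area in a fixed memblock and post it to
 * the source without copying. */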
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
461     pa_bool_t work_done = FALSE;
462     pa_usec_t max_sleep_usec = 0, process_usec = 0;
463     size_t left_to_record;
464     unsigned j = 0;
465
466     pa_assert(u);
467     pa_source_assert_ref(u->source);
468
469     if (u->use_tsched)
470         hw_sleep_time(u, &max_sleep_usec, &process_usec);
471
472     for (;;) {
473         snd_pcm_sframes_t n;
474         size_t n_bytes;
475         int r;
476         pa_bool_t after_avail = TRUE;
477
478         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
479
480             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
481                 continue;
482
483             return r;
484         }
485
486         n_bytes = (size_t) n * u->frame_size;
487
488 #ifdef DEBUG_TIMING
489         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
490 #endif
491
492         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
493         on_timeout = FALSE;
494
495         if (u->use_tsched)
496             if (!polled &&
497                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
498 #ifdef DEBUG_TIMING
499                 pa_log_debug("Not reading, because too early.");
500 #endif
501                 break;
502             }
503
504         if (PA_UNLIKELY(n_bytes <= 0)) {
505
506             if (polled)
507                 PA_ONCE_BEGIN {
508                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
512                            pa_strnull(dn));
513                     pa_xfree(dn);
514                 } PA_ONCE_END;
515
516 #ifdef DEBUG_TIMING
517             pa_log_debug("Not reading, because not necessary.");
518 #endif
519             break;
520         }
521
522
523         if (++j > 10) {
524 #ifdef DEBUG_TIMING
525             pa_log_debug("Not filling up, because already too many iterations.");
526 #endif
527
528             break;
529         }
530
531         polled = FALSE;
532
533 #ifdef DEBUG_TIMING
534         pa_log_debug("Reading");
535 #endif
536
537         for (;;) {
538             pa_memchunk chunk;
539             void *p;
540             int err;
541             const snd_pcm_channel_area_t *areas;
542             snd_pcm_uframes_t offset, frames;
543             snd_pcm_sframes_t sframes;
544
545             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
547
548             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
549
550                 if (!after_avail && err == -EAGAIN)
551                     break;
552
553                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
554                     continue;
555
556                 return r;
557             }
558
559             /* Make sure that if these memblocks need to be copied they will fit into one slot */
560             if (frames > pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size)
561                 frames = pa_mempool_block_size_max(u->source->core->mempool)/u->frame_size;
562
563             if (!after_avail && frames == 0)
564                 break;
565
566             pa_assert(frames > 0);
567             after_avail = FALSE;
568
569             /* Check these are multiples of 8 bits */
570             pa_assert((areas[0].first & 7) == 0);
571             pa_assert((areas[0].step & 7) == 0);
572
573             /* We assume a single interleaved memory buffer */
574             pa_assert((areas[0].first >> 3) == 0);
575             pa_assert((areas[0].step >> 3) == u->frame_size);
576
577             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
578
579             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580             chunk.length = pa_memblock_get_length(chunk.memblock);
581             chunk.index = 0;
582
583             pa_source_post(u->source, &chunk);
584             pa_memblock_unref_fixed(chunk.memblock);
585
586             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
587
588                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
589                     continue;
590
591                 return r;
592             }
593
594             work_done = TRUE;
595
596             u->read_count += frames * u->frame_size;
597
598 #ifdef DEBUG_TIMING
599             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
600 #endif
601
602             if ((size_t) frames * u->frame_size >= n_bytes)
603                 break;
604
605             n_bytes -= (size_t) frames * u->frame_size;
606         }
607     }
608
609     if (u->use_tsched) {
610         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
611
612         if (*sleep_usec > process_usec)
613             *sleep_usec -= process_usec;
614         else
615             *sleep_usec = 0;
616     }
617
618     return work_done ? 1 : 0;
619 }
620
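/* Capture path for conventional (non-mmap) access: read with snd_pcm_readi()
 * into freshly allocated memblocks and post them to the source. Mirrors the
 * structure of mmap_read() above. */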
621 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
622     int work_done = FALSE;
623     pa_usec_t max_sleep_usec = 0, process_usec = 0;
624     size_t left_to_record;
625     unsigned j = 0;
626
627     pa_assert(u);
628     pa_source_assert_ref(u->source);
629
630     if (u->use_tsched)
631         hw_sleep_time(u, &max_sleep_usec, &process_usec);
632
633     for (;;) {
634         snd_pcm_sframes_t n;
635         size_t n_bytes;
636         int r;
637         pa_bool_t after_avail = TRUE;
638
639         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
640
641             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
642                 continue;
643
644             return r;
645         }
646
647         n_bytes = (size_t) n * u->frame_size;
648         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
649         on_timeout = FALSE;
650
651         if (u->use_tsched)
652             if (!polled &&
653                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
654                 break;
655
656         if (PA_UNLIKELY(n_bytes <= 0)) {
657
658             if (polled)
659                 PA_ONCE_BEGIN {
660                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
661                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
662                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
663                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
664                            pa_strnull(dn));
665                     pa_xfree(dn);
666                 } PA_ONCE_END;
667
668             break;
669         }
670
671         if (++j > 10) {
672 #ifdef DEBUG_TIMING
673             pa_log_debug("Not filling up, because already too many iterations.");
674 #endif
675
676             break;
677         }
678
679         polled = FALSE;
680
681         for (;;) {
682             void *p;
683             snd_pcm_sframes_t frames;
684             pa_memchunk chunk;
685
686             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
687
688             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
689
690             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
691                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
692
693 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
694
695             p = pa_memblock_acquire(chunk.memblock);
696             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
697             pa_memblock_release(chunk.memblock);
698
699             if (PA_UNLIKELY(frames < 0)) {
700                 pa_memblock_unref(chunk.memblock);
701
702                 if (!after_avail && (int) frames == -EAGAIN)
703                     break;
704
705                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
706                     continue;
707
708                 return r;
709             }
710
711             if (!after_avail && frames == 0) {
712                 pa_memblock_unref(chunk.memblock);
713                 break;
714             }
715
716             pa_assert(frames > 0);
717             after_avail = FALSE;
718
719             chunk.index = 0;
720             chunk.length = (size_t) frames * u->frame_size;
721
722             pa_source_post(u->source, &chunk);
723             pa_memblock_unref(chunk.memblock);
724
725             work_done = TRUE;
726
727             u->read_count += frames * u->frame_size;
728
729 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
730
731             if ((size_t) frames * u->frame_size >= n_bytes)
732                 break;
733
734             n_bytes -= (size_t) frames * u->frame_size;
735         }
736     }
737
738     if (u->use_tsched) {
739         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
740
741         if (*sleep_usec > process_usec)
742             *sleep_usec -= process_usec;
743         else
744             *sleep_usec = 0;
745     }
746
747     return work_done ? 1 : 0;
748 }
749
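/* Feed the time smoother with a fresh (system time, sound card time) sample
 * pair derived from the PCM delay and the running read_count, so that
 * latency queries can interpolate between updates. */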
750 static void update_smoother(struct userdata *u) {
751     snd_pcm_sframes_t delay = 0;
752     uint64_t position;
753     int err;
754     pa_usec_t now1 = 0, now2;
755     snd_pcm_status_t *status;
756
757     snd_pcm_status_alloca(&status);
758
759     pa_assert(u);
760     pa_assert(u->pcm_handle);
761
762     /* Let's update the time smoother */
763
764     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
765         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
766         return;
767     }
768
769     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
770         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
771     else {
772         snd_htimestamp_t htstamp = { 0, 0 };
773         snd_pcm_status_get_htstamp(status, &htstamp);
774         now1 = pa_timespec_load(&htstamp);
775     }
776
777     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
778     if (now1 <= 0)
779         now1 = pa_rtclock_now();
780
781     /* check if the time since the last update is bigger than the interval */
782     if (u->last_smoother_update > 0)
783         if (u->last_smoother_update + u->smoother_interval > now1)
784             return;
785
786     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
787     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
788
789     pa_smoother_put(u->smoother, now1, now2);
790
791     u->last_smoother_update = now1;
792     /* exponentially increase the update interval up to the MAX limit */
793     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
794 }
795
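/* The latency is the smoothed sound card position minus what we have already
 * read and posted, i.e. the audio still sitting in the device. */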
796 static pa_usec_t source_get_latency(struct userdata *u) {
797     int64_t delay;
798     pa_usec_t now1, now2;
799
800     pa_assert(u);
801
802     now1 = pa_rtclock_now();
803     now2 = pa_smoother_get(u->smoother, now1);
804
805     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
806
807     return delay >= 0 ? (pa_usec_t) delay : 0;
808 }
809
810 static int build_pollfd(struct userdata *u) {
811     pa_assert(u);
812     pa_assert(u->pcm_handle);
813
814     if (u->alsa_rtpoll_item)
815         pa_rtpoll_item_free(u->alsa_rtpoll_item);
816
817     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
818         return -1;
819
820     return 0;
821 }
822
823 /* Called from IO context */
824 static int suspend(struct userdata *u) {
825     pa_assert(u);
826     pa_assert(u->pcm_handle);
827
828     pa_smoother_pause(u->smoother, pa_rtclock_now());
829
830     /* Let's suspend */
831     snd_pcm_close(u->pcm_handle);
832     u->pcm_handle = NULL;
833
834     if (u->alsa_rtpoll_item) {
835         pa_rtpoll_item_free(u->alsa_rtpoll_item);
836         u->alsa_rtpoll_item = NULL;
837     }
838
839     pa_log_info("Device suspended...");
840
841     return 0;
842 }
843
844 /* Called from IO context */
845 static int update_sw_params(struct userdata *u) {
846     snd_pcm_uframes_t avail_min;
847     int err;
848
849     pa_assert(u);
850
851     /* Use the full buffer if no one asked us for anything specific */
852     u->hwbuf_unused = 0;
853
854     if (u->use_tsched) {
855         pa_usec_t latency;
856
857         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
858             size_t b;
859
860             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
861
862             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
863
864             /* We need at least one sample in our buffer */
865
866             if (PA_UNLIKELY(b < u->frame_size))
867                 b = u->frame_size;
868
869             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
870         }
871
872         fix_min_sleep_wakeup(u);
873         fix_tsched_watermark(u);
874     }
875
876     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
877
878     avail_min = 1;
879
880     if (u->use_tsched) {
881         pa_usec_t sleep_usec, process_usec;
882
883         hw_sleep_time(u, &sleep_usec, &process_usec);
884         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
885     }
886
887     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
888
889     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
890         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
891         return err;
892     }
893
894     return 0;
895 }
896
897 /* Called from IO context */
898 static int unsuspend(struct userdata *u) {
899     pa_sample_spec ss;
900     int err;
901     pa_bool_t b, d;
902     snd_pcm_uframes_t period_size, buffer_size;
903
904     pa_assert(u);
905     pa_assert(!u->pcm_handle);
906
907     pa_log_info("Trying resume...");
908
909     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
910                             SND_PCM_NONBLOCK|
911                             SND_PCM_NO_AUTO_RESAMPLE|
912                             SND_PCM_NO_AUTO_CHANNELS|
913                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
914         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
915         goto fail;
916     }
917
918     ss = u->source->sample_spec;
919     period_size = u->fragment_size / u->frame_size;
920     buffer_size = u->hwbuf_size / u->frame_size;
921     b = u->use_mmap;
922     d = u->use_tsched;
923
924     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
925         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
926         goto fail;
927     }
928
929     if (b != u->use_mmap || d != u->use_tsched) {
930         pa_log_warn("Resume failed, couldn't get original access mode.");
931         goto fail;
932     }
933
934     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
935         pa_log_warn("Resume failed, couldn't restore original sample settings.");
936         goto fail;
937     }
938
939     if (period_size*u->frame_size != u->fragment_size ||
940         buffer_size*u->frame_size != u->hwbuf_size) {
941         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
942                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
943                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
944         goto fail;
945     }
946
947     if (update_sw_params(u) < 0)
948         goto fail;
949
950     if (build_pollfd(u) < 0)
951         goto fail;
952
953     /* FIXME: We need to reload the volume somehow */
954
955     u->read_count = 0;
956     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
957     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
958     u->last_smoother_update = 0;
959
960     u->first = TRUE;
961
962     pa_log_info("Resumed successfully...");
963
964     return 0;
965
966 fail:
967     if (u->pcm_handle) {
968         snd_pcm_close(u->pcm_handle);
969         u->pcm_handle = NULL;
970     }
971
972     return -PA_ERR_IO;
973 }
974
975 /* Called from IO context */
976 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
977     struct userdata *u = PA_SOURCE(o)->userdata;
978
979     switch (code) {
980
981         case PA_SOURCE_MESSAGE_GET_LATENCY: {
982             pa_usec_t r = 0;
983
984             if (u->pcm_handle)
985                 r = source_get_latency(u);
986
987             *((pa_usec_t*) data) = r;
988
989             return 0;
990         }
991
992         case PA_SOURCE_MESSAGE_SET_STATE:
993
994             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
995
996                 case PA_SOURCE_SUSPENDED: {
997                     int r;
998
999                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1000
1001                     if ((r = suspend(u)) < 0)
1002                         return r;
1003
1004                     break;
1005                 }
1006
1007                 case PA_SOURCE_IDLE:
1008                 case PA_SOURCE_RUNNING: {
1009                     int r;
1010
1011                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1012                         if (build_pollfd(u) < 0)
1013                             return -PA_ERR_IO;
1014                     }
1015
1016                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1017                         if ((r = unsuspend(u)) < 0)
1018                             return r;
1019                     }
1020
1021                     break;
1022                 }
1023
1024                 case PA_SOURCE_UNLINKED:
1025                 case PA_SOURCE_INIT:
1026                 case PA_SOURCE_INVALID_STATE:
1027                     ;
1028             }
1029
1030             break;
1031     }
1032
1033     return pa_source_process_msg(o, code, data, offset, chunk);
1034 }
1035
1036 /* Called from main context */
1037 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1038     pa_source_state_t old_state;
1039     struct userdata *u;
1040
1041     pa_source_assert_ref(s);
1042     pa_assert_se(u = s->userdata);
1043
1044     old_state = pa_source_get_state(u->source);
1045
1046     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1047         reserve_done(u);
1048     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1049         if (reserve_init(u, u->device_name) < 0)
1050             return -PA_ERR_BUSY;
1051
1052     return 0;
1053 }
1054
1055 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1056     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1057
1058     pa_assert(u);
1059     pa_assert(u->mixer_handle);
1060
1061     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1062         return 0;
1063
1064     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1065         return 0;
1066
1067     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1068         pa_source_get_volume(u->source, TRUE);
1069         pa_source_get_mute(u->source, TRUE);
1070     }
1071
1072     return 0;
1073 }
1074
1075 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1076     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1077
1078     pa_assert(u);
1079     pa_assert(u->mixer_handle);
1080
1081     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1082         return 0;
1083
1084     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1085         return 0;
1086
1087     if (mask & SND_CTL_EVENT_MASK_VALUE)
1088         pa_source_update_volume_and_mute(u->source);
1089
1090     return 0;
1091 }
1092
1093 static void source_get_volume_cb(pa_source *s) {
1094     struct userdata *u = s->userdata;
1095     pa_cvolume r;
1096     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1097
1098     pa_assert(u);
1099     pa_assert(u->mixer_path);
1100     pa_assert(u->mixer_handle);
1101
1102     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1103         return;
1104
1105     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1106     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1107
1108     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1109
1110     if (u->mixer_path->has_dB) {
1111         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1112
1113         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1114     }
1115
1116     if (pa_cvolume_equal(&u->hardware_volume, &r))
1117         return;
1118
1119     s->real_volume = u->hardware_volume = r;
1120
1121     /* Hmm, so the hardware volume changed, let's reset our software volume */
1122     if (u->mixer_path->has_dB)
1123         pa_source_set_soft_volume(s, NULL);
1124 }
1125
1126 static void source_set_volume_cb(pa_source *s) {
1127     struct userdata *u = s->userdata;
1128     pa_cvolume r;
1129     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1130     pa_bool_t sync_volume = !!(s->flags & PA_SOURCE_SYNC_VOLUME);
1131
1132     pa_assert(u);
1133     pa_assert(u->mixer_path);
1134     pa_assert(u->mixer_handle);
1135
1136     /* Shift up by the base volume */
1137     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1138
1139     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, sync_volume, !sync_volume) < 0)
1140         return;
1141
1142     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1143     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1144
1145     u->hardware_volume = r;
1146
1147     if (u->mixer_path->has_dB) {
1148         pa_cvolume new_soft_volume;
1149         pa_bool_t accurate_enough;
1150         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1151
1152         /* Match exactly what the user requested by software */
1153         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1154
1155         /* If the adjustment to be done in software is only minimal, we
1156          * can skip it. That saves us CPU at the expense of a bit of
1157          * accuracy. */
1158         accurate_enough =
1159             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1160             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1161
1162         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1163         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1164         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1165         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1166         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1167                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1168                      pa_yes_no(accurate_enough));
1169         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1170
1171         if (!accurate_enough)
1172             s->soft_volume = new_soft_volume;
1173
1174     } else {
1175         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1176
1177         /* We can't match exactly what the user requested, hence let's
1178          * at least tell the user about it */
1179
1180         s->real_volume = r;
1181     }
1182 }
1183
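/* Only used when PA_SOURCE_SYNC_VOLUME is set: applies the deferred hardware
 * volume (thread_info.current_hw_volume) from the IO thread and reports if
 * what the hardware accepted deviates by more than VOLUME_ACCURACY. */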
1184 static void source_write_volume_cb(pa_source *s) {
1185     struct userdata *u = s->userdata;
1186     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1187
1188     pa_assert(u);
1189     pa_assert(u->mixer_path);
1190     pa_assert(u->mixer_handle);
1191     pa_assert(s->flags & PA_SOURCE_SYNC_VOLUME);
1192
1193     /* Shift up by the base volume */
1194     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1195
1196     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1197         pa_log_error("Writing HW volume failed");
1198     else {
1199         pa_cvolume tmp_vol;
1200         pa_bool_t accurate_enough;
1201
1202         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1203         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1204
1205         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1206         accurate_enough =
1207             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1208             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1209
1210         if (!accurate_enough) {
1211             union {
1212                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1213                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1214             } vol;
1215
1216             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1217                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1218                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1219             pa_log_debug("                                           in dB: %s (request) != %s",
1220                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1221                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1222         }
1223     }
1224 }
1225
1226 static void source_get_mute_cb(pa_source *s) {
1227     struct userdata *u = s->userdata;
1228     pa_bool_t b;
1229
1230     pa_assert(u);
1231     pa_assert(u->mixer_path);
1232     pa_assert(u->mixer_handle);
1233
1234     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1235         return;
1236
1237     s->muted = b;
1238 }
1239
1240 static void source_set_mute_cb(pa_source *s) {
1241     struct userdata *u = s->userdata;
1242
1243     pa_assert(u);
1244     pa_assert(u->mixer_path);
1245     pa_assert(u->mixer_handle);
1246
1247     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1248 }
1249
1250 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1251     struct userdata *u = s->userdata;
1252     pa_alsa_port_data *data;
1253
1254     pa_assert(u);
1255     pa_assert(p);
1256     pa_assert(u->mixer_handle);
1257
1258     data = PA_DEVICE_PORT_DATA(p);
1259
1260     pa_assert_se(u->mixer_path = data->path);
1261     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1262
1263     if (u->mixer_path->has_volume && u->mixer_path->has_dB) {
1264         s->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1265         s->n_volume_steps = PA_VOLUME_NORM+1;
1266
1267         pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(s->base_volume));
1268     } else {
1269         s->base_volume = PA_VOLUME_NORM;
1270         s->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1271     }
1272
1273     if (data->setting)
1274         pa_alsa_setting_select(data->setting, u->mixer_handle);
1275
1276     if (s->set_mute)
1277         s->set_mute(s);
1278     if (s->set_volume)
1279         s->set_volume(s);
1280
1281     return 0;
1282 }
1283
1284 static void source_update_requested_latency_cb(pa_source *s) {
1285     struct userdata *u = s->userdata;
1286     pa_assert(u);
1287     pa_assert(u->use_tsched); /* only when timer scheduling is used
1288                                * can we dynamically adjust the
1289                                * latency */
1290
1291     if (!u->pcm_handle)
1292         return;
1293
1294     update_sw_params(u);
1295 }
1296
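/* The IO thread: run the rtpoll loop, read data whenever the source is
 * opened, reprogram the wakeup timer from the tsched estimates and let
 * ALSA's poll descriptors wake us up otherwise. */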
1297 static void thread_func(void *userdata) {
1298     struct userdata *u = userdata;
1299     unsigned short revents = 0;
1300
1301     pa_assert(u);
1302
1303     pa_log_debug("Thread starting up");
1304
1305     if (u->core->realtime_scheduling)
1306         pa_make_realtime(u->core->realtime_priority);
1307
1308     pa_thread_mq_install(&u->thread_mq);
1309
1310     for (;;) {
1311         int ret;
1312         pa_usec_t rtpoll_sleep = 0;
1313
1314 #ifdef DEBUG_TIMING
1315         pa_log_debug("Loop");
1316 #endif
1317
1318         /* Read some data and pass it to the sources */
1319         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1320             int work_done;
1321             pa_usec_t sleep_usec = 0;
1322             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1323
1324             if (u->first) {
1325                 pa_log_info("Starting capture.");
1326                 snd_pcm_start(u->pcm_handle);
1327
1328                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1329
1330                 u->first = FALSE;
1331             }
1332
1333             if (u->use_mmap)
1334                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1335             else
1336                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1337
1338             if (work_done < 0)
1339                 goto fail;
1340
1341 /*             pa_log_debug("work_done = %i", work_done); */
1342
1343             if (work_done)
1344                 update_smoother(u);
1345
1346             if (u->use_tsched) {
1347                 pa_usec_t cusec;
1348
1349                 /* OK, the capture buffer is now empty, let's
1350                  * calculate when to wake up next */
1351
1352 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1353
1354                 /* Convert from the sound card time domain to the
1355                  * system time domain */
1356                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1357
1358 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1359
1360                 /* We don't trust the conversion, so we wake up whatever comes first */
1361                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1362             }
1363         }
1364
1365         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1366             pa_usec_t volume_sleep;
1367             pa_source_volume_change_apply(u->source, &volume_sleep);
1368             if (volume_sleep > 0)
1369                 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1370         }
1371
1372         if (rtpoll_sleep > 0)
1373             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1374         else
1375             pa_rtpoll_set_timer_disabled(u->rtpoll);
1376
1377         /* Hmm, nothing to do. Let's sleep */
1378         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1379             goto fail;
1380
1381         if (u->source->flags & PA_SOURCE_SYNC_VOLUME)
1382             pa_source_volume_change_apply(u->source, NULL);
1383
1384         if (ret == 0)
1385             goto finish;
1386
1387         /* Tell ALSA about this and process its response */
1388         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1389             struct pollfd *pollfd;
1390             int err;
1391             unsigned n;
1392
1393             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1394
1395             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1396                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1397                 goto fail;
1398             }
1399
1400             if (revents & ~POLLIN) {
1401                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1402                     goto fail;
1403
1404                 u->first = TRUE;
1405             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1406                 pa_log_debug("Wakeup from ALSA!");
1407
1408         } else
1409             revents = 0;
1410     }
1411
1412 fail:
1413     /* If this was not a regular exit from the loop, we have to continue
1414      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1415     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1416     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1417
1418 finish:
1419     pa_log_debug("Thread shutting down");
1420 }
1421
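/* Derive the source name from the "source_name"/"name" module arguments if
 * given, otherwise synthesize "alsa_input.<device>[.<mapping>]". */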
1422 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1423     const char *n;
1424     char *t;
1425
1426     pa_assert(data);
1427     pa_assert(ma);
1428     pa_assert(device_name);
1429
1430     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1431         pa_source_new_data_set_name(data, n);
1432         data->namereg_fail = TRUE;
1433         return;
1434     }
1435
1436     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1437         data->namereg_fail = TRUE;
1438     else {
1439         n = device_id ? device_id : device_name;
1440         data->namereg_fail = FALSE;
1441     }
1442
1443     if (mapping)
1444         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1445     else
1446         t = pa_sprintf_malloc("alsa_input.%s", n);
1447
1448     pa_source_new_data_set_name(data, t);
1449     pa_xfree(t);
1450 }
1451
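/* Open the mixer belonging to the PCM and probe either the explicitly
 * requested mixer element or the whole path set of the mapping. On failure
 * we simply end up without hardware volume/mute support. */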
1452 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1453
1454     if (!mapping && !element)
1455         return;
1456
1457     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1458         pa_log_info("Failed to find a working mixer device.");
1459         return;
1460     }
1461
1462     if (element) {
1463
1464         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1465             goto fail;
1466
1467         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1468             goto fail;
1469
1470         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1471         pa_alsa_path_dump(u->mixer_path);
1472     } else {
1473
1474         if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1475             goto fail;
1476
1477         pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1478
1479         pa_log_debug("Probed mixer paths:");
1480         pa_alsa_path_set_dump(u->mixer_path_set);
1481     }
1482
1483     return;
1484
1485 fail:
1486
1487     if (u->mixer_path_set) {
1488         pa_alsa_path_set_free(u->mixer_path_set);
1489         u->mixer_path_set = NULL;
1490     } else if (u->mixer_path) {
1491         pa_alsa_path_free(u->mixer_path);
1492         u->mixer_path = NULL;
1493     }
1494
1495     if (u->mixer_handle) {
1496         snd_mixer_close(u->mixer_handle);
1497         u->mixer_handle = NULL;
1498     }
1499 }
1500
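/* Wire up hardware volume/mute handling: select the active mixer path,
 * derive base_volume and the number of volume steps from its dB range, and
 * register the mixer callbacks either on the IO rtpoll (sync_volume) or on
 * the main loop fd list. */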
1501 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB, pa_bool_t sync_volume) {
1502     pa_assert(u);
1503
1504     if (!u->mixer_handle)
1505         return 0;
1506
1507     if (u->source->active_port) {
1508         pa_alsa_port_data *data;
1509
1510         /* We have a list of supported paths, so let's activate the
1511          * one that has been chosen as active */
1512
1513         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1514         u->mixer_path = data->path;
1515
1516         pa_alsa_path_select(data->path, u->mixer_handle);
1517
1518         if (data->setting)
1519             pa_alsa_setting_select(data->setting, u->mixer_handle);
1520
1521     } else {
1522
1523         if (!u->mixer_path && u->mixer_path_set)
1524             u->mixer_path = u->mixer_path_set->paths;
1525
1526         if (u->mixer_path) {
1527             /* Hmm, we have only a single path, so let's activate it */
1528
1529             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1530
1531             if (u->mixer_path->settings)
1532                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1533         } else
1534             return 0;
1535     }
1536
1537     if (!u->mixer_path->has_volume)
1538         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1539     else {
1540
1541         if (u->mixer_path->has_dB) {
1542             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1543
1544             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1545             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1546
1547             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1548
1549         } else {
1550             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1551             u->source->base_volume = PA_VOLUME_NORM;
1552             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1553         }
1554
1555         u->source->get_volume = source_get_volume_cb;
1556         u->source->set_volume = source_set_volume_cb;
1557         u->source->write_volume = source_write_volume_cb;
1558
1559         u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL;
1560         if (u->mixer_path->has_dB) {
1561             u->source->flags |= PA_SOURCE_DECIBEL_VOLUME;
1562             if (sync_volume) {
1563                 u->source->flags |= PA_SOURCE_SYNC_VOLUME;
1564                 pa_log_info("Successfully enabled synchronous volume.");
1565             }
1566         }
1567
1568         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1569     }
1570
1571     if (!u->mixer_path->has_mute) {
1572         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1573     } else {
1574         u->source->get_mute = source_get_mute_cb;
1575         u->source->set_mute = source_set_mute_cb;
1576         u->source->flags |= PA_SOURCE_HW_MUTE_CTRL;
1577         pa_log_info("Using hardware mute control.");
1578     }
1579
1580     if (u->source->flags & (PA_SOURCE_HW_VOLUME_CTRL|PA_SOURCE_HW_MUTE_CTRL)) {
1581         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1582         if (u->source->flags & PA_SOURCE_SYNC_VOLUME) {
1583             u->mixer_pd = pa_alsa_mixer_pdata_new();
1584             mixer_callback = io_mixer_callback;
1585
1586             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1587                 pa_log("Failed to initialize file descriptor monitoring");
1588                 return -1;
1589             }
1590         } else {
1591             u->mixer_fdl = pa_alsa_fdlist_new();
1592             mixer_callback = ctl_mixer_callback;
1593
1594             if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1595                 pa_log("Failed to initialize file descriptor monitoring");
1596                 return -1;
1597             }
1598         }
1599
1600         if (u->mixer_path_set)
1601             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1602         else
1603             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1604     }
1605
1606     return 0;
1607 }
1608
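/* Entry point used by the ALSA modules to create a capture source. The
 * modargs parsed below map to the usual module arguments; an illustrative
 * (not authoritative) invocation could look like:
 *
 *   load-module module-alsa-source device_id=0 tsched=1 fragments=4 fragment_size=1024
 *
 * Malformed values make us bail out below. */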
1609 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char *driver, pa_card *card, pa_alsa_mapping *mapping) {
1610
1611     struct userdata *u = NULL;
1612     const char *dev_id = NULL;
1613     pa_sample_spec ss, requested_ss;
1614     pa_channel_map map;
1615     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1616     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1617     size_t frame_size;
1618     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, sync_volume = FALSE;
1619     pa_source_new_data data;
1620     pa_alsa_profile_set *profile_set = NULL;
1621
1622     pa_assert(m);
1623     pa_assert(ma);
1624
1625     ss = m->core->default_sample_spec;
1626     map = m->core->default_channel_map;
1627     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1628         pa_log("Failed to parse sample specification and channel map");
1629         goto fail;
1630     }
1631
1632     requested_ss = ss;
1633     frame_size = pa_frame_size(&ss);
1634
1635     nfrags = m->core->default_n_fragments;
1636     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1637     if (frag_size <= 0)
1638         frag_size = (uint32_t) frame_size;
1639     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1640     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1641
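    /* The defaults computed above can be overridden by module arguments,
     * e.g. (illustrative values only):
     *
     *   load-module module-alsa-source device=hw:0 fragments=4 fragment_size=4096
     */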
    if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
        pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
        pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
        pa_log("Failed to parse buffer metrics");
        goto fail;
    }

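    /* Derive the period and buffer geometry in frames. For example, with
     * 16 bit stereo (4 bytes per frame), fragment_size=4096 and fragments=4
     * this yields 1024-frame periods and a 4096-frame buffer (illustrative
     * numbers; the device may still adjust them when it is opened). */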
    buffer_size = nfrags * frag_size;

    period_frames = frag_size/frame_size;
    buffer_frames = buffer_size/frame_size;
    tsched_frames = tsched_size/frame_size;

    if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
        pa_log("Failed to parse mmap argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
        pa_log("Failed to parse tsched argument.");
        goto fail;
    }

    if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
        pa_log("Failed to parse ignore_dB argument.");
        goto fail;
    }

    sync_volume = m->core->sync_volume;
    if (pa_modargs_get_value_boolean(ma, "sync_volume", &sync_volume) < 0) {
        pa_log("Failed to parse sync_volume argument.");
        goto fail;
    }

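    /* pa_alsa_may_tsched() may veto timer-based scheduling if the running
     * system cannot support it reliably. */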
    use_tsched = pa_alsa_may_tsched(use_tsched);

    u = pa_xnew0(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->use_mmap = use_mmap;
    u->use_tsched = use_tsched;
    u->first = TRUE;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);

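    /* The smoother tracks the device clock against the system clock and is
     * later used to interpolate the capture latency between updates. */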
    u->smoother = pa_smoother_new(
            SMOOTHER_ADJUST_USEC,
            SMOOTHER_WINDOW_USEC,
            TRUE,
            TRUE,
            5,
            pa_rtclock_now(),
            TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;

    dev_id = pa_modargs_get_value(
            ma, "device_id",
            pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));

    if (reserve_init(u, dev_id) < 0)
        goto fail;

    if (reserve_monitor_init(u, dev_id) < 0)
        goto fail;

    b = use_mmap;
    d = use_tsched;

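    /* Open the PCM device in one of three ways: by an explicit mapping, by
     * device_id with automatic profile probing, or by a plain ALSA device
     * string. b and d report back whether mmap and timer-based scheduling
     * could actually be enabled. */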
    if (mapping) {

        if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
            pa_log("device_id= not set");
            goto fail;
        }

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, mapping)))
            goto fail;

    } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {

        if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
            goto fail;

        if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
                      dev_id,
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, profile_set, &mapping)))
            goto fail;

    } else {

        if (!(u->pcm_handle = pa_alsa_open_by_device_string(
                      pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
                      &u->device_name,
                      &ss, &map,
                      SND_PCM_STREAM_CAPTURE,
                      &period_frames, &buffer_frames, tsched_frames,
                      &b, &d, FALSE)))
            goto fail;
    }

    pa_assert(u->device_name);
    pa_log_info("Successfully opened device %s.", u->device_name);

    if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
        pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
        goto fail;
    }

    if (mapping)
        pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);

    if (use_mmap && !b) {
        pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
        u->use_mmap = use_mmap = FALSE;
    }

    if (use_tsched && (!b || !d)) {
        pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
        u->use_tsched = use_tsched = FALSE;
    }

    if (u->use_mmap)
        pa_log_info("Successfully enabled mmap() mode.");

    if (u->use_tsched)
        pa_log_info("Successfully enabled timer-based scheduling mode.");

    /* ALSA might tweak the sample spec, so recalculate the frame size */
    frame_size = pa_frame_size(&ss);

    find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);

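    /* Fill in the pa_source_new_data: name, sample spec, channel map and the
     * device properties that describe how the PCM was opened. */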
    pa_source_new_data_init(&data);
    data.driver = driver;
    data.module = m;
    data.card = card;
    set_source_name(&data, ma, dev_id, u->device_name, mapping);

    /* We need to give pa_modargs_get_value_boolean() a pointer to a local
     * variable instead of using &data.namereg_fail directly, because
     * data.namereg_fail is a bitfield and taking the address of a bitfield
     * variable is impossible. */
    namereg_fail = data.namereg_fail;
    if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
        pa_log("Failed to parse boolean argument namereg_fail.");
        pa_source_new_data_done(&data);
        goto fail;
    }
    data.namereg_fail = namereg_fail;

    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);

    pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
    pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));

    if (mapping) {
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
        pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
    }

    pa_alsa_init_description(data.proplist);

    if (u->control_device)
        pa_alsa_init_proplist_ctl(data.proplist, u->control_device);

    if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_source_new_data_done(&data);
        goto fail;
    }

    if (u->mixer_path_set)
        pa_alsa_add_ports(&data.ports, u->mixer_path_set);

    u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
    pa_source_new_data_done(&data);

    if (!u->source) {
        pa_log("Failed to create source object");
        goto fail;
    }

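    /* sync_volume_safety_margin and sync_volume_extra_delay tune when
     * deferred hardware volume changes are applied relative to the audio
     * they affect. */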
    if (pa_modargs_get_value_u32(ma, "sync_volume_safety_margin",
                                 &u->source->thread_info.volume_change_safety_margin) < 0) {
        pa_log("Failed to parse sync_volume_safety_margin parameter");
        goto fail;
    }

    if (pa_modargs_get_value_s32(ma, "sync_volume_extra_delay",
                                 &u->source->thread_info.volume_change_extra_delay) < 0) {
        pa_log("Failed to parse sync_volume_extra_delay parameter");
        goto fail;
    }

    u->source->parent.process_msg = source_process_msg;
    if (u->use_tsched)
        u->source->update_requested_latency = source_update_requested_latency_cb;
    u->source->set_state = source_set_state_cb;
    u->source->set_port = source_set_port_cb;
    u->source->userdata = u;

    pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
    pa_source_set_rtpoll(u->source, u->rtpoll);

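    /* Record the buffer geometry the device actually granted; period and
     * buffer sizes may differ from what was requested above. */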
    u->frame_size = frame_size;
    u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
    u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
    pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);

    pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
                (double) u->hwbuf_size / (double) u->fragment_size,
                (long unsigned) u->fragment_size,
                (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
                (long unsigned) u->hwbuf_size,
                (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);

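    /* With timer-based scheduling the wakeup watermark is converted from the
     * originally requested sample spec to the one ALSA granted, clamped to
     * sane bounds, and the source advertises a dynamic latency range up to
     * the full hardware buffer. Otherwise the latency is fixed at the buffer
     * size. */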
    if (u->use_tsched) {
        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);

        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);

        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, &ss));

        pa_log_info("Time scheduling watermark is %0.2fms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
    } else
        pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));

    reserve_update(u);

    if (update_sw_params(u) < 0)
        goto fail;

    if (setup_mixer(u, ignore_dB, sync_volume) < 0)
        goto fail;

    pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);

    if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Get initial mixer settings */
    if (data.volume_is_set) {
        if (u->source->set_volume)
            u->source->set_volume(u->source);
    } else {
        if (u->source->get_volume)
            u->source->get_volume(u->source);
    }

    if (data.muted_is_set) {
        if (u->source->set_mute)
            u->source->set_mute(u->source);
    } else {
        if (u->source->get_mute)
            u->source->get_mute(u->source);
    }

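    /* Publish the source; from this point on the core may route recording
     * streams to it. */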
    pa_source_put(u->source);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return u->source;

fail:

    if (u)
        userdata_free(u);

    if (profile_set)
        pa_alsa_profile_set_free(profile_set);

    return NULL;
}

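/* Tear down in reverse order of construction: unlink the source so no new
 * streams reach it, stop the I/O thread, then release the ALSA handles and
 * the device reservation. */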
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    if (u->source)
        pa_source_unlink(u->source);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}

void pa_alsa_source_free(pa_source *s) {
    struct userdata *u;

    pa_source_assert_ref(s);
    pa_assert_se(u = s->userdata);

    userdata_free(u);
}