Fix deferred volume not being applied if sink is closed
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
/* Per-module state for one ALSA capture device, shared between the main
 * thread and the source's IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    /* IO thread machinery */
    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* NULL while the device is suspended */

    /* Mixer (hardware volume) handling */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;  /* last volume written to/read from the hw mixer */

    /* All sizes below are in bytes */
    size_t
        frame_size,              /* bytes per frame of the configured sample spec */
        fragment_size,           /* ALSA period size */
        hwbuf_size,              /* total hardware buffer size */
        tsched_watermark,        /* current timer-scheduling wakeup watermark */
        tsched_watermark_ref,    /* watermark value to reset to */
        hwbuf_unused,            /* part of the hw buffer we deliberately leave unused */
        min_sleep,               /* lower bound for sleep time (as bytes) */
        min_wakeup,              /* lower bound for the watermark (as bytes) */
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before;  /* hold-off deadline for decreasing the watermark; 0 = unarmed */
    pa_usec_t min_latency_ref;           /* min latency remembered for reuse from IO context */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;

    pa_bool_t first;    /* TRUE until the first successful read after (re)start/recovery */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency smoothing */
    pa_smoother *smoother;
    uint64_t read_count;                 /* total bytes read from the device */
    pa_usec_t smoother_interval;         /* current (exponentially growing) update interval */
    pa_usec_t last_smoother_update;

    /* Device reservation (session integration) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
139
140 static void userdata_free(struct userdata *u);
141
142 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
143     pa_assert(r);
144     pa_assert(u);
145
146     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
147         return PA_HOOK_CANCEL;
148
149     return PA_HOOK_OK;
150 }
151
152 static void reserve_done(struct userdata *u) {
153     pa_assert(u);
154
155     if (u->reserve_slot) {
156         pa_hook_slot_free(u->reserve_slot);
157         u->reserve_slot = NULL;
158     }
159
160     if (u->reserve) {
161         pa_reserve_wrapper_unref(u->reserve);
162         u->reserve = NULL;
163     }
164 }
165
166 static void reserve_update(struct userdata *u) {
167     const char *description;
168     pa_assert(u);
169
170     if (!u->source || !u->reserve)
171         return;
172
173     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
174         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
175 }
176
177 static int reserve_init(struct userdata *u, const char *dname) {
178     char *rname;
179
180     pa_assert(u);
181     pa_assert(dname);
182
183     if (u->reserve)
184         return 0;
185
186     if (pa_in_system_mode())
187         return 0;
188
189     if (!(rname = pa_alsa_get_reserve_name(dname)))
190         return 0;
191
192     /* We are resuming, try to lock the device */
193     u->reserve = pa_reserve_wrapper_get(u->core, rname);
194     pa_xfree(rname);
195
196     if (!(u->reserve))
197         return -1;
198
199     reserve_update(u);
200
201     pa_assert(!u->reserve_slot);
202     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
203
204     return 0;
205 }
206
207 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
208     pa_bool_t b;
209
210     pa_assert(w);
211     pa_assert(u);
212
213     b = PA_PTR_TO_UINT(busy) && !u->reserve;
214
215     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
216     return PA_HOOK_OK;
217 }
218
219 static void monitor_done(struct userdata *u) {
220     pa_assert(u);
221
222     if (u->monitor_slot) {
223         pa_hook_slot_free(u->monitor_slot);
224         u->monitor_slot = NULL;
225     }
226
227     if (u->monitor) {
228         pa_reserve_monitor_wrapper_unref(u->monitor);
229         u->monitor = NULL;
230     }
231 }
232
233 static int reserve_monitor_init(struct userdata *u, const char *dname) {
234     char *rname;
235
236     pa_assert(u);
237     pa_assert(dname);
238
239     if (pa_in_system_mode())
240         return 0;
241
242     if (!(rname = pa_alsa_get_reserve_name(dname)))
243         return 0;
244
245     /* We are resuming, try to lock the device */
246     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
247     pa_xfree(rname);
248
249     if (!(u->monitor))
250         return -1;
251
252     pa_assert(!u->monitor_slot);
253     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
254
255     return 0;
256 }
257
258 static void fix_min_sleep_wakeup(struct userdata *u) {
259     size_t max_use, max_use_2;
260
261     pa_assert(u);
262     pa_assert(u->use_tsched);
263
264     max_use = u->hwbuf_size - u->hwbuf_unused;
265     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
266
267     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
268     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
269
270     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
271     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
272 }
273
274 static void fix_tsched_watermark(struct userdata *u) {
275     size_t max_use;
276     pa_assert(u);
277     pa_assert(u->use_tsched);
278
279     max_use = u->hwbuf_size - u->hwbuf_unused;
280
281     if (u->tsched_watermark > max_use - u->min_sleep)
282         u->tsched_watermark = max_use - u->min_sleep;
283
284     if (u->tsched_watermark < u->min_wakeup)
285         u->tsched_watermark = u->min_wakeup;
286 }
287
288 static void increase_watermark(struct userdata *u) {
289     size_t old_watermark;
290     pa_usec_t old_min_latency, new_min_latency;
291
292     pa_assert(u);
293     pa_assert(u->use_tsched);
294
295     /* First, just try to increase the watermark */
296     old_watermark = u->tsched_watermark;
297     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
298     fix_tsched_watermark(u);
299
300     if (old_watermark != u->tsched_watermark) {
301         pa_log_info("Increasing wakeup watermark to %0.2f ms",
302                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
303         return;
304     }
305
306     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
307     old_min_latency = u->source->thread_info.min_latency;
308     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
309     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
310
311     if (old_min_latency != new_min_latency) {
312         pa_log_info("Increasing minimal latency to %0.2f ms",
313                     (double) new_min_latency / PA_USEC_PER_MSEC);
314
315         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
316     }
317
318     /* When we reach this we're officialy fucked! */
319 }
320
321 static void decrease_watermark(struct userdata *u) {
322     size_t old_watermark;
323     pa_usec_t now;
324
325     pa_assert(u);
326     pa_assert(u->use_tsched);
327
328     now = pa_rtclock_now();
329
330     if (u->watermark_dec_not_before <= 0)
331         goto restart;
332
333     if (u->watermark_dec_not_before > now)
334         return;
335
336     old_watermark = u->tsched_watermark;
337
338     if (u->tsched_watermark < u->watermark_dec_step)
339         u->tsched_watermark = u->tsched_watermark / 2;
340     else
341         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
342
343     fix_tsched_watermark(u);
344
345     if (old_watermark != u->tsched_watermark)
346         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
347                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
348
349     /* We don't change the latency range*/
350
351 restart:
352     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
353 }
354
355 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
356     pa_usec_t wm, usec;
357
358     pa_assert(sleep_usec);
359     pa_assert(process_usec);
360
361     pa_assert(u);
362     pa_assert(u->use_tsched);
363
364     usec = pa_source_get_requested_latency_within_thread(u->source);
365
366     if (usec == (pa_usec_t) -1)
367         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
368
369     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
370
371     if (wm > usec)
372         wm = usec/2;
373
374     *sleep_usec = usec - wm;
375     *process_usec = wm;
376
377 #ifdef DEBUG_TIMING
378     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
379                  (unsigned long) (usec / PA_USEC_PER_MSEC),
380                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
381                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
382 #endif
383 }
384
385 static int try_recover(struct userdata *u, const char *call, int err) {
386     pa_assert(u);
387     pa_assert(call);
388     pa_assert(err < 0);
389
390     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
391
392     pa_assert(err != -EAGAIN);
393
394     if (err == -EPIPE)
395         pa_log_debug("%s: Buffer overrun!", call);
396
397     if (err == -ESTRPIPE)
398         pa_log_debug("%s: System suspended!", call);
399
400     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
401         pa_log("%s: %s", call, pa_alsa_strerror(err));
402         return -1;
403     }
404
405     u->first = TRUE;
406     return 0;
407 }
408
409 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
410     size_t left_to_record;
411     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
412     pa_bool_t overrun = FALSE;
413
414     /* We use <= instead of < for this check here because an overrun
415      * only happens after the last sample was processed, not already when
416      * it is removed from the buffer. This is particularly important
417      * when block transfer is used. */
418
419     if (n_bytes <= rec_space)
420         left_to_record = rec_space - n_bytes;
421     else {
422
423         /* We got a dropout. What a mess! */
424         left_to_record = 0;
425         overrun = TRUE;
426
427 #ifdef DEBUG_TIMING
428         PA_DEBUG_TRAP;
429 #endif
430
431         if (pa_log_ratelimit(PA_LOG_INFO))
432             pa_log_info("Overrun!");
433     }
434
435 #ifdef DEBUG_TIMING
436     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
437 #endif
438
439     if (u->use_tsched) {
440         pa_bool_t reset_not_before = TRUE;
441
442         if (overrun || left_to_record < u->watermark_inc_threshold)
443             increase_watermark(u);
444         else if (left_to_record > u->watermark_dec_threshold) {
445             reset_not_before = FALSE;
446
447             /* We decrease the watermark only if have actually
448              * been woken up by a timeout. If something else woke
449              * us up it's too easy to fulfill the deadlines... */
450
451             if (on_timeout)
452                 decrease_watermark(u);
453         }
454
455         if (reset_not_before)
456             u->watermark_dec_not_before = 0;
457     }
458
459     return left_to_record;
460 }
461
/* Read as much data as currently available from the device using ALSA's
 * mmap transfer and post it to the source.
 *
 * polled      - we were woken by POLLIN rather than by the timer
 * on_timeout  - we were woken by the timer (passed through to the
 *               watermark logic on the first iteration only)
 * sleep_usec  - out: how long to sleep before the next wakeup (tsched only)
 *
 * Returns 1 if any data was posted, 0 if not, negative on unrecoverable
 * error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: query availability, then drain it chunk-wise. */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            /* On success recovery re-queries availability. */
            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;  /* only the first iteration counts as timer-driven */

        /* If plenty of room remains and this wakeup wasn't poll-driven,
         * postpone reading until more data has accumulated. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Bound the total work per wakeup. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the n_bytes reported available, one mmap
         * window at a time. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                /* -EAGAIN after we already read something just means
                 * we're done for now. */
                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed hardware memory in a fixed memblock so we
             * can post it without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Derive the next sleep time: time until the remaining space runs out,
     * minus the watermark we reserve for processing. */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
623
/* Read as much data as currently available from the device using plain
 * snd_pcm_readi() (non-mmap fallback) and post it to the source.
 *
 * Same contract as mmap_read(): returns 1 if any data was posted, 0 if
 * not, negative on unrecoverable error; fills in *sleep_usec when timer
 * scheduling is active. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    /* Outer loop: query availability, then drain it chunk-wise. */
    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;  /* only the first iteration counts as timer-driven */

        /* If plenty of room remains and this wakeup wasn't poll-driven,
         * postpone reading until more data has accumulated. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read indicates a driver bug; warn once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Bound the total work per wakeup. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: read the n_bytes reported available, one memblock
         * at a time, copying through a freshly allocated block. */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                /* -EAGAIN after we already read something just means we're
                 * done for now. */
                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    /* Derive the next sleep time: time until the remaining space runs out,
     * minus the watermark we reserve for processing. */
    if (u->use_tsched) {
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
753
/* Feed a fresh (system time, stream time) sample into the time smoother so
 * latency queries stay accurate. Rate-limited by smoother_interval, which
 * grows exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    /* Prefer the driver's own timestamp for the system-time side. */
    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream-time side: bytes read so far plus what is still sitting in
     * the hardware buffer, converted to usec. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
799
800 static pa_usec_t source_get_latency(struct userdata *u) {
801     int64_t delay;
802     pa_usec_t now1, now2;
803
804     pa_assert(u);
805
806     now1 = pa_rtclock_now();
807     now2 = pa_smoother_get(u->smoother, now1);
808
809     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
810
811     return delay >= 0 ? (pa_usec_t) delay : 0;
812 }
813
814 static int build_pollfd(struct userdata *u) {
815     pa_assert(u);
816     pa_assert(u->pcm_handle);
817
818     if (u->alsa_rtpoll_item)
819         pa_rtpoll_item_free(u->alsa_rtpoll_item);
820
821     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
822         return -1;
823
824     return 0;
825 }
826
827 /* Called from IO context */
828 static int suspend(struct userdata *u) {
829     pa_assert(u);
830     pa_assert(u->pcm_handle);
831
832     pa_smoother_pause(u->smoother, pa_rtclock_now());
833
834     /* Let's suspend */
835     snd_pcm_close(u->pcm_handle);
836     u->pcm_handle = NULL;
837
838     if (u->alsa_rtpoll_item) {
839         pa_rtpoll_item_free(u->alsa_rtpoll_item);
840         u->alsa_rtpoll_item = NULL;
841     }
842
843     pa_log_info("Device suspended...");
844
845     return 0;
846 }
847
848 /* Called from IO context */
849 static int update_sw_params(struct userdata *u) {
850     snd_pcm_uframes_t avail_min;
851     int err;
852
853     pa_assert(u);
854
855     /* Use the full buffer if no one asked us for anything specific */
856     u->hwbuf_unused = 0;
857
858     if (u->use_tsched) {
859         pa_usec_t latency;
860
861         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
862             size_t b;
863
864             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
865
866             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
867
868             /* We need at least one sample in our buffer */
869
870             if (PA_UNLIKELY(b < u->frame_size))
871                 b = u->frame_size;
872
873             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
874         }
875
876         fix_min_sleep_wakeup(u);
877         fix_tsched_watermark(u);
878     }
879
880     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
881
882     avail_min = 1;
883
884     if (u->use_tsched) {
885         pa_usec_t sleep_usec, process_usec;
886
887         hw_sleep_time(u, &sleep_usec, &process_usec);
888         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
889     }
890
891     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
892
893     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
894         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
895         return err;
896     }
897
898     return 0;
899 }
900
901 /* Called from IO Context on unsuspend or from main thread when creating source */
/* Reinitialize the timer-scheduling watermark and its adjustment steps and
 * thresholds from tsched_watermark (given in bytes of sample spec ss), then
 * refresh the source's latency range.
 *
 * in_thread selects how the latency range is set: within the IO thread on
 * unsuspend, or via the main-thread API when the source is being created. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark from ss to the source's own sample spec,
     * rounding up so it never shrinks in the conversion. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread,
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
935
/* Called from IO context. Re-open and re-configure the PCM device after a
 * suspend. Returns 0 on success, -PA_ERR_IO on failure (with the PCM handle
 * closed again). The resume is only accepted if the device comes back with
 * exactly the same configuration as before the suspend. */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Open non-blocking and with all automatic conversions disabled, so
     * ALSA cannot silently hand us a different format/rate/channel count. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the exact parameters that were in effect before the suspend */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* Any deviation in access mode, sample spec or buffer metrics would
     * invalidate all our cached bookkeeping (frame size, watermarks,
     * latency ranges), so treat it as a failed resume. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* The device clock was stopped while suspended, so restart the timing
     * smoother from scratch. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Makes the IO thread call snd_pcm_start() on its next iteration */
    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    /* Close the handle again so the source stays cleanly suspended */
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1017
1018 /* Called from IO context */
1019 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1020     struct userdata *u = PA_SOURCE(o)->userdata;
1021
1022     switch (code) {
1023
1024         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1025             pa_usec_t r = 0;
1026
1027             if (u->pcm_handle)
1028                 r = source_get_latency(u);
1029
1030             *((pa_usec_t*) data) = r;
1031
1032             return 0;
1033         }
1034
1035         case PA_SOURCE_MESSAGE_SET_STATE:
1036
1037             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1038
1039                 case PA_SOURCE_SUSPENDED: {
1040                     int r;
1041
1042                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1043
1044                     if ((r = suspend(u)) < 0)
1045                         return r;
1046
1047                     break;
1048                 }
1049
1050                 case PA_SOURCE_IDLE:
1051                 case PA_SOURCE_RUNNING: {
1052                     int r;
1053
1054                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1055                         if (build_pollfd(u) < 0)
1056                             return -PA_ERR_IO;
1057                     }
1058
1059                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1060                         if ((r = unsuspend(u)) < 0)
1061                             return r;
1062                     }
1063
1064                     break;
1065                 }
1066
1067                 case PA_SOURCE_UNLINKED:
1068                 case PA_SOURCE_INIT:
1069                 case PA_SOURCE_INVALID_STATE:
1070                     ;
1071             }
1072
1073             break;
1074     }
1075
1076     return pa_source_process_msg(o, code, data, offset, chunk);
1077 }
1078
1079 /* Called from main context */
1080 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1081     pa_source_state_t old_state;
1082     struct userdata *u;
1083
1084     pa_source_assert_ref(s);
1085     pa_assert_se(u = s->userdata);
1086
1087     old_state = pa_source_get_state(u->source);
1088
1089     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1090         reserve_done(u);
1091     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1092         if (reserve_init(u, u->device_name) < 0)
1093             return -PA_ERR_BUSY;
1094
1095     return 0;
1096 }
1097
1098 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1099     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1100
1101     pa_assert(u);
1102     pa_assert(u->mixer_handle);
1103
1104     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1105         return 0;
1106
1107     if (!PA_SOURCE_IS_LINKED(u->source->state))
1108         return 0;
1109
1110     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1111         return 0;
1112
1113     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1114         pa_source_get_volume(u->source, TRUE);
1115         pa_source_get_mute(u->source, TRUE);
1116     }
1117
1118     return 0;
1119 }
1120
1121 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1122     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1123
1124     pa_assert(u);
1125     pa_assert(u->mixer_handle);
1126
1127     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1128         return 0;
1129
1130     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1131         return 0;
1132
1133     if (mask & SND_CTL_EVENT_MASK_VALUE)
1134         pa_source_update_volume_and_mute(u->source);
1135
1136     return 0;
1137 }
1138
1139 static void source_get_volume_cb(pa_source *s) {
1140     struct userdata *u = s->userdata;
1141     pa_cvolume r;
1142     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1143
1144     pa_assert(u);
1145     pa_assert(u->mixer_path);
1146     pa_assert(u->mixer_handle);
1147
1148     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1149         return;
1150
1151     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1152     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1153
1154     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1155
1156     if (u->mixer_path->has_dB) {
1157         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1158
1159         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1160     }
1161
1162     if (pa_cvolume_equal(&u->hardware_volume, &r))
1163         return;
1164
1165     s->real_volume = u->hardware_volume = r;
1166
1167     /* Hmm, so the hardware volume changed, let's reset our software volume */
1168     if (u->mixer_path->has_dB)
1169         pa_source_set_soft_volume(s, NULL);
1170 }
1171
/* Write the requested volume (s->real_volume) to the hardware, and derive
 * the software volume that compensates for whatever the hardware could not
 * represent exactly. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* NOTE(review): the last two flags presumably select deferred
     * application (IO thread writes later) vs. an immediate hardware
     * write -- confirm against pa_alsa_path_set_volume() */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1229
/* Deferred-volume write callback: push the pending hardware volume
 * (s->thread_info.current_hw_volume) out to the mixer element. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what the hardware actually accepted against what was
         * requested; deviations within VOLUME_ACCURACY are tolerated
         * silently, larger ones are only logged for debugging. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1271
1272 static void source_get_mute_cb(pa_source *s) {
1273     struct userdata *u = s->userdata;
1274     pa_bool_t b;
1275
1276     pa_assert(u);
1277     pa_assert(u->mixer_path);
1278     pa_assert(u->mixer_handle);
1279
1280     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1281         return;
1282
1283     s->muted = b;
1284 }
1285
/* Write the current mute state (s->muted) out to the hardware switch. */
static void source_set_mute_cb(pa_source *s) {
    struct userdata *u = s->userdata;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
}
1295
1296 static void mixer_volume_init(struct userdata *u) {
1297     pa_assert(u);
1298
1299     if (!u->mixer_path->has_volume) {
1300         pa_source_set_write_volume_callback(u->source, NULL);
1301         pa_source_set_get_volume_callback(u->source, NULL);
1302         pa_source_set_set_volume_callback(u->source, NULL);
1303
1304         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1305     } else {
1306         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1307         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1308
1309         if (u->mixer_path->has_dB && u->deferred_volume) {
1310             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1311             pa_log_info("Successfully enabled synchronous volume.");
1312         } else
1313             pa_source_set_write_volume_callback(u->source, NULL);
1314
1315         if (u->mixer_path->has_dB) {
1316             pa_source_enable_decibel_volume(u->source, TRUE);
1317             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1318
1319             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1320             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1321
1322             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1323         } else {
1324             pa_source_enable_decibel_volume(u->source, FALSE);
1325             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1326
1327             u->source->base_volume = PA_VOLUME_NORM;
1328             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1329         }
1330
1331         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1332     }
1333
1334     if (!u->mixer_path->has_mute) {
1335         pa_source_set_get_mute_callback(u->source, NULL);
1336         pa_source_set_set_mute_callback(u->source, NULL);
1337         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1338     } else {
1339         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1340         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1341         pa_log_info("Using hardware mute control.");
1342     }
1343 }
1344
1345 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1346     struct userdata *u = s->userdata;
1347     pa_alsa_port_data *data;
1348
1349     pa_assert(u);
1350     pa_assert(p);
1351     pa_assert(u->mixer_handle);
1352
1353     data = PA_DEVICE_PORT_DATA(p);
1354
1355     pa_assert_se(u->mixer_path = data->path);
1356     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1357
1358     mixer_volume_init(u);
1359
1360     if (data->setting)
1361         pa_alsa_setting_select(data->setting, u->mixer_handle);
1362
1363     if (s->set_mute)
1364         s->set_mute(s);
1365     if (s->set_volume)
1366         s->set_volume(s);
1367
1368     return 0;
1369 }
1370
1371 static void source_update_requested_latency_cb(pa_source *s) {
1372     struct userdata *u = s->userdata;
1373     pa_assert(u);
1374     pa_assert(u->use_tsched); /* only when timer scheduling is used
1375                                * we can dynamically adjust the
1376                                * latency */
1377
1378     if (!u->pcm_handle)
1379         return;
1380
1381     update_sw_params(u);
1382 }
1383
/* The realtime IO thread: reads audio from the device, keeps the timing
 * smoother up to date, applies deferred volume changes and sleeps on the
 * rtpoll until the next deadline or ALSA event. */
static void thread_func(void *userdata) {
    struct userdata *u = userdata;
    unsigned short revents = 0;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority);

    pa_thread_mq_install(&u->thread_mq);

    for (;;) {
        int ret;
        pa_usec_t rtpoll_sleep = 0;

#ifdef DEBUG_TIMING
        pa_log_debug("Loop");
#endif

        /* Read some data and pass it to the sources */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            int work_done;
            pa_usec_t sleep_usec = 0;
            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);

            /* First iteration after open/resume: actually start the
             * capture stream and resume the timing smoother */
            if (u->first) {
                pa_log_info("Starting capture.");
                snd_pcm_start(u->pcm_handle);

                pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);

                u->first = FALSE;
            }

            if (u->use_mmap)
                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
            else
                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);

            if (work_done < 0)
                goto fail;

/*             pa_log_debug("work_done = %i", work_done); */

            if (work_done)
                update_smoother(u);

            if (u->use_tsched) {
                pa_usec_t cusec;

                /* OK, the capture buffer is now empty, let's
                 * calculate when to wake up next */

/*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */

                /* Convert from the sound card time domain to the
                 * system time domain */
                cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);

/*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */

                /* We don't trust the conversion, so we wake up whatever comes first */
                rtpoll_sleep = PA_MIN(sleep_usec, cusec);
            }
        }

        /* If a deferred (hardware-synchronized) volume change is pending,
         * make sure we wake up in time to apply it. Note that this runs
         * even when the source is suspended/closed, so pending changes
         * are not lost while the device is down. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            pa_usec_t volume_sleep;
            pa_source_volume_change_apply(u->source, &volume_sleep);
            if (volume_sleep > 0) {
                if (rtpoll_sleep > 0)
                    rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
                else
                    rtpoll_sleep = volume_sleep;
            }
        }

        if (rtpoll_sleep > 0)
            pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
        else
            pa_rtpoll_set_timer_disabled(u->rtpoll);

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
            goto fail;

        /* Apply any deferred volume change that became due while we slept
         * (intentionally not guarded by PA_SOURCE_IS_OPENED) */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
            pa_source_volume_change_apply(u->source, NULL);

        /* ret == 0 means the rtpoll was asked to quit */
        if (ret == 0)
            goto finish;

        /* Tell ALSA about this and process its response */
        if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
            struct pollfd *pollfd;
            int err;
            unsigned n;

            pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);

            if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
                pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
                goto fail;
            }

            /* Anything besides POLLIN indicates an error/suspend
             * condition: try to recover and restart the stream on the
             * next iteration */
            if (revents & ~POLLIN) {
                if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
                    goto fail;

                u->first = TRUE;
                revents = 0;
            } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
                pa_log_debug("Wakeup from ALSA!");

        } else
            revents = 0;
    }

fail:
    /* If this was no regular exit from the loop we have to continue
     * processing messages until we received PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}
1513
1514 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1515     const char *n;
1516     char *t;
1517
1518     pa_assert(data);
1519     pa_assert(ma);
1520     pa_assert(device_name);
1521
1522     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1523         pa_source_new_data_set_name(data, n);
1524         data->namereg_fail = TRUE;
1525         return;
1526     }
1527
1528     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1529         data->namereg_fail = TRUE;
1530     else {
1531         n = device_id ? device_id : device_name;
1532         data->namereg_fail = FALSE;
1533     }
1534
1535     if (mapping)
1536         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1537     else
1538         t = pa_sprintf_malloc("alsa_input.%s", n);
1539
1540     pa_source_new_data_set_name(data, t);
1541     pa_xfree(t);
1542 }
1543
/* Locate a usable mixer for the PCM device. On success u->mixer_handle and
 * either u->mixer_path (explicit element) or u->mixer_path_set (mapping)
 * are set; on failure everything is cleaned up and left NULL, which makes
 * the caller fall back to software volume/mute. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Without a mapping or an explicit mixer element there is nothing to probe */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* Explicit element requested: synthesize a single path for it */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe the mapping's whole set of capture paths */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    /* At most one of path_set/path was created above; free whichever it was */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1589
/* Activate the appropriate mixer path (from the active port or the probed
 * path/path set), hook up volume/mute handling and register for hardware
 * mixer events. Returns 0 on success (including "no mixer"), -1 on error.
 * NOTE(review): the ignore_dB parameter is unused here -- probing already
 * happened in find_mixer(); confirm before removing. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer was found earlier -- software volume/mute only */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            return 0;
    }

    /* Wire up the volume/mute callbacks matching the selected path */
    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* With deferred volume the mixer is polled from the IO thread's
         * rtpoll; otherwise events arrive via the main loop's fd list */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1670
1671 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1672
1673     struct userdata *u = NULL;
1674     const char *dev_id = NULL;
1675     pa_sample_spec ss;
1676     pa_channel_map map;
1677     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1678     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1679     size_t frame_size;
1680     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE;
1681     pa_source_new_data data;
1682     pa_alsa_profile_set *profile_set = NULL;
1683
1684     pa_assert(m);
1685     pa_assert(ma);
1686
1687     ss = m->core->default_sample_spec;
1688     map = m->core->default_channel_map;
1689     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1690         pa_log("Failed to parse sample specification and channel map");
1691         goto fail;
1692     }
1693
1694     frame_size = pa_frame_size(&ss);
1695
1696     nfrags = m->core->default_n_fragments;
1697     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1698     if (frag_size <= 0)
1699         frag_size = (uint32_t) frame_size;
1700     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1701     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1702
1703     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1704         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1705         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1706         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1707         pa_log("Failed to parse buffer metrics");
1708         goto fail;
1709     }
1710
1711     buffer_size = nfrags * frag_size;
1712
1713     period_frames = frag_size/frame_size;
1714     buffer_frames = buffer_size/frame_size;
1715     tsched_frames = tsched_size/frame_size;
1716
1717     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1718         pa_log("Failed to parse mmap argument.");
1719         goto fail;
1720     }
1721
1722     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1723         pa_log("Failed to parse tsched argument.");
1724         goto fail;
1725     }
1726
1727     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1728         pa_log("Failed to parse ignore_dB argument.");
1729         goto fail;
1730     }
1731
1732     deferred_volume = m->core->deferred_volume;
1733     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1734         pa_log("Failed to parse deferred_volume argument.");
1735         goto fail;
1736     }
1737
1738     use_tsched = pa_alsa_may_tsched(use_tsched);
1739
1740     u = pa_xnew0(struct userdata, 1);
1741     u->core = m->core;
1742     u->module = m;
1743     u->use_mmap = use_mmap;
1744     u->use_tsched = use_tsched;
1745     u->deferred_volume = deferred_volume;
1746     u->first = TRUE;
1747     u->rtpoll = pa_rtpoll_new();
1748     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1749
1750     u->smoother = pa_smoother_new(
1751             SMOOTHER_ADJUST_USEC,
1752             SMOOTHER_WINDOW_USEC,
1753             TRUE,
1754             TRUE,
1755             5,
1756             pa_rtclock_now(),
1757             TRUE);
1758     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1759
1760     dev_id = pa_modargs_get_value(
1761             ma, "device_id",
1762             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1763
1764     if (reserve_init(u, dev_id) < 0)
1765         goto fail;
1766
1767     if (reserve_monitor_init(u, dev_id) < 0)
1768         goto fail;
1769
1770     b = use_mmap;
1771     d = use_tsched;
1772
1773     if (mapping) {
1774
1775         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1776             pa_log("device_id= not set");
1777             goto fail;
1778         }
1779
1780         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1781                       dev_id,
1782                       &u->device_name,
1783                       &ss, &map,
1784                       SND_PCM_STREAM_CAPTURE,
1785                       &period_frames, &buffer_frames, tsched_frames,
1786                       &b, &d, mapping)))
1787             goto fail;
1788
1789     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1790
1791         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1792             goto fail;
1793
1794         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1795                       dev_id,
1796                       &u->device_name,
1797                       &ss, &map,
1798                       SND_PCM_STREAM_CAPTURE,
1799                       &period_frames, &buffer_frames, tsched_frames,
1800                       &b, &d, profile_set, &mapping)))
1801             goto fail;
1802
1803     } else {
1804
1805         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1806                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1807                       &u->device_name,
1808                       &ss, &map,
1809                       SND_PCM_STREAM_CAPTURE,
1810                       &period_frames, &buffer_frames, tsched_frames,
1811                       &b, &d, FALSE)))
1812             goto fail;
1813     }
1814
1815     pa_assert(u->device_name);
1816     pa_log_info("Successfully opened device %s.", u->device_name);
1817
1818     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1819         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1820         goto fail;
1821     }
1822
1823     if (mapping)
1824         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1825
1826     if (use_mmap && !b) {
1827         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1828         u->use_mmap = use_mmap = FALSE;
1829     }
1830
1831     if (use_tsched && (!b || !d)) {
1832         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1833         u->use_tsched = use_tsched = FALSE;
1834     }
1835
1836     if (u->use_mmap)
1837         pa_log_info("Successfully enabled mmap() mode.");
1838
1839     if (u->use_tsched)
1840         pa_log_info("Successfully enabled timer-based scheduling mode.");
1841
1842     /* ALSA might tweak the sample spec, so recalculate the frame size */
1843     frame_size = pa_frame_size(&ss);
1844
1845     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1846
1847     pa_source_new_data_init(&data);
1848     data.driver = driver;
1849     data.module = m;
1850     data.card = card;
1851     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1852
1853     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1854      * variable instead of using &data.namereg_fail directly, because
1855      * data.namereg_fail is a bitfield and taking the address of a bitfield
1856      * variable is impossible. */
1857     namereg_fail = data.namereg_fail;
1858     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1859         pa_log("Failed to parse namereg_fail argument.");
1860         pa_source_new_data_done(&data);
1861         goto fail;
1862     }
1863     data.namereg_fail = namereg_fail;
1864
1865     pa_source_new_data_set_sample_spec(&data, &ss);
1866     pa_source_new_data_set_channel_map(&data, &map);
1867
1868     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1869     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1870     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1871     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1872     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1873
1874     if (mapping) {
1875         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1876         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1877     }
1878
1879     pa_alsa_init_description(data.proplist);
1880
1881     if (u->control_device)
1882         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1883
1884     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1885         pa_log("Invalid properties");
1886         pa_source_new_data_done(&data);
1887         goto fail;
1888     }
1889
1890     if (u->mixer_path_set)
1891         pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1892
1893     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1894     pa_source_new_data_done(&data);
1895
1896     if (!u->source) {
1897         pa_log("Failed to create source object");
1898         goto fail;
1899     }
1900
1901     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1902                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1903         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1904         goto fail;
1905     }
1906
1907     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1908                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1909         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1910         goto fail;
1911     }
1912
1913     u->source->parent.process_msg = source_process_msg;
1914     if (u->use_tsched)
1915         u->source->update_requested_latency = source_update_requested_latency_cb;
1916     u->source->set_state = source_set_state_cb;
1917     u->source->set_port = source_set_port_cb;
1918     u->source->userdata = u;
1919
1920     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1921     pa_source_set_rtpoll(u->source, u->rtpoll);
1922
1923     u->frame_size = frame_size;
1924     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1925     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1926     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1927
1928     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1929                 (double) u->hwbuf_size / (double) u->fragment_size,
1930                 (long unsigned) u->fragment_size,
1931                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1932                 (long unsigned) u->hwbuf_size,
1933                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1934
1935     if (u->use_tsched) {
1936         u->tsched_watermark_ref = tsched_watermark;
1937         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1938     }
1939     else
1940         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1941
1942     reserve_update(u);
1943
1944     if (update_sw_params(u) < 0)
1945         goto fail;
1946
1947     if (setup_mixer(u, ignore_dB) < 0)
1948         goto fail;
1949
1950     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1951
1952     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1953         pa_log("Failed to create thread.");
1954         goto fail;
1955     }
1956
1957     /* Get initial mixer settings */
1958     if (data.volume_is_set) {
1959         if (u->source->set_volume)
1960             u->source->set_volume(u->source);
1961     } else {
1962         if (u->source->get_volume)
1963             u->source->get_volume(u->source);
1964     }
1965
1966     if (data.muted_is_set) {
1967         if (u->source->set_mute)
1968             u->source->set_mute(u->source);
1969     } else {
1970         if (u->source->get_mute)
1971             u->source->get_mute(u->source);
1972     }
1973
1974     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
1975         u->source->write_volume(u->source);
1976
1977     pa_source_put(u->source);
1978
1979     if (profile_set)
1980         pa_alsa_profile_set_free(profile_set);
1981
1982     return u->source;
1983
1984 fail:
1985
1986     if (u)
1987         userdata_free(u);
1988
1989     if (profile_set)
1990         pa_alsa_profile_set_free(profile_set);
1991
1992     return NULL;
1993 }
1994
/* Tear down everything owned by the userdata struct and free it.
 *
 * The teardown order matters and must not be reshuffled:
 *
 *  1. Unlink the source from the core first, while the IO thread is still
 *     running, so that the unlink can be processed normally (streams moved
 *     away, state messages handled).  NOTE(review): keeping the thread
 *     alive across the unlink also appears to be what lets any still-queued
 *     deferred (hardware) volume change be written out before the device is
 *     closed — confirm against the deferred-volume message flow.
 *  2. Only then shut down the IO thread (synchronous PA_MESSAGE_SHUTDOWN,
 *     so the thread has fully exited before we free its resources).
 *  3. After the thread is gone it is safe to drop the message queues, the
 *     rtpoll machinery, the PCM handle and the mixer objects it was using.
 *
 * Safe to call on a partially initialized struct (each step is guarded by
 * a NULL check), which is why the fail: path of pa_alsa_source_new() can
 * use it. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Step 1: detach the source from the core while the thread still runs. */
    if (u->source)
        pa_source_unlink(u->source);

    /* Step 2: synchronously stop the IO thread. pa_asyncmsgq_send() blocks
     * until the shutdown message has been processed. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    /* Drop our reference; the object may outlive us if someone else still
     * holds a ref, but it is already unlinked. */
    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop capture and close the ALSA device. snd_pcm_drop() discards any
     * pending captured frames instead of draining them. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* A path set owns its paths, so free either the set or the single
     * standalone path — never both. */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation and its monitor. */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2046
2047 void pa_alsa_source_free(pa_source *s) {
2048     struct userdata *u;
2049
2050     pa_source_assert_ref(s);
2051     pa_assert_se(u = s->userdata);
2052
2053     userdata_free(u);
2054 }