alsa: reset watermark to initial values on resume
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
/* #define DEBUG_TIMING */

#define DEFAULT_DEVICE "default"

/* Defaults for timer-based scheduling (tsched): total hardware buffer and
 * the initial wakeup watermark. */
#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */

/* Dynamic watermark adjustment: how much to grow/shrink it, how long to
 * wait between decreases, and the fill-level thresholds that trigger a
 * change (see check_left_to_record()). */
#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */

/* Lower bounds for the sleep/wakeup split computed in hw_sleep_time(). */
#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */

/* Parameters for the clock smoother used for latency interpolation. */
#define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
#define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */

/* Bounds for the exponentially growing smoother update interval
 * (see update_smoother()). */
#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */

#define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
/* Per-instance state for one ALSA capture device, shared between the main
 * thread and the IO thread. */
struct userdata {
    pa_core *core;
    pa_module *module;
    pa_source *source;

    pa_thread *thread;          /* the IO thread */
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    snd_pcm_t *pcm_handle;      /* NULL while the device is suspended */

    /* Mixer/volume handling */
    pa_alsa_fdlist *mixer_fdl;
    pa_alsa_mixer_pdata *mixer_pd;
    snd_mixer_t *mixer_handle;
    pa_alsa_path_set *mixer_path_set;
    pa_alsa_path *mixer_path;

    pa_cvolume hardware_volume;

    size_t
        frame_size,              /* bytes per frame */
        fragment_size,           /* ALSA period size, in bytes */
        hwbuf_size,              /* ALSA buffer size, in bytes */
        tsched_watermark,        /* current wakeup watermark, in bytes */
        tsched_watermark_ref,    /* configured watermark, kept as reference for reset on resume */
        hwbuf_unused,            /* part of the hw buffer we keep unused to honour requested latency */
        min_sleep,               /* lower bound for sleep time, in bytes (see fix_min_sleep_wakeup()) */
        min_wakeup,              /* lower bound for the watermark, in bytes */
        watermark_inc_step,
        watermark_dec_step,
        watermark_inc_threshold,
        watermark_dec_threshold;

    pa_usec_t watermark_dec_not_before;  /* holdoff timestamp; 0 = not armed (see decrease_watermark()) */
    pa_usec_t min_latency_ref;           /* min latency recorded on the main thread, reused from IO context */

    char *device_name;  /* name of the PCM device */
    char *control_device; /* name of the control device */

    pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;

    pa_bool_t first;    /* set on start and after stream recovery (see try_recover()) */

    pa_rtpoll_item *alsa_rtpoll_item;

    snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];

    /* Latency interpolation state */
    pa_smoother *smoother;
    uint64_t read_count;             /* total bytes read from the device */
    pa_usec_t smoother_interval;     /* current update interval, grows up to SMOOTHER_MAX_INTERVAL */
    pa_usec_t last_smoother_update;

    /* Device reservation (session-level device sharing) */
    pa_reserve_wrapper *reserve;
    pa_hook_slot *reserve_slot;
    pa_reserve_monitor_wrapper *monitor;
    pa_hook_slot *monitor_slot;
};
139
140 static void userdata_free(struct userdata *u);
141
142 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
143     pa_assert(r);
144     pa_assert(u);
145
146     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
147         return PA_HOOK_CANCEL;
148
149     return PA_HOOK_OK;
150 }
151
152 static void reserve_done(struct userdata *u) {
153     pa_assert(u);
154
155     if (u->reserve_slot) {
156         pa_hook_slot_free(u->reserve_slot);
157         u->reserve_slot = NULL;
158     }
159
160     if (u->reserve) {
161         pa_reserve_wrapper_unref(u->reserve);
162         u->reserve = NULL;
163     }
164 }
165
166 static void reserve_update(struct userdata *u) {
167     const char *description;
168     pa_assert(u);
169
170     if (!u->source || !u->reserve)
171         return;
172
173     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
174         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
175 }
176
177 static int reserve_init(struct userdata *u, const char *dname) {
178     char *rname;
179
180     pa_assert(u);
181     pa_assert(dname);
182
183     if (u->reserve)
184         return 0;
185
186     if (pa_in_system_mode())
187         return 0;
188
189     if (!(rname = pa_alsa_get_reserve_name(dname)))
190         return 0;
191
192     /* We are resuming, try to lock the device */
193     u->reserve = pa_reserve_wrapper_get(u->core, rname);
194     pa_xfree(rname);
195
196     if (!(u->reserve))
197         return -1;
198
199     reserve_update(u);
200
201     pa_assert(!u->reserve_slot);
202     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
203
204     return 0;
205 }
206
207 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
208     pa_bool_t b;
209
210     pa_assert(w);
211     pa_assert(u);
212
213     b = PA_PTR_TO_UINT(busy) && !u->reserve;
214
215     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
216     return PA_HOOK_OK;
217 }
218
219 static void monitor_done(struct userdata *u) {
220     pa_assert(u);
221
222     if (u->monitor_slot) {
223         pa_hook_slot_free(u->monitor_slot);
224         u->monitor_slot = NULL;
225     }
226
227     if (u->monitor) {
228         pa_reserve_monitor_wrapper_unref(u->monitor);
229         u->monitor = NULL;
230     }
231 }
232
233 static int reserve_monitor_init(struct userdata *u, const char *dname) {
234     char *rname;
235
236     pa_assert(u);
237     pa_assert(dname);
238
239     if (pa_in_system_mode())
240         return 0;
241
242     if (!(rname = pa_alsa_get_reserve_name(dname)))
243         return 0;
244
245     /* We are resuming, try to lock the device */
246     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
247     pa_xfree(rname);
248
249     if (!(u->monitor))
250         return -1;
251
252     pa_assert(!u->monitor_slot);
253     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
254
255     return 0;
256 }
257
258 static void fix_min_sleep_wakeup(struct userdata *u) {
259     size_t max_use, max_use_2;
260
261     pa_assert(u);
262     pa_assert(u->use_tsched);
263
264     max_use = u->hwbuf_size - u->hwbuf_unused;
265     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
266
267     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
268     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
269
270     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
271     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
272 }
273
274 static void fix_tsched_watermark(struct userdata *u) {
275     size_t max_use;
276     pa_assert(u);
277     pa_assert(u->use_tsched);
278
279     max_use = u->hwbuf_size - u->hwbuf_unused;
280
281     if (u->tsched_watermark > max_use - u->min_sleep)
282         u->tsched_watermark = max_use - u->min_sleep;
283
284     if (u->tsched_watermark < u->min_wakeup)
285         u->tsched_watermark = u->min_wakeup;
286 }
287
/* Called from IO context. Raise the wakeup watermark after an overrun or a
 * near-overrun; if the watermark is already at its ceiling, widen the
 * source's minimum latency instead. */
static void increase_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t old_min_latency, new_min_latency;

    pa_assert(u);
    pa_assert(u->use_tsched);

    /* First, just try to increase the watermark */
    old_watermark = u->tsched_watermark;
    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark) {
        pa_log_info("Increasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
        return;
    }

    /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
    old_min_latency = u->source->thread_info.min_latency;
    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
    new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);

    if (old_min_latency != new_min_latency) {
        pa_log_info("Increasing minimal latency to %0.2f ms",
                    (double) new_min_latency / PA_USEC_PER_MSEC);

        pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
    }

    /* If we get here both the watermark and the latency range are already
     * maxed out -- there is nothing more we can do against overruns. */
}
320
/* Called from IO context. Lower the wakeup watermark again, but only after
 * things have run smoothly for TSCHED_WATERMARK_VERIFY_AFTER_USEC since the
 * holdoff timestamp was armed. */
static void decrease_watermark(struct userdata *u) {
    size_t old_watermark;
    pa_usec_t now;

    pa_assert(u);
    pa_assert(u->use_tsched);

    now = pa_rtclock_now();

    /* Holdoff not armed yet (pa_usec_t is unsigned, so this is really a
     * test for 0): just start the verification period. */
    if (u->watermark_dec_not_before <= 0)
        goto restart;

    /* Still inside the verification period, leave the watermark alone. */
    if (u->watermark_dec_not_before > now)
        return;

    old_watermark = u->tsched_watermark;

    /* Shrink by the decrement step, but never by more than half. */
    if (u->tsched_watermark < u->watermark_dec_step)
        u->tsched_watermark = u->tsched_watermark / 2;
    else
        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);

    fix_tsched_watermark(u);

    if (old_watermark != u->tsched_watermark)
        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);

    /* We don't change the latency range*/

restart:
    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
}
354
/* Called from IO context. Split the effective latency into the time we may
 * sleep (*sleep_usec) and the time reserved for processing (*process_usec,
 * the watermark expressed in time). */
static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
    pa_usec_t wm, usec;

    pa_assert(sleep_usec);
    pa_assert(process_usec);

    pa_assert(u);
    pa_assert(u->use_tsched);

    usec = pa_source_get_requested_latency_within_thread(u->source);

    /* Nobody requested a specific latency: use the whole hardware buffer. */
    if (usec == (pa_usec_t) -1)
        usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);

    wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

    /* Never let the watermark consume the entire latency budget. */
    if (wm > usec)
        wm = usec/2;

    *sleep_usec = usec - wm;
    *process_usec = wm;

#ifdef DEBUG_TIMING
    pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
                 (unsigned long) (usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
                 (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
#endif
}
384
385 static int try_recover(struct userdata *u, const char *call, int err) {
386     pa_assert(u);
387     pa_assert(call);
388     pa_assert(err < 0);
389
390     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
391
392     pa_assert(err != -EAGAIN);
393
394     if (err == -EPIPE)
395         pa_log_debug("%s: Buffer overrun!", call);
396
397     if (err == -ESTRPIPE)
398         pa_log_debug("%s: System suspended!", call);
399
400     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
401         pa_log("%s: %s", call, pa_alsa_strerror(err));
402         return -1;
403     }
404
405     u->first = TRUE;
406     return 0;
407 }
408
/* Called from IO context. Given that n_bytes are ready to be read, return
 * how much space is still left in the hardware buffer, and adjust the
 * tsched watermark: raise it on (near-)overrun, lower it -- only when woken
 * by the timer -- while we stay comfortably ahead of the deadlines. */
static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
    size_t left_to_record;
    size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
    pa_bool_t overrun = FALSE;

    /* We use <= instead of < for this check here because an overrun
     * only happens after the last sample was processed, not already when
     * it is removed from the buffer. This is particularly important
     * when block transfer is used. */

    if (n_bytes <= rec_space)
        left_to_record = rec_space - n_bytes;
    else {

        /* We got a dropout. What a mess! */
        left_to_record = 0;
        overrun = TRUE;

#ifdef DEBUG_TIMING
        PA_DEBUG_TRAP;
#endif

        if (pa_log_ratelimit(PA_LOG_INFO))
            pa_log_info("Overrun!");
    }

#ifdef DEBUG_TIMING
    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
#endif

    if (u->use_tsched) {
        pa_bool_t reset_not_before = TRUE;

        if (overrun || left_to_record < u->watermark_inc_threshold)
            increase_watermark(u);
        else if (left_to_record > u->watermark_dec_threshold) {
            reset_not_before = FALSE;

            /* We decrease the watermark only if have actually
             * been woken up by a timeout. If something else woke
             * us up it's too easy to fulfill the deadlines... */

            if (on_timeout)
                decrease_watermark(u);
        }

        /* Fill level was not comfortably high: re-arm the decrease holdoff
         * so the watermark is not lowered prematurely. */
        if (reset_not_before)
            u->watermark_dec_not_before = 0;
    }

    return left_to_record;
}
461
/* Called from IO context. Move as much captured audio as possible out of
 * the device's mmap'ed hardware buffer and post it to the source.
 *
 * polled     - TRUE if we got here because poll() signalled POLLIN
 * on_timeout - TRUE if we were woken by the tsched timer (only then may the
 *              watermark be lowered, see check_left_to_record())
 *
 * In tsched mode *sleep_usec is set to how long the IO thread may sleep
 * before servicing the device again. Returns 1 if any data was moved,
 * 0 if not, a negative value on unrecoverable error. */
static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    pa_bool_t work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much is available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;

#ifdef DEBUG_TIMING
        pa_log_debug("avail: %lu", (unsigned long) n_bytes);
#endif

        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* Plenty of headroom left and poll() didn't wake us: too early to
         * read, go back to sleep. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
#ifdef DEBUG_TIMING
                pa_log_debug("Not reading, because too early.");
#endif
                break;
            }

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read points at a driver bug;
             * complain, but only once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

#ifdef DEBUG_TIMING
            pa_log_debug("Not reading, because not necessary.");
#endif
            break;
        }


        /* Cap the number of outer iterations so a constantly refilling
         * device cannot starve the rest of the IO loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

#ifdef DEBUG_TIMING
        pa_log_debug("Reading");
#endif

        /* Inner loop: consume the available bytes in mmap chunks. */
        for (;;) {
            pa_memchunk chunk;
            void *p;
            int err;
            const snd_pcm_channel_area_t *areas;
            snd_pcm_uframes_t offset, frames;
            snd_pcm_sframes_t sframes;

            frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
/*             pa_log_debug("%lu frames to read", (unsigned long) frames); */

            if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

                if (!after_avail && err == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
                    continue;

                return r;
            }

            /* Make sure that if these memblocks need to be copied they will fit into one slot */
            if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
                frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;

            if (!after_avail && frames == 0)
                break;

            pa_assert(frames > 0);
            after_avail = FALSE;

            /* Check these are multiples of 8 bit */
            pa_assert((areas[0].first & 7) == 0);
            pa_assert((areas[0].step & 7)== 0);

            /* We assume a single interleaved memory buffer */
            pa_assert((areas[0].first >> 3) == 0);
            pa_assert((areas[0].step >> 3) == u->frame_size);

            p = (uint8_t*) areas[0].addr + (offset * u->frame_size);

            /* Wrap the mmap'ed region in a fixed memblock so it can be
             * posted without copying. */
            chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
            chunk.length = pa_memblock_get_length(chunk.memblock);
            chunk.index = 0;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref_fixed(chunk.memblock);

            if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {

                if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
                    continue;

                return r;
            }

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

#ifdef DEBUG_TIMING
            pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
#endif

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the fill level is expected to reach the watermark. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
623
/* Called from IO context. Like mmap_read(), but copies the captured data
 * with snd_pcm_readi() into freshly allocated memblocks, for devices
 * without mmap support. Same parameters and return convention. */
static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
    int work_done = FALSE;
    pa_usec_t max_sleep_usec = 0, process_usec = 0;
    size_t left_to_record;
    unsigned j = 0;

    pa_assert(u);
    pa_source_assert_ref(u->source);

    if (u->use_tsched)
        hw_sleep_time(u, &max_sleep_usec, &process_usec);

    for (;;) {
        snd_pcm_sframes_t n;
        size_t n_bytes;
        int r;
        pa_bool_t after_avail = TRUE;

        /* How much is available for reading right now? */
        if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {

            if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
                continue;

            return r;
        }

        n_bytes = (size_t) n * u->frame_size;
        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
        on_timeout = FALSE;

        /* Plenty of headroom left and poll() didn't wake us: too early to
         * read, go back to sleep. */
        if (u->use_tsched)
            if (!polled &&
                pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
                break;

        if (PA_UNLIKELY(n_bytes <= 0)) {

            /* POLLIN with nothing to read points at a driver bug;
             * complain, but only once. */
            if (polled)
                PA_ONCE_BEGIN {
                    char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
                    pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
                             "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
                             "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
                           pa_strnull(dn));
                    pa_xfree(dn);
                } PA_ONCE_END;

            break;
        }

        /* Cap the number of outer iterations so a constantly refilling
         * device cannot starve the rest of the IO loop. */
        if (++j > 10) {
#ifdef DEBUG_TIMING
            pa_log_debug("Not filling up, because already too many iterations.");
#endif

            break;
        }

        polled = FALSE;

        /* Inner loop: read the available bytes one memblock at a time. */
        for (;;) {
            void *p;
            snd_pcm_sframes_t frames;
            pa_memchunk chunk;

            chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);

            frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);

            if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
                frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);

/*             pa_log_debug("%lu frames to read", (unsigned long) n); */

            p = pa_memblock_acquire(chunk.memblock);
            frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
            pa_memblock_release(chunk.memblock);

            if (PA_UNLIKELY(frames < 0)) {
                pa_memblock_unref(chunk.memblock);

                if (!after_avail && (int) frames == -EAGAIN)
                    break;

                if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
                    continue;

                return r;
            }

            if (!after_avail && frames == 0) {
                pa_memblock_unref(chunk.memblock);
                break;
            }

            pa_assert(frames > 0);
            after_avail = FALSE;

            chunk.index = 0;
            chunk.length = (size_t) frames * u->frame_size;

            pa_source_post(u->source, &chunk);
            pa_memblock_unref(chunk.memblock);

            work_done = TRUE;

            u->read_count += frames * u->frame_size;

/*             pa_log_debug("read %lu frames", (unsigned long) frames); */

            if ((size_t) frames * u->frame_size >= n_bytes)
                break;

            n_bytes -= (size_t) frames * u->frame_size;
        }
    }

    if (u->use_tsched) {
        /* Sleep until the fill level is expected to reach the watermark. */
        *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
        process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);

        if (*sleep_usec > process_usec)
            *sleep_usec -= process_usec;
        else
            *sleep_usec = 0;
    }

    return work_done ? 1 : 0;
}
753
/* Called from IO context. Feed a new (system time, stream time) pair into
 * the smoother used for latency interpolation, throttled by an interval
 * that grows exponentially up to SMOOTHER_MAX_INTERVAL. */
static void update_smoother(struct userdata *u) {
    snd_pcm_sframes_t delay = 0;
    uint64_t position;
    int err;
    pa_usec_t now1 = 0, now2;
    snd_pcm_status_t *status;

    snd_pcm_status_alloca(&status);

    pa_assert(u);
    pa_assert(u->pcm_handle);

    /* Let's update the time smoother */

    if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
        pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
        return;
    }

    if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
        pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
    else {
        snd_htimestamp_t htstamp = { 0, 0 };
        snd_pcm_status_get_htstamp(status, &htstamp);
        now1 = pa_timespec_load(&htstamp);
    }

    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
    if (now1 <= 0)
        now1 = pa_rtclock_now();

    /* check if the time since the last update is bigger than the interval */
    if (u->last_smoother_update > 0)
        if (u->last_smoother_update + u->smoother_interval > now1)
            return;

    /* Stream position = bytes handed out so far plus what still sits in
     * the hardware buffer. */
    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
    now2 = pa_bytes_to_usec(position, &u->source->sample_spec);

    pa_smoother_put(u->smoother, now1, now2);

    u->last_smoother_update = now1;
    /* exponentially increase the update interval up to the MAX limit */
    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
}
799
800 static pa_usec_t source_get_latency(struct userdata *u) {
801     int64_t delay;
802     pa_usec_t now1, now2;
803
804     pa_assert(u);
805
806     now1 = pa_rtclock_now();
807     now2 = pa_smoother_get(u->smoother, now1);
808
809     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
810
811     return delay >= 0 ? (pa_usec_t) delay : 0;
812 }
813
814 static int build_pollfd(struct userdata *u) {
815     pa_assert(u);
816     pa_assert(u->pcm_handle);
817
818     if (u->alsa_rtpoll_item)
819         pa_rtpoll_item_free(u->alsa_rtpoll_item);
820
821     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
822         return -1;
823
824     return 0;
825 }
826
827 /* Called from IO context */
828 static int suspend(struct userdata *u) {
829     pa_assert(u);
830     pa_assert(u->pcm_handle);
831
832     pa_smoother_pause(u->smoother, pa_rtclock_now());
833
834     /* Let's suspend */
835     snd_pcm_close(u->pcm_handle);
836     u->pcm_handle = NULL;
837
838     if (u->alsa_rtpoll_item) {
839         pa_rtpoll_item_free(u->alsa_rtpoll_item);
840         u->alsa_rtpoll_item = NULL;
841     }
842
843     pa_log_info("Device suspended...");
844
845     return 0;
846 }
847
/* Called from IO context. Recompute hwbuf_unused from the currently
 * requested latency, refresh the tsched bounds, and push the resulting
 * avail_min to ALSA's software parameters. Returns 0 on success, a
 * negative ALSA error code on failure. */
static int update_sw_params(struct userdata *u) {
    snd_pcm_uframes_t avail_min;
    int err;

    pa_assert(u);

    /* Use the full buffer if no one asked us for anything specific */
    u->hwbuf_unused = 0;

    if (u->use_tsched) {
        pa_usec_t latency;

        if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
            size_t b;

            pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);

            b = pa_usec_to_bytes(latency, &u->source->sample_spec);

            /* We need at least one sample in our buffer */

            if (PA_UNLIKELY(b < u->frame_size))
                b = u->frame_size;

            /* Everything beyond the requested latency stays unused. */
            u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
        }

        fix_min_sleep_wakeup(u);
        fix_tsched_watermark(u);
    }

    pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);

    /* avail_min of 1 frame: wake as soon as any data is available. */
    avail_min = 1;

    if (u->use_tsched) {
        pa_usec_t sleep_usec, process_usec;

        /* In tsched mode, only wake us once the planned sleep time is over. */
        hw_sleep_time(u, &sleep_usec, &process_usec);
        avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
    }

    pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);

    if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
        pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
        return err;
    }

    return 0;
}
900
/* Called from IO Context on unsuspend or from main thread when creating source.
 * Re-derive the wakeup watermark and its adjustment steps/thresholds from
 * tsched_watermark (given in sample spec *ss) and set the source's latency
 * range accordingly. */
static void reset_watermark(struct userdata *u, size_t tsched_watermark, pa_sample_spec *ss,
                            pa_bool_t in_thread)
{
    /* Convert the watermark from the spec it was configured against (ss)
     * into the source's actual sample spec, rounding up. */
    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, ss),
                                                    &u->source->sample_spec);

    u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
    u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);

    u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
    u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);

    fix_min_sleep_wakeup(u);
    fix_tsched_watermark(u);

    if (in_thread)
        pa_source_set_latency_range_within_thread(u->source,
                                                  u->min_latency_ref,
                                                  pa_bytes_to_usec(u->hwbuf_size, ss));
    else {
        pa_source_set_latency_range(u->source,
                                    0,
                                    pa_bytes_to_usec(u->hwbuf_size, ss));

        /* work-around assert in pa_source_set_latency_within_thread():
           keep track of min_latency and reuse it when
           this routine is called from IO context */
        u->min_latency_ref = u->source->thread_info.min_latency;
    }

    pa_log_info("Time scheduling watermark is %0.2fms",
                (double) pa_bytes_to_usec(u->tsched_watermark, ss) / PA_USEC_PER_MSEC);
}
935
/* Called from IO context */
static int unsuspend(struct userdata *u) {
    pa_sample_spec ss;
    int err;
    pa_bool_t b, d;
    snd_pcm_uframes_t period_size, buffer_size;

    pa_assert(u);
    pa_assert(!u->pcm_handle);

    pa_log_info("Trying resume...");

    /* Reopen the device non-blocking, with all automatic ALSA plugin
     * conversions disabled, so we either get exactly the configuration we
     * had before the suspend or fail. */
    if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
                            SND_PCM_NONBLOCK|
                            SND_PCM_NO_AUTO_RESAMPLE|
                            SND_PCM_NO_AUTO_CHANNELS|
                            SND_PCM_NO_AUTO_FORMAT)) < 0) {
        pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
        goto fail;
    }

    /* Request the same hw params as before the suspend ... */
    ss = u->source->sample_spec;
    period_size = u->fragment_size / u->frame_size;
    buffer_size = u->hwbuf_size / u->frame_size;
    b = u->use_mmap;
    d = u->use_tsched;

    if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
        pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
        goto fail;
    }

    /* ... and fail the resume if the device can no longer provide them,
     * since everything set up on top of this source depends on them. */
    if (b != u->use_mmap || d != u->use_tsched) {
        pa_log_warn("Resume failed, couldn't get original access mode.");
        goto fail;
    }

    if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
        pa_log_warn("Resume failed, couldn't restore original sample settings.");
        goto fail;
    }

    if (period_size*u->frame_size != u->fragment_size ||
        buffer_size*u->frame_size != u->hwbuf_size) {
        pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
                    (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
                    (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
        goto fail;
    }

    if (update_sw_params(u) < 0)
        goto fail;

    if (build_pollfd(u) < 0)
        goto fail;

    /* FIXME: We need to reload the volume somehow */

    /* Restart the latency/timing bookkeeping from scratch. */
    u->read_count = 0;
    pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
    u->last_smoother_update = 0;

    /* Make the IO thread call snd_pcm_start() on its next iteration. */
    u->first = TRUE;

    /* reset the watermark to the value defined when source was created */
    if (u->use_tsched)
        reset_watermark(u, u->tsched_watermark_ref, &u->source->sample_spec, TRUE);

    pa_log_info("Resumed successfully...");

    return 0;

fail:
    if (u->pcm_handle) {
        snd_pcm_close(u->pcm_handle);
        u->pcm_handle = NULL;
    }

    return -PA_ERR_IO;
}
1017
1018 /* Called from IO context */
1019 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1020     struct userdata *u = PA_SOURCE(o)->userdata;
1021
1022     switch (code) {
1023
1024         case PA_SOURCE_MESSAGE_GET_LATENCY: {
1025             pa_usec_t r = 0;
1026
1027             if (u->pcm_handle)
1028                 r = source_get_latency(u);
1029
1030             *((pa_usec_t*) data) = r;
1031
1032             return 0;
1033         }
1034
1035         case PA_SOURCE_MESSAGE_SET_STATE:
1036
1037             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
1038
1039                 case PA_SOURCE_SUSPENDED: {
1040                     int r;
1041
1042                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1043
1044                     if ((r = suspend(u)) < 0)
1045                         return r;
1046
1047                     break;
1048                 }
1049
1050                 case PA_SOURCE_IDLE:
1051                 case PA_SOURCE_RUNNING: {
1052                     int r;
1053
1054                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1055                         if (build_pollfd(u) < 0)
1056                             return -PA_ERR_IO;
1057                     }
1058
1059                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1060                         if ((r = unsuspend(u)) < 0)
1061                             return r;
1062                     }
1063
1064                     break;
1065                 }
1066
1067                 case PA_SOURCE_UNLINKED:
1068                 case PA_SOURCE_INIT:
1069                 case PA_SOURCE_INVALID_STATE:
1070                     ;
1071             }
1072
1073             break;
1074     }
1075
1076     return pa_source_process_msg(o, code, data, offset, chunk);
1077 }
1078
1079 /* Called from main context */
1080 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1081     pa_source_state_t old_state;
1082     struct userdata *u;
1083
1084     pa_source_assert_ref(s);
1085     pa_assert_se(u = s->userdata);
1086
1087     old_state = pa_source_get_state(u->source);
1088
1089     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1090         reserve_done(u);
1091     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1092         if (reserve_init(u, u->device_name) < 0)
1093             return -PA_ERR_BUSY;
1094
1095     return 0;
1096 }
1097
1098 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1099     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1100
1101     pa_assert(u);
1102     pa_assert(u->mixer_handle);
1103
1104     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1105         return 0;
1106
1107     if (!PA_SOURCE_IS_LINKED(u->source->state))
1108         return 0;
1109
1110     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1111         return 0;
1112
1113     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1114         pa_source_get_volume(u->source, TRUE);
1115         pa_source_get_mute(u->source, TRUE);
1116     }
1117
1118     return 0;
1119 }
1120
1121 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1122     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1123
1124     pa_assert(u);
1125     pa_assert(u->mixer_handle);
1126
1127     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1128         return 0;
1129
1130     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1131         return 0;
1132
1133     if (mask & SND_CTL_EVENT_MASK_VALUE)
1134         pa_source_update_volume_and_mute(u->source);
1135
1136     return 0;
1137 }
1138
1139 static void source_get_volume_cb(pa_source *s) {
1140     struct userdata *u = s->userdata;
1141     pa_cvolume r;
1142     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1143
1144     pa_assert(u);
1145     pa_assert(u->mixer_path);
1146     pa_assert(u->mixer_handle);
1147
1148     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1149         return;
1150
1151     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1152     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1153
1154     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1155
1156     if (u->mixer_path->has_dB) {
1157         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1158
1159         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1160     }
1161
1162     if (pa_cvolume_equal(&u->hardware_volume, &r))
1163         return;
1164
1165     s->real_volume = u->hardware_volume = r;
1166
1167     /* Hmm, so the hardware volume changed, let's reset our software volume */
1168     if (u->mixer_path->has_dB)
1169         pa_source_set_soft_volume(s, NULL);
1170 }
1171
/* Called to apply s->real_volume to the hardware mixer and compute the
 * residual software volume for whatever the hardware could not represent. */
static void source_set_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume r;
    char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
    pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);

    /* NOTE(review): in deferred mode the last two arguments flip —
     * presumably the actual hardware write is postponed to
     * source_write_volume_cb(); confirm against pa_alsa_path_set_volume(). */
    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
        return;

    /* Shift down by the base volume, so that 0dB becomes maximum volume */
    pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);

    /* 'r' now holds what the hardware actually rounded the request to. */
    u->hardware_volume = r;

    if (u->mixer_path->has_dB) {
        pa_cvolume new_soft_volume;
        pa_bool_t accurate_enough;
        char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];

        /* Match exactly what the user requested by software */
        pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);

        /* If the adjustment to do in software is only minimal we
         * can skip it. That saves us CPU at the expense of a bit of
         * accuracy */
        accurate_enough =
            (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
        pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
        pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
        pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
        pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
                     pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
                     pa_yes_no(accurate_enough));
        pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));

        if (!accurate_enough)
            s->soft_volume = new_soft_volume;

    } else {
        pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));

        /* We can't match exactly what the user requested, hence let's
         * at least tell the user about it */

        s->real_volume = r;
    }
}
1229
/* Deferred-volume write callback: push the pending hardware volume
 * (s->thread_info.current_hw_volume) to the mixer and log if the hardware
 * rounded it to something noticeably different. */
static void source_write_volume_cb(pa_source *s) {
    struct userdata *u = s->userdata;
    pa_cvolume hw_vol = s->thread_info.current_hw_volume;

    pa_assert(u);
    pa_assert(u->mixer_path);
    pa_assert(u->mixer_handle);
    pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);

    /* Shift up by the base volume */
    pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);

    if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
        pa_log_error("Writing HW volume failed");
    else {
        pa_cvolume tmp_vol;
        pa_bool_t accurate_enough;

        /* Shift down by the base volume, so that 0dB becomes maximum volume */
        pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);

        /* Compare what was written with what was requested; a ratio within
         * VOLUME_ACCURACY of PA_VOLUME_NORM on every channel counts as a match. */
        pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
        accurate_enough =
            (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
            (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));

        if (!accurate_enough) {
            /* The two log lines never need both representations at once,
             * so share the buffers through a union. */
            union {
                char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
                char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
            } vol;

            pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
                         pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
                         pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
            pa_log_debug("                                           in dB: %s (request) != %s",
                         pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
                         pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
        }
    }
}
1271
1272 static void source_get_mute_cb(pa_source *s) {
1273     struct userdata *u = s->userdata;
1274     pa_bool_t b;
1275
1276     pa_assert(u);
1277     pa_assert(u->mixer_path);
1278     pa_assert(u->mixer_handle);
1279
1280     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1281         return;
1282
1283     s->muted = b;
1284 }
1285
1286 static void source_set_mute_cb(pa_source *s) {
1287     struct userdata *u = s->userdata;
1288
1289     pa_assert(u);
1290     pa_assert(u->mixer_path);
1291     pa_assert(u->mixer_handle);
1292
1293     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1294 }
1295
/* Wire up the source's volume/mute callbacks according to what the currently
 * selected mixer path supports, falling back to software control otherwise. */
static void mixer_volume_init(struct userdata *u) {
    pa_assert(u);

    if (!u->mixer_path->has_volume) {
        /* No hardware volume on this path: clear all volume callbacks. */
        pa_source_set_write_volume_callback(u->source, NULL);
        pa_source_set_get_volume_callback(u->source, NULL);
        pa_source_set_set_volume_callback(u->source, NULL);

        pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
    } else {
        pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
        pa_source_set_set_volume_callback(u->source, source_set_volume_cb);

        /* Deferred (synchronous) volume needs a dB scale to split the
         * request into a hardware and a software part. */
        if (u->mixer_path->has_dB && u->deferred_volume) {
            pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
            pa_log_info("Successfully enabled synchronous volume.");
        } else
            pa_source_set_write_volume_callback(u->source, NULL);

        if (u->mixer_path->has_dB) {
            pa_source_enable_decibel_volume(u->source, TRUE);
            pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);

            /* Base volume maps the path's maximum dB back to 0dB. */
            u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
            u->source->n_volume_steps = PA_VOLUME_NORM+1;

            pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
        } else {
            pa_source_enable_decibel_volume(u->source, FALSE);
            pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);

            /* Without a dB scale expose the raw mixer steps directly. */
            u->source->base_volume = PA_VOLUME_NORM;
            u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
        }

        pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
    }

    if (!u->mixer_path->has_mute) {
        pa_source_set_get_mute_callback(u->source, NULL);
        pa_source_set_set_mute_callback(u->source, NULL);
        pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
    } else {
        pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
        pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
        pa_log_info("Using hardware mute control.");
    }
}
1344
1345 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1346     struct userdata *u = s->userdata;
1347     pa_alsa_port_data *data;
1348
1349     pa_assert(u);
1350     pa_assert(p);
1351     pa_assert(u->mixer_handle);
1352
1353     data = PA_DEVICE_PORT_DATA(p);
1354
1355     pa_assert_se(u->mixer_path = data->path);
1356     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1357
1358     mixer_volume_init(u);
1359
1360     if (data->setting)
1361         pa_alsa_setting_select(data->setting, u->mixer_handle);
1362
1363     if (s->set_mute)
1364         s->set_mute(s);
1365     if (s->set_volume)
1366         s->set_volume(s);
1367
1368     return 0;
1369 }
1370
1371 static void source_update_requested_latency_cb(pa_source *s) {
1372     struct userdata *u = s->userdata;
1373     pa_assert(u);
1374     pa_assert(u->use_tsched); /* only when timer scheduling is used
1375                                * we can dynamically adjust the
1376                                * latency */
1377
1378     if (!u->pcm_handle)
1379         return;
1380
1381     update_sw_params(u);
1382 }
1383
1384 static void thread_func(void *userdata) {
1385     struct userdata *u = userdata;
1386     unsigned short revents = 0;
1387
1388     pa_assert(u);
1389
1390     pa_log_debug("Thread starting up");
1391
1392     if (u->core->realtime_scheduling)
1393         pa_make_realtime(u->core->realtime_priority);
1394
1395     pa_thread_mq_install(&u->thread_mq);
1396
1397     for (;;) {
1398         int ret;
1399         pa_usec_t rtpoll_sleep = 0;
1400
1401 #ifdef DEBUG_TIMING
1402         pa_log_debug("Loop");
1403 #endif
1404
1405         /* Read some data and pass it to the sources */
1406         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1407             int work_done;
1408             pa_usec_t sleep_usec = 0;
1409             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1410
1411             if (u->first) {
1412                 pa_log_info("Starting capture.");
1413                 snd_pcm_start(u->pcm_handle);
1414
1415                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1416
1417                 u->first = FALSE;
1418             }
1419
1420             if (u->use_mmap)
1421                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1422             else
1423                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1424
1425             if (work_done < 0)
1426                 goto fail;
1427
1428 /*             pa_log_debug("work_done = %i", work_done); */
1429
1430             if (work_done)
1431                 update_smoother(u);
1432
1433             if (u->use_tsched) {
1434                 pa_usec_t cusec;
1435
1436                 /* OK, the capture buffer is now empty, let's
1437                  * calculate when to wake up next */
1438
1439 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1440
1441                 /* Convert from the sound card time domain to the
1442                  * system time domain */
1443                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1444
1445 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1446
1447                 /* We don't trust the conversion, so we wake up whatever comes first */
1448                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1449             }
1450         }
1451
1452         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1453             pa_usec_t volume_sleep;
1454             pa_source_volume_change_apply(u->source, &volume_sleep);
1455             if (volume_sleep > 0)
1456                 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1457         }
1458
1459         if (rtpoll_sleep > 0)
1460             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1461         else
1462             pa_rtpoll_set_timer_disabled(u->rtpoll);
1463
1464         /* Hmm, nothing to do. Let's sleep */
1465         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1466             goto fail;
1467
1468         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1469             pa_source_volume_change_apply(u->source, NULL);
1470
1471         if (ret == 0)
1472             goto finish;
1473
1474         /* Tell ALSA about this and process its response */
1475         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1476             struct pollfd *pollfd;
1477             int err;
1478             unsigned n;
1479
1480             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1481
1482             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1483                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1484                 goto fail;
1485             }
1486
1487             if (revents & ~POLLIN) {
1488                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1489                     goto fail;
1490
1491                 u->first = TRUE;
1492                 revents = 0;
1493             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1494                 pa_log_debug("Wakeup from ALSA!");
1495
1496         } else
1497             revents = 0;
1498     }
1499
1500 fail:
1501     /* If this was no regular exit from the loop we have to continue
1502      * processing messages until we received PA_MESSAGE_SHUTDOWN */
1503     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1504     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1505
1506 finish:
1507     pa_log_debug("Thread shutting down");
1508 }
1509
1510 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1511     const char *n;
1512     char *t;
1513
1514     pa_assert(data);
1515     pa_assert(ma);
1516     pa_assert(device_name);
1517
1518     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1519         pa_source_new_data_set_name(data, n);
1520         data->namereg_fail = TRUE;
1521         return;
1522     }
1523
1524     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1525         data->namereg_fail = TRUE;
1526     else {
1527         n = device_id ? device_id : device_name;
1528         data->namereg_fail = FALSE;
1529     }
1530
1531     if (mapping)
1532         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1533     else
1534         t = pa_sprintf_malloc("alsa_input.%s", n);
1535
1536     pa_source_new_data_set_name(data, t);
1537     pa_xfree(t);
1538 }
1539
/* Open the mixer device belonging to the PCM and probe either a single
 * synthesized path for an explicit 'element', or the full path set for the
 * given 'mapping'. On probe failure all partially initialized mixer state
 * is torn down again; the function itself never fails hard. */
static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {

    /* Nothing to look up without a mapping or an explicit element. */
    if (!mapping && !element)
        return;

    if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
        pa_log_info("Failed to find a working mixer device.");
        return;
    }

    if (element) {

        /* An explicit control= element: build a one-element path for it. */
        if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
            goto fail;

        pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
        pa_alsa_path_dump(u->mixer_path);
    } else {

        /* Otherwise probe every path the mapping provides. */
        if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
            goto fail;

        pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
    }

    return;

fail:

    /* Only one of path_set / path can be set at this point; free it and
     * close the mixer handle so the caller sees a clean "no mixer" state. */
    if (u->mixer_path_set) {
        pa_alsa_path_set_free(u->mixer_path_set);
        u->mixer_path_set = NULL;
    } else if (u->mixer_path) {
        pa_alsa_path_free(u->mixer_path);
        u->mixer_path = NULL;
    }

    if (u->mixer_handle) {
        snd_mixer_close(u->mixer_handle);
        u->mixer_handle = NULL;
    }
}
1585
/* Activate the mixer path that matches the source's state (active port or
 * sole probed path), initialize the volume/mute callbacks, and register for
 * mixer change notifications if any path exposes volume or mute controls.
 * Returns 0 on success (including "no mixer at all"), -1 on setup failure. */
static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
    pa_bool_t need_mixer_callback = FALSE;

    pa_assert(u);

    /* No mixer was found earlier: software volume/mute only. */
    if (!u->mixer_handle)
        return 0;

    if (u->source->active_port) {
        pa_alsa_port_data *data;

        /* We have a list of supported paths, so let's activate the
         * one that has been chosen as active */

        data = PA_DEVICE_PORT_DATA(u->source->active_port);
        u->mixer_path = data->path;

        pa_alsa_path_select(data->path, u->mixer_handle);

        if (data->setting)
            pa_alsa_setting_select(data->setting, u->mixer_handle);

    } else {

        if (!u->mixer_path && u->mixer_path_set)
            u->mixer_path = u->mixer_path_set->paths;

        if (u->mixer_path) {
            /* Hmm, we have only a single path, then let's activate it */

            pa_alsa_path_select(u->mixer_path, u->mixer_handle);

            if (u->mixer_path->settings)
                pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
        } else
            /* No usable path at all: nothing more to set up. */
            return 0;
    }

    mixer_volume_init(u);

    /* Will we need to register callbacks? */
    if (u->mixer_path_set && u->mixer_path_set->paths) {
        pa_alsa_path *p;

        PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
            if (p->has_volume || p->has_mute)
                need_mixer_callback = TRUE;
        }
    }
    else if (u->mixer_path)
        need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;

    if (need_mixer_callback) {
        int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
        /* Deferred volume handles mixer events in the IO thread via rtpoll,
         * otherwise they are dispatched from the main loop. */
        if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
            u->mixer_pd = pa_alsa_mixer_pdata_new();
            mixer_callback = io_mixer_callback;

            if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        } else {
            u->mixer_fdl = pa_alsa_fdlist_new();
            mixer_callback = ctl_mixer_callback;

            if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
                pa_log("Failed to initialize file descriptor monitoring");
                return -1;
            }
        }

        if (u->mixer_path_set)
            pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
        else
            pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
    }

    return 0;
}
1666
1667 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1668
1669     struct userdata *u = NULL;
1670     const char *dev_id = NULL;
1671     pa_sample_spec ss;
1672     pa_channel_map map;
1673     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1674     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1675     size_t frame_size;
1676     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE;
1677     pa_source_new_data data;
1678     pa_alsa_profile_set *profile_set = NULL;
1679
1680     pa_assert(m);
1681     pa_assert(ma);
1682
1683     ss = m->core->default_sample_spec;
1684     map = m->core->default_channel_map;
1685     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1686         pa_log("Failed to parse sample specification and channel map");
1687         goto fail;
1688     }
1689
1690     frame_size = pa_frame_size(&ss);
1691
1692     nfrags = m->core->default_n_fragments;
1693     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1694     if (frag_size <= 0)
1695         frag_size = (uint32_t) frame_size;
1696     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1697     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1698
1699     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1700         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1701         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1702         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1703         pa_log("Failed to parse buffer metrics");
1704         goto fail;
1705     }
1706
1707     buffer_size = nfrags * frag_size;
1708
1709     period_frames = frag_size/frame_size;
1710     buffer_frames = buffer_size/frame_size;
1711     tsched_frames = tsched_size/frame_size;
1712
1713     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1714         pa_log("Failed to parse mmap argument.");
1715         goto fail;
1716     }
1717
1718     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1719         pa_log("Failed to parse tsched argument.");
1720         goto fail;
1721     }
1722
1723     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1724         pa_log("Failed to parse ignore_dB argument.");
1725         goto fail;
1726     }
1727
1728     deferred_volume = m->core->deferred_volume;
1729     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1730         pa_log("Failed to parse deferred_volume argument.");
1731         goto fail;
1732     }
1733
1734     use_tsched = pa_alsa_may_tsched(use_tsched);
1735
1736     u = pa_xnew0(struct userdata, 1);
1737     u->core = m->core;
1738     u->module = m;
1739     u->use_mmap = use_mmap;
1740     u->use_tsched = use_tsched;
1741     u->deferred_volume = deferred_volume;
1742     u->first = TRUE;
1743     u->rtpoll = pa_rtpoll_new();
1744     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1745
1746     u->smoother = pa_smoother_new(
1747             SMOOTHER_ADJUST_USEC,
1748             SMOOTHER_WINDOW_USEC,
1749             TRUE,
1750             TRUE,
1751             5,
1752             pa_rtclock_now(),
1753             TRUE);
1754     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1755
1756     dev_id = pa_modargs_get_value(
1757             ma, "device_id",
1758             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1759
1760     if (reserve_init(u, dev_id) < 0)
1761         goto fail;
1762
1763     if (reserve_monitor_init(u, dev_id) < 0)
1764         goto fail;
1765
1766     b = use_mmap;
1767     d = use_tsched;
1768
1769     if (mapping) {
1770
1771         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1772             pa_log("device_id= not set");
1773             goto fail;
1774         }
1775
1776         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1777                       dev_id,
1778                       &u->device_name,
1779                       &ss, &map,
1780                       SND_PCM_STREAM_CAPTURE,
1781                       &period_frames, &buffer_frames, tsched_frames,
1782                       &b, &d, mapping)))
1783             goto fail;
1784
1785     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1786
1787         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1788             goto fail;
1789
1790         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1791                       dev_id,
1792                       &u->device_name,
1793                       &ss, &map,
1794                       SND_PCM_STREAM_CAPTURE,
1795                       &period_frames, &buffer_frames, tsched_frames,
1796                       &b, &d, profile_set, &mapping)))
1797             goto fail;
1798
1799     } else {
1800
1801         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1802                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1803                       &u->device_name,
1804                       &ss, &map,
1805                       SND_PCM_STREAM_CAPTURE,
1806                       &period_frames, &buffer_frames, tsched_frames,
1807                       &b, &d, FALSE)))
1808             goto fail;
1809     }
1810
1811     pa_assert(u->device_name);
1812     pa_log_info("Successfully opened device %s.", u->device_name);
1813
1814     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1815         pa_log_notice("Device %s is modem, refusing further initialization.", u->device_name);
1816         goto fail;
1817     }
1818
1819     if (mapping)
1820         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1821
1822     if (use_mmap && !b) {
1823         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1824         u->use_mmap = use_mmap = FALSE;
1825     }
1826
1827     if (use_tsched && (!b || !d)) {
1828         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1829         u->use_tsched = use_tsched = FALSE;
1830     }
1831
1832     if (u->use_mmap)
1833         pa_log_info("Successfully enabled mmap() mode.");
1834
1835     if (u->use_tsched)
1836         pa_log_info("Successfully enabled timer-based scheduling mode.");
1837
1838     /* ALSA might tweak the sample spec, so recalculate the frame size */
1839     frame_size = pa_frame_size(&ss);
1840
1841     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1842
1843     pa_source_new_data_init(&data);
1844     data.driver = driver;
1845     data.module = m;
1846     data.card = card;
1847     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1848
1849     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1850      * variable instead of using &data.namereg_fail directly, because
1851      * data.namereg_fail is a bitfield and taking the address of a bitfield
1852      * variable is impossible. */
1853     namereg_fail = data.namereg_fail;
1854     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1855         pa_log("Failed to parse namereg_fail argument.");
1856         pa_source_new_data_done(&data);
1857         goto fail;
1858     }
1859     data.namereg_fail = namereg_fail;
1860
1861     pa_source_new_data_set_sample_spec(&data, &ss);
1862     pa_source_new_data_set_channel_map(&data, &map);
1863
1864     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1865     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1866     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1867     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1868     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1869
1870     if (mapping) {
1871         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1872         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1873     }
1874
1875     pa_alsa_init_description(data.proplist);
1876
1877     if (u->control_device)
1878         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1879
1880     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1881         pa_log("Invalid properties");
1882         pa_source_new_data_done(&data);
1883         goto fail;
1884     }
1885
1886     if (u->mixer_path_set)
1887         pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1888
1889     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1890     pa_source_new_data_done(&data);
1891
1892     if (!u->source) {
1893         pa_log("Failed to create source object");
1894         goto fail;
1895     }
1896
1897     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1898                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1899         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1900         goto fail;
1901     }
1902
1903     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1904                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1905         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1906         goto fail;
1907     }
1908
1909     u->source->parent.process_msg = source_process_msg;
1910     if (u->use_tsched)
1911         u->source->update_requested_latency = source_update_requested_latency_cb;
1912     u->source->set_state = source_set_state_cb;
1913     u->source->set_port = source_set_port_cb;
1914     u->source->userdata = u;
1915
1916     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1917     pa_source_set_rtpoll(u->source, u->rtpoll);
1918
1919     u->frame_size = frame_size;
1920     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1921     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
1922     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1923
1924     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1925                 (double) u->hwbuf_size / (double) u->fragment_size,
1926                 (long unsigned) u->fragment_size,
1927                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1928                 (long unsigned) u->hwbuf_size,
1929                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1930
1931     if (u->use_tsched) {
1932         u->tsched_watermark_ref = tsched_watermark;
1933         reset_watermark(u, u->tsched_watermark_ref, &ss, FALSE);
1934     }
1935     else
1936         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1937
1938     reserve_update(u);
1939
1940     if (update_sw_params(u) < 0)
1941         goto fail;
1942
1943     if (setup_mixer(u, ignore_dB) < 0)
1944         goto fail;
1945
1946     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1947
1948     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1949         pa_log("Failed to create thread.");
1950         goto fail;
1951     }
1952
1953     /* Get initial mixer settings */
1954     if (data.volume_is_set) {
1955         if (u->source->set_volume)
1956             u->source->set_volume(u->source);
1957     } else {
1958         if (u->source->get_volume)
1959             u->source->get_volume(u->source);
1960     }
1961
1962     if (data.muted_is_set) {
1963         if (u->source->set_mute)
1964             u->source->set_mute(u->source);
1965     } else {
1966         if (u->source->get_mute)
1967             u->source->get_mute(u->source);
1968     }
1969
1970     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
1971         u->source->write_volume(u->source);
1972
1973     pa_source_put(u->source);
1974
1975     if (profile_set)
1976         pa_alsa_profile_set_free(profile_set);
1977
1978     return u->source;
1979
1980 fail:
1981
1982     if (u)
1983         userdata_free(u);
1984
1985     if (profile_set)
1986         pa_alsa_profile_set_free(profile_set);
1987
1988     return NULL;
1989 }
1990
/* Tear down and release everything owned by the userdata struct: the
 * source, the IO thread, the ALSA PCM and mixer handles, the device
 * reservation, and finally the struct itself. The order of operations
 * matters: the source is unlinked first, the IO thread is stopped before
 * any structure it uses is freed, and the PCM is dropped before close. */
static void userdata_free(struct userdata *u) {
    pa_assert(u);

    /* Unlink the source from the core first so nothing new is routed to
     * it while we tear things down. */
    if (u->source)
        pa_source_unlink(u->source);

    /* Ask the IO thread to shut down and wait for it to exit before we
     * free the thread_mq/rtpoll structures it is still using. */
    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    /* Drop our reference only after the unlink above. */
    if (u->source)
        pa_source_unref(u->source);

    if (u->mixer_pd)
        pa_alsa_mixer_pdata_free(u->mixer_pd);

    if (u->alsa_rtpoll_item)
        pa_rtpoll_item_free(u->alsa_rtpoll_item);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    /* Stop the PCM immediately, discarding any pending captured frames,
     * then close the device handle. */
    if (u->pcm_handle) {
        snd_pcm_drop(u->pcm_handle);
        snd_pcm_close(u->pcm_handle);
    }

    if (u->mixer_fdl)
        pa_alsa_fdlist_free(u->mixer_fdl);

    /* Free either the whole path set or the standalone path, never both
     * (the set owns its contained paths). */
    if (u->mixer_path_set)
        pa_alsa_path_set_free(u->mixer_path_set);
    else if (u->mixer_path)
        pa_alsa_path_free(u->mixer_path);

    if (u->mixer_handle)
        snd_mixer_close(u->mixer_handle);

    if (u->smoother)
        pa_smoother_free(u->smoother);

    /* Release the device reservation and its monitor. */
    reserve_done(u);
    monitor_done(u);

    pa_xfree(u->device_name);
    pa_xfree(u->control_device);
    pa_xfree(u);
}
2042
2043 void pa_alsa_source_free(pa_source *s) {
2044     struct userdata *u;
2045
2046     pa_source_assert_ref(s);
2047     pa_assert_se(u = s->userdata);
2048
2049     userdata_free(u);
2050 }