sink,source: Avoid crash by not updating volume on shutdown
[platform/upstream/pulseaudio.git] / src / modules / alsa / alsa-source.c
1 /***
2   This file is part of PulseAudio.
3
4   Copyright 2004-2008 Lennart Poettering
5   Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7   PulseAudio is free software; you can redistribute it and/or modify
8   it under the terms of the GNU Lesser General Public License as published
9   by the Free Software Foundation; either version 2.1 of the License,
10   or (at your option) any later version.
11
12   PulseAudio is distributed in the hope that it will be useful, but
13   WITHOUT ANY WARRANTY; without even the implied warranty of
14   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15   General Public License for more details.
16
17   You should have received a copy of the GNU Lesser General Public License
18   along with PulseAudio; if not, write to the Free Software
19   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20   USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28
29 #include <asoundlib.h>
30
31 #include <pulse/rtclock.h>
32 #include <pulse/timeval.h>
33 #include <pulse/volume.h>
34 #include <pulse/xmalloc.h>
35
36 #include <pulsecore/core.h>
37 #include <pulsecore/i18n.h>
38 #include <pulsecore/module.h>
39 #include <pulsecore/memchunk.h>
40 #include <pulsecore/sink.h>
41 #include <pulsecore/modargs.h>
42 #include <pulsecore/core-rtclock.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/log.h>
46 #include <pulsecore/macro.h>
47 #include <pulsecore/thread.h>
48 #include <pulsecore/thread-mq.h>
49 #include <pulsecore/rtpoll.h>
50 #include <pulsecore/time-smoother.h>
51
52 #include <modules/reserve-wrap.h>
53
54 #include "alsa-util.h"
55 #include "alsa-source.h"
56
57 /* #define DEBUG_TIMING */
58
59 #define DEFAULT_DEVICE "default"
60
61 #define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)             /* 2s */
62 #define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)        /* 20ms */
63
64 #define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)       /* 10ms  */
65 #define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)        /* 5ms */
66 #define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)    /* 20s */
67 #define TSCHED_WATERMARK_INC_THRESHOLD_USEC (0*PA_USEC_PER_MSEC)   /* 0ms */
68 #define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC) /* 100ms */
69 #define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)           /* 10ms */
70
71 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                /* 10ms */
72 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                /* 4ms */
73
74 #define SMOOTHER_WINDOW_USEC  (10*PA_USEC_PER_SEC)                 /* 10s */
75 #define SMOOTHER_ADJUST_USEC  (1*PA_USEC_PER_SEC)                  /* 1s */
76
77 #define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                 /* 2ms */
78 #define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)               /* 200ms */
79
80 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
81
82 struct userdata {
83     pa_core *core;
84     pa_module *module;
85     pa_source *source;
86
87     pa_thread *thread;
88     pa_thread_mq thread_mq;
89     pa_rtpoll *rtpoll;
90
91     snd_pcm_t *pcm_handle;
92
93     pa_alsa_fdlist *mixer_fdl;
94     pa_alsa_mixer_pdata *mixer_pd;
95     snd_mixer_t *mixer_handle;
96     pa_alsa_path_set *mixer_path_set;
97     pa_alsa_path *mixer_path;
98
99     pa_cvolume hardware_volume;
100
101     size_t
102         frame_size,
103         fragment_size,
104         hwbuf_size,
105         tsched_watermark,
106         hwbuf_unused,
107         min_sleep,
108         min_wakeup,
109         watermark_inc_step,
110         watermark_dec_step,
111         watermark_inc_threshold,
112         watermark_dec_threshold;
113
114     pa_usec_t watermark_dec_not_before;
115
116     char *device_name;  /* name of the PCM device */
117     char *control_device; /* name of the control device */
118
119     pa_bool_t use_mmap:1, use_tsched:1, deferred_volume:1;
120
121     pa_bool_t first;
122
123     pa_rtpoll_item *alsa_rtpoll_item;
124
125     snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
126
127     pa_smoother *smoother;
128     uint64_t read_count;
129     pa_usec_t smoother_interval;
130     pa_usec_t last_smoother_update;
131
132     pa_reserve_wrapper *reserve;
133     pa_hook_slot *reserve_slot;
134     pa_reserve_monitor_wrapper *monitor;
135     pa_hook_slot *monitor_slot;
136 };
137
138 static void userdata_free(struct userdata *u);
139
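/* Device reservation hook: suspend the source when another application claims the device. */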
140 static pa_hook_result_t reserve_cb(pa_reserve_wrapper *r, void *forced, struct userdata *u) {
141     pa_assert(r);
142     pa_assert(u);
143
144     if (pa_source_suspend(u->source, TRUE, PA_SUSPEND_APPLICATION) < 0)
145         return PA_HOOK_CANCEL;
146
147     return PA_HOOK_OK;
148 }
149
150 static void reserve_done(struct userdata *u) {
151     pa_assert(u);
152
153     if (u->reserve_slot) {
154         pa_hook_slot_free(u->reserve_slot);
155         u->reserve_slot = NULL;
156     }
157
158     if (u->reserve) {
159         pa_reserve_wrapper_unref(u->reserve);
160         u->reserve = NULL;
161     }
162 }
163
164 static void reserve_update(struct userdata *u) {
165     const char *description;
166     pa_assert(u);
167
168     if (!u->source || !u->reserve)
169         return;
170
171     if ((description = pa_proplist_gets(u->source->proplist, PA_PROP_DEVICE_DESCRIPTION)))
172         pa_reserve_wrapper_set_application_device_name(u->reserve, description);
173 }
174
175 static int reserve_init(struct userdata *u, const char *dname) {
176     char *rname;
177
178     pa_assert(u);
179     pa_assert(dname);
180
181     if (u->reserve)
182         return 0;
183
184     if (pa_in_system_mode())
185         return 0;
186
187     if (!(rname = pa_alsa_get_reserve_name(dname)))
188         return 0;
189
190     /* We are resuming, try to lock the device */
191     u->reserve = pa_reserve_wrapper_get(u->core, rname);
192     pa_xfree(rname);
193
194     if (!(u->reserve))
195         return -1;
196
197     reserve_update(u);
198
199     pa_assert(!u->reserve_slot);
200     u->reserve_slot = pa_hook_connect(pa_reserve_wrapper_hook(u->reserve), PA_HOOK_NORMAL, (pa_hook_cb_t) reserve_cb, u);
201
202     return 0;
203 }
204
205 static pa_hook_result_t monitor_cb(pa_reserve_monitor_wrapper *w, void* busy, struct userdata *u) {
206     pa_bool_t b;
207
208     pa_assert(w);
209     pa_assert(u);
210
211     b = PA_PTR_TO_UINT(busy) && !u->reserve;
212
213     pa_source_suspend(u->source, b, PA_SUSPEND_APPLICATION);
214     return PA_HOOK_OK;
215 }
216
217 static void monitor_done(struct userdata *u) {
218     pa_assert(u);
219
220     if (u->monitor_slot) {
221         pa_hook_slot_free(u->monitor_slot);
222         u->monitor_slot = NULL;
223     }
224
225     if (u->monitor) {
226         pa_reserve_monitor_wrapper_unref(u->monitor);
227         u->monitor = NULL;
228     }
229 }
230
231 static int reserve_monitor_init(struct userdata *u, const char *dname) {
232     char *rname;
233
234     pa_assert(u);
235     pa_assert(dname);
236
237     if (pa_in_system_mode())
238         return 0;
239
240     if (!(rname = pa_alsa_get_reserve_name(dname)))
241         return 0;
242
243     /* Watch the reservation of the device so we notice when another application grabs it */
244     u->monitor = pa_reserve_monitor_wrapper_get(u->core, rname);
245     pa_xfree(rname);
246
247     if (!(u->monitor))
248         return -1;
249
250     pa_assert(!u->monitor_slot);
251     u->monitor_slot = pa_hook_connect(pa_reserve_monitor_wrapper_hook(u->monitor), PA_HOOK_NORMAL, (pa_hook_cb_t) monitor_cb, u);
252
253     return 0;
254 }
255
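/* Compute the minimum sleep and wakeup margins in bytes, clamped between one frame and half of the usable hardware buffer. */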
256 static void fix_min_sleep_wakeup(struct userdata *u) {
257     size_t max_use, max_use_2;
258
259     pa_assert(u);
260     pa_assert(u->use_tsched);
261
262     max_use = u->hwbuf_size - u->hwbuf_unused;
263     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
264
265     u->min_sleep = pa_usec_to_bytes(TSCHED_MIN_SLEEP_USEC, &u->source->sample_spec);
266     u->min_sleep = PA_CLAMP(u->min_sleep, u->frame_size, max_use_2);
267
268     u->min_wakeup = pa_usec_to_bytes(TSCHED_MIN_WAKEUP_USEC, &u->source->sample_spec);
269     u->min_wakeup = PA_CLAMP(u->min_wakeup, u->frame_size, max_use_2);
270 }
271
272 static void fix_tsched_watermark(struct userdata *u) {
273     size_t max_use;
274     pa_assert(u);
275     pa_assert(u->use_tsched);
276
277     max_use = u->hwbuf_size - u->hwbuf_unused;
278
279     if (u->tsched_watermark > max_use - u->min_sleep)
280         u->tsched_watermark = max_use - u->min_sleep;
281
282     if (u->tsched_watermark < u->min_wakeup)
283         u->tsched_watermark = u->min_wakeup;
284 }
285
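/* Raise the wakeup watermark (typically after an overrun); if it is already at its maximum, raise the minimum latency instead. */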
286 static void increase_watermark(struct userdata *u) {
287     size_t old_watermark;
288     pa_usec_t old_min_latency, new_min_latency;
289
290     pa_assert(u);
291     pa_assert(u->use_tsched);
292
293     /* First, just try to increase the watermark */
294     old_watermark = u->tsched_watermark;
295     u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
296     fix_tsched_watermark(u);
297
298     if (old_watermark != u->tsched_watermark) {
299         pa_log_info("Increasing wakeup watermark to %0.2f ms",
300                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
301         return;
302     }
303
304     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
305     old_min_latency = u->source->thread_info.min_latency;
306     new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
307     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
308
309     if (old_min_latency != new_min_latency) {
310         pa_log_info("Increasing minimal latency to %0.2f ms",
311                     (double) new_min_latency / PA_USEC_PER_MSEC);
312
313         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
314     }
315
316     /* When we reach this we're officially fucked! */
317 }
318
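/* Lower the wakeup watermark again, but at most once per TSCHED_WATERMARK_VERIFY_AFTER_USEC. */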
319 static void decrease_watermark(struct userdata *u) {
320     size_t old_watermark;
321     pa_usec_t now;
322
323     pa_assert(u);
324     pa_assert(u->use_tsched);
325
326     now = pa_rtclock_now();
327
328     if (u->watermark_dec_not_before <= 0)
329         goto restart;
330
331     if (u->watermark_dec_not_before > now)
332         return;
333
334     old_watermark = u->tsched_watermark;
335
336     if (u->tsched_watermark < u->watermark_dec_step)
337         u->tsched_watermark = u->tsched_watermark / 2;
338     else
339         u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
340
341     fix_tsched_watermark(u);
342
343     if (old_watermark != u->tsched_watermark)
344         pa_log_info("Decreasing wakeup watermark to %0.2f ms",
345                     (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
346
347     /* We don't change the latency range */
348
349 restart:
350     u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
351 }
352
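/* Split the requested latency (or the full buffer time if none was requested) into a sleep time and a processing margin of one watermark. */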
353 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t *process_usec) {
354     pa_usec_t wm, usec;
355
356     pa_assert(sleep_usec);
357     pa_assert(process_usec);
358
359     pa_assert(u);
360     pa_assert(u->use_tsched);
361
362     usec = pa_source_get_requested_latency_within_thread(u->source);
363
364     if (usec == (pa_usec_t) -1)
365         usec = pa_bytes_to_usec(u->hwbuf_size, &u->source->sample_spec);
366
367     wm = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
368
369     if (wm > usec)
370         wm = usec/2;
371
372     *sleep_usec = usec - wm;
373     *process_usec = wm;
374
375 #ifdef DEBUG_TIMING
376     pa_log_debug("Buffer time: %lu ms; Sleep time: %lu ms; Process time: %lu ms",
377                  (unsigned long) (usec / PA_USEC_PER_MSEC),
378                  (unsigned long) (*sleep_usec / PA_USEC_PER_MSEC),
379                  (unsigned long) (*process_usec / PA_USEC_PER_MSEC));
380 #endif
381 }
382
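/* Try to recover from an ALSA error (e.g. overrun or system suspend) via snd_pcm_recover(); on success the capture stream is restarted on the next iteration (u->first = TRUE). */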
383 static int try_recover(struct userdata *u, const char *call, int err) {
384     pa_assert(u);
385     pa_assert(call);
386     pa_assert(err < 0);
387
388     pa_log_debug("%s: %s", call, pa_alsa_strerror(err));
389
390     pa_assert(err != -EAGAIN);
391
392     if (err == -EPIPE)
393         pa_log_debug("%s: Buffer overrun!", call);
394
395     if (err == -ESTRPIPE)
396         pa_log_debug("%s: System suspended!", call);
397
398     if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) < 0) {
399         pa_log("%s: %s", call, pa_alsa_strerror(err));
400         return -1;
401     }
402
403     u->first = TRUE;
404     return 0;
405 }
406
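/* Calculate how much free space is left in the hardware buffer and, with timer scheduling, adapt the watermark: increase it on (near-)overruns, decrease it when we consistently wake up with plenty of room. */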
407 static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
408     size_t left_to_record;
409     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
410     pa_bool_t overrun = FALSE;
411
412     /* We use <= instead of < for this check here because an overrun
413      * only happens after the last sample was processed, not already when
414      * it is removed from the buffer. This is particularly important
415      * when block transfer is used. */
416
417     if (n_bytes <= rec_space)
418         left_to_record = rec_space - n_bytes;
419     else {
420
421         /* We got a dropout. What a mess! */
422         left_to_record = 0;
423         overrun = TRUE;
424
425 #ifdef DEBUG_TIMING
426         PA_DEBUG_TRAP;
427 #endif
428
429         if (pa_log_ratelimit(PA_LOG_INFO))
430             pa_log_info("Overrun!");
431     }
432
433 #ifdef DEBUG_TIMING
434     pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
435 #endif
436
437     if (u->use_tsched) {
438         pa_bool_t reset_not_before = TRUE;
439
440         if (overrun || left_to_record < u->watermark_inc_threshold)
441             increase_watermark(u);
442         else if (left_to_record > u->watermark_dec_threshold) {
443             reset_not_before = FALSE;
444
445             /* We decrease the watermark only if we have actually
446              * been woken up by a timeout. If something else woke
447              * us up it's too easy to fulfill the deadlines... */
448
449             if (on_timeout)
450                 decrease_watermark(u);
451         }
452
453         if (reset_not_before)
454             u->watermark_dec_not_before = 0;
455     }
456
457     return left_to_record;
458 }
459
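/* Called from the IO thread: transfer as much data as possible from the mmap'ed hardware buffer into memblocks and post it to the source. Returns 1 if something was read, 0 if not, negative on unrecoverable errors. */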
460 static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
461     pa_bool_t work_done = FALSE;
462     pa_usec_t max_sleep_usec = 0, process_usec = 0;
463     size_t left_to_record;
464     unsigned j = 0;
465
466     pa_assert(u);
467     pa_source_assert_ref(u->source);
468
469     if (u->use_tsched)
470         hw_sleep_time(u, &max_sleep_usec, &process_usec);
471
472     for (;;) {
473         snd_pcm_sframes_t n;
474         size_t n_bytes;
475         int r;
476         pa_bool_t after_avail = TRUE;
477
478         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
479
480             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
481                 continue;
482
483             return r;
484         }
485
486         n_bytes = (size_t) n * u->frame_size;
487
488 #ifdef DEBUG_TIMING
489         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
490 #endif
491
492         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
493         on_timeout = FALSE;
494
495         if (u->use_tsched)
496             if (!polled &&
497                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2) {
498 #ifdef DEBUG_TIMING
499                 pa_log_debug("Not reading, because too early.");
500 #endif
501                 break;
502             }
503
504         if (PA_UNLIKELY(n_bytes <= 0)) {
505
506             if (polled)
507                 PA_ONCE_BEGIN {
508                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
509                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
510                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
511                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
512                            pa_strnull(dn));
513                     pa_xfree(dn);
514                 } PA_ONCE_END;
515
516 #ifdef DEBUG_TIMING
517             pa_log_debug("Not reading, because not necessary.");
518 #endif
519             break;
520         }
521
522
523         if (++j > 10) {
524 #ifdef DEBUG_TIMING
525             pa_log_debug("Not filling up, because already too many iterations.");
526 #endif
527
528             break;
529         }
530
531         polled = FALSE;
532
533 #ifdef DEBUG_TIMING
534         pa_log_debug("Reading");
535 #endif
536
537         for (;;) {
538             pa_memchunk chunk;
539             void *p;
540             int err;
541             const snd_pcm_channel_area_t *areas;
542             snd_pcm_uframes_t offset, frames;
543             snd_pcm_sframes_t sframes;
544
545             frames = (snd_pcm_uframes_t) (n_bytes / u->frame_size);
546 /*             pa_log_debug("%lu frames to read", (unsigned long) frames); */
547
548             if (PA_UNLIKELY((err = pa_alsa_safe_mmap_begin(u->pcm_handle, &areas, &offset, &frames, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
549
550                 if (!after_avail && err == -EAGAIN)
551                     break;
552
553                 if ((r = try_recover(u, "snd_pcm_mmap_begin", err)) == 0)
554                     continue;
555
556                 return r;
557             }
558
559             /* Make sure that if these memblocks need to be copied they will fit into one slot */
560             if (frames > pa_mempool_block_size_max(u->core->mempool)/u->frame_size)
561                 frames = pa_mempool_block_size_max(u->core->mempool)/u->frame_size;
562
563             if (!after_avail && frames == 0)
564                 break;
565
566             pa_assert(frames > 0);
567             after_avail = FALSE;
568
569             /* Check these are multiples of 8 bits */
570             pa_assert((areas[0].first & 7) == 0);
571             pa_assert((areas[0].step & 7) == 0);
572
573             /* We assume a single interleaved memory buffer */
574             pa_assert((areas[0].first >> 3) == 0);
575             pa_assert((areas[0].step >> 3) == u->frame_size);
576
577             p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
578
579             chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, TRUE);
580             chunk.length = pa_memblock_get_length(chunk.memblock);
581             chunk.index = 0;
582
583             pa_source_post(u->source, &chunk);
584             pa_memblock_unref_fixed(chunk.memblock);
585
586             if (PA_UNLIKELY((sframes = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0)) {
587
588                 if ((r = try_recover(u, "snd_pcm_mmap_commit", (int) sframes)) == 0)
589                     continue;
590
591                 return r;
592             }
593
594             work_done = TRUE;
595
596             u->read_count += frames * u->frame_size;
597
598 #ifdef DEBUG_TIMING
599             pa_log_debug("Read %lu bytes (of possible %lu bytes)", (unsigned long) (frames * u->frame_size), (unsigned long) n_bytes);
600 #endif
601
602             if ((size_t) frames * u->frame_size >= n_bytes)
603                 break;
604
605             n_bytes -= (size_t) frames * u->frame_size;
606         }
607     }
608
609     if (u->use_tsched) {
610         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
611         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
612
613         if (*sleep_usec > process_usec)
614             *sleep_usec -= process_usec;
615         else
616             *sleep_usec = 0;
617     }
618
619     return work_done ? 1 : 0;
620 }
621
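/* Called from the IO thread: like mmap_read(), but uses snd_pcm_readi() into newly allocated memblocks for PCMs without mmap support. */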
622 static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
623     int work_done = FALSE;
624     pa_usec_t max_sleep_usec = 0, process_usec = 0;
625     size_t left_to_record;
626     unsigned j = 0;
627
628     pa_assert(u);
629     pa_source_assert_ref(u->source);
630
631     if (u->use_tsched)
632         hw_sleep_time(u, &max_sleep_usec, &process_usec);
633
634     for (;;) {
635         snd_pcm_sframes_t n;
636         size_t n_bytes;
637         int r;
638         pa_bool_t after_avail = TRUE;
639
640         if (PA_UNLIKELY((n = pa_alsa_safe_avail(u->pcm_handle, u->hwbuf_size, &u->source->sample_spec)) < 0)) {
641
642             if ((r = try_recover(u, "snd_pcm_avail", (int) n)) == 0)
643                 continue;
644
645             return r;
646         }
647
648         n_bytes = (size_t) n * u->frame_size;
649         left_to_record = check_left_to_record(u, n_bytes, on_timeout);
650         on_timeout = FALSE;
651
652         if (u->use_tsched)
653             if (!polled &&
654                 pa_bytes_to_usec(left_to_record, &u->source->sample_spec) > process_usec+max_sleep_usec/2)
655                 break;
656
657         if (PA_UNLIKELY(n_bytes <= 0)) {
658
659             if (polled)
660                 PA_ONCE_BEGIN {
661                     char *dn = pa_alsa_get_driver_name_by_pcm(u->pcm_handle);
662                     pa_log(_("ALSA woke us up to read new data from the device, but there was actually nothing to read!\n"
663                              "Most likely this is a bug in the ALSA driver '%s'. Please report this issue to the ALSA developers.\n"
664                              "We were woken up with POLLIN set -- however a subsequent snd_pcm_avail() returned 0 or another value < min_avail."),
665                            pa_strnull(dn));
666                     pa_xfree(dn);
667                 } PA_ONCE_END;
668
669             break;
670         }
671
672         if (++j > 10) {
673 #ifdef DEBUG_TIMING
674             pa_log_debug("Not filling up, because already too many iterations.");
675 #endif
676
677             break;
678         }
679
680         polled = FALSE;
681
682         for (;;) {
683             void *p;
684             snd_pcm_sframes_t frames;
685             pa_memchunk chunk;
686
687             chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
688
689             frames = (snd_pcm_sframes_t) (pa_memblock_get_length(chunk.memblock) / u->frame_size);
690
691             if (frames > (snd_pcm_sframes_t) (n_bytes/u->frame_size))
692                 frames = (snd_pcm_sframes_t) (n_bytes/u->frame_size);
693
694 /*             pa_log_debug("%lu frames to read", (unsigned long) n); */
695
696             p = pa_memblock_acquire(chunk.memblock);
697             frames = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, (snd_pcm_uframes_t) frames);
698             pa_memblock_release(chunk.memblock);
699
700             if (PA_UNLIKELY(frames < 0)) {
701                 pa_memblock_unref(chunk.memblock);
702
703                 if (!after_avail && (int) frames == -EAGAIN)
704                     break;
705
706                 if ((r = try_recover(u, "snd_pcm_readi", (int) frames)) == 0)
707                     continue;
708
709                 return r;
710             }
711
712             if (!after_avail && frames == 0) {
713                 pa_memblock_unref(chunk.memblock);
714                 break;
715             }
716
717             pa_assert(frames > 0);
718             after_avail = FALSE;
719
720             chunk.index = 0;
721             chunk.length = (size_t) frames * u->frame_size;
722
723             pa_source_post(u->source, &chunk);
724             pa_memblock_unref(chunk.memblock);
725
726             work_done = TRUE;
727
728             u->read_count += frames * u->frame_size;
729
730 /*             pa_log_debug("read %lu frames", (unsigned long) frames); */
731
732             if ((size_t) frames * u->frame_size >= n_bytes)
733                 break;
734
735             n_bytes -= (size_t) frames * u->frame_size;
736         }
737     }
738
739     if (u->use_tsched) {
740         *sleep_usec = pa_bytes_to_usec(left_to_record, &u->source->sample_spec);
741         process_usec = pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec);
742
743         if (*sleep_usec > process_usec)
744             *sleep_usec -= process_usec;
745         else
746             *sleep_usec = 0;
747     }
748
749     return work_done ? 1 : 0;
750 }
751
752 static void update_smoother(struct userdata *u) {
753     snd_pcm_sframes_t delay = 0;
754     uint64_t position;
755     int err;
756     pa_usec_t now1 = 0, now2;
757     snd_pcm_status_t *status;
758
759     snd_pcm_status_alloca(&status);
760
761     pa_assert(u);
762     pa_assert(u->pcm_handle);
763
764     /* Let's update the time smoother */
765
766     if (PA_UNLIKELY((err = pa_alsa_safe_delay(u->pcm_handle, &delay, u->hwbuf_size, &u->source->sample_spec, TRUE)) < 0)) {
767         pa_log_warn("Failed to get delay: %s", pa_alsa_strerror(err));
768         return;
769     }
770
771     if (PA_UNLIKELY((err = snd_pcm_status(u->pcm_handle, status)) < 0))
772         pa_log_warn("Failed to get timestamp: %s", pa_alsa_strerror(err));
773     else {
774         snd_htimestamp_t htstamp = { 0, 0 };
775         snd_pcm_status_get_htstamp(status, &htstamp);
776         now1 = pa_timespec_load(&htstamp);
777     }
778
779     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
780     if (now1 <= 0)
781         now1 = pa_rtclock_now();
782
783     /* check if the time since the last update is bigger than the interval */
784     if (u->last_smoother_update > 0)
785         if (u->last_smoother_update + u->smoother_interval > now1)
786             return;
787
788     position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
789     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
790
791     pa_smoother_put(u->smoother, now1, now2);
792
793     u->last_smoother_update = now1;
794     /* exponentially increase the update interval up to the MAX limit */
795     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
796 }
797
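/* Estimate the capture latency as the difference between the smoothed device time and the amount of data we have already read. */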
798 static pa_usec_t source_get_latency(struct userdata *u) {
799     int64_t delay;
800     pa_usec_t now1, now2;
801
802     pa_assert(u);
803
804     now1 = pa_rtclock_now();
805     now2 = pa_smoother_get(u->smoother, now1);
806
807     delay = (int64_t) now2 - (int64_t) pa_bytes_to_usec(u->read_count, &u->source->sample_spec);
808
809     return delay >= 0 ? (pa_usec_t) delay : 0;
810 }
811
812 static int build_pollfd(struct userdata *u) {
813     pa_assert(u);
814     pa_assert(u->pcm_handle);
815
816     if (u->alsa_rtpoll_item)
817         pa_rtpoll_item_free(u->alsa_rtpoll_item);
818
819     if (!(u->alsa_rtpoll_item = pa_alsa_build_pollfd(u->pcm_handle, u->rtpoll)))
820         return -1;
821
822     return 0;
823 }
824
825 /* Called from IO context */
826 static int suspend(struct userdata *u) {
827     pa_assert(u);
828     pa_assert(u->pcm_handle);
829
830     pa_smoother_pause(u->smoother, pa_rtclock_now());
831
832     /* Let's suspend */
833     snd_pcm_close(u->pcm_handle);
834     u->pcm_handle = NULL;
835
836     if (u->alsa_rtpoll_item) {
837         pa_rtpoll_item_free(u->alsa_rtpoll_item);
838         u->alsa_rtpoll_item = NULL;
839     }
840
841     pa_log_info("Device suspended...");
842
843     return 0;
844 }
845
846 /* Called from IO context */
847 static int update_sw_params(struct userdata *u) {
848     snd_pcm_uframes_t avail_min;
849     int err;
850
851     pa_assert(u);
852
853     /* Use the full buffer if no one asked us for anything specific */
854     u->hwbuf_unused = 0;
855
856     if (u->use_tsched) {
857         pa_usec_t latency;
858
859         if ((latency = pa_source_get_requested_latency_within_thread(u->source)) != (pa_usec_t) -1) {
860             size_t b;
861
862             pa_log_debug("latency set to %0.2fms", (double) latency / PA_USEC_PER_MSEC);
863
864             b = pa_usec_to_bytes(latency, &u->source->sample_spec);
865
866             /* We need at least one sample in our buffer */
867
868             if (PA_UNLIKELY(b < u->frame_size))
869                 b = u->frame_size;
870
871             u->hwbuf_unused = PA_LIKELY(b < u->hwbuf_size) ? (u->hwbuf_size - b) : 0;
872         }
873
874         fix_min_sleep_wakeup(u);
875         fix_tsched_watermark(u);
876     }
877
878     pa_log_debug("hwbuf_unused=%lu", (unsigned long) u->hwbuf_unused);
879
880     avail_min = 1;
881
882     if (u->use_tsched) {
883         pa_usec_t sleep_usec, process_usec;
884
885         hw_sleep_time(u, &sleep_usec, &process_usec);
886         avail_min += pa_usec_to_bytes(sleep_usec, &u->source->sample_spec) / u->frame_size;
887     }
888
889     pa_log_debug("setting avail_min=%lu", (unsigned long) avail_min);
890
891     if ((err = pa_alsa_set_sw_params(u->pcm_handle, avail_min, !u->use_tsched)) < 0) {
892         pa_log("Failed to set software parameters: %s", pa_alsa_strerror(err));
893         return err;
894     }
895
896     return 0;
897 }
898
899 /* Called from IO context */
900 static int unsuspend(struct userdata *u) {
901     pa_sample_spec ss;
902     int err;
903     pa_bool_t b, d;
904     snd_pcm_uframes_t period_size, buffer_size;
905
906     pa_assert(u);
907     pa_assert(!u->pcm_handle);
908
909     pa_log_info("Trying resume...");
910
911     if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE,
912                             SND_PCM_NONBLOCK|
913                             SND_PCM_NO_AUTO_RESAMPLE|
914                             SND_PCM_NO_AUTO_CHANNELS|
915                             SND_PCM_NO_AUTO_FORMAT)) < 0) {
916         pa_log("Error opening PCM device %s: %s", u->device_name, pa_alsa_strerror(err));
917         goto fail;
918     }
919
920     ss = u->source->sample_spec;
921     period_size = u->fragment_size / u->frame_size;
922     buffer_size = u->hwbuf_size / u->frame_size;
923     b = u->use_mmap;
924     d = u->use_tsched;
925
926     if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &period_size, &buffer_size, 0, &b, &d, TRUE)) < 0) {
927         pa_log("Failed to set hardware parameters: %s", pa_alsa_strerror(err));
928         goto fail;
929     }
930
931     if (b != u->use_mmap || d != u->use_tsched) {
932         pa_log_warn("Resume failed, couldn't get original access mode.");
933         goto fail;
934     }
935
936     if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
937         pa_log_warn("Resume failed, couldn't restore original sample settings.");
938         goto fail;
939     }
940
941     if (period_size*u->frame_size != u->fragment_size ||
942         buffer_size*u->frame_size != u->hwbuf_size) {
943         pa_log_warn("Resume failed, couldn't restore original fragment settings. (Old: %lu/%lu, New %lu/%lu)",
944                     (unsigned long) u->hwbuf_size, (unsigned long) u->fragment_size,
945                     (unsigned long) (buffer_size*u->frame_size), (unsigned long) (period_size*u->frame_size));
946         goto fail;
947     }
948
949     if (update_sw_params(u) < 0)
950         goto fail;
951
952     if (build_pollfd(u) < 0)
953         goto fail;
954
955     /* FIXME: We need to reload the volume somehow */
956
957     u->read_count = 0;
958     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
959     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
960     u->last_smoother_update = 0;
961
962     u->first = TRUE;
963
964     pa_log_info("Resumed successfully...");
965
966     return 0;
967
968 fail:
969     if (u->pcm_handle) {
970         snd_pcm_close(u->pcm_handle);
971         u->pcm_handle = NULL;
972     }
973
974     return -PA_ERR_IO;
975 }
976
977 /* Called from IO context */
978 static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
979     struct userdata *u = PA_SOURCE(o)->userdata;
980
981     switch (code) {
982
983         case PA_SOURCE_MESSAGE_GET_LATENCY: {
984             pa_usec_t r = 0;
985
986             if (u->pcm_handle)
987                 r = source_get_latency(u);
988
989             *((pa_usec_t*) data) = r;
990
991             return 0;
992         }
993
994         case PA_SOURCE_MESSAGE_SET_STATE:
995
996             switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
997
998                 case PA_SOURCE_SUSPENDED: {
999                     int r;
1000
1001                     pa_assert(PA_SOURCE_IS_OPENED(u->source->thread_info.state));
1002
1003                     if ((r = suspend(u)) < 0)
1004                         return r;
1005
1006                     break;
1007                 }
1008
1009                 case PA_SOURCE_IDLE:
1010                 case PA_SOURCE_RUNNING: {
1011                     int r;
1012
1013                     if (u->source->thread_info.state == PA_SOURCE_INIT) {
1014                         if (build_pollfd(u) < 0)
1015                             return -PA_ERR_IO;
1016                     }
1017
1018                     if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
1019                         if ((r = unsuspend(u)) < 0)
1020                             return r;
1021                     }
1022
1023                     break;
1024                 }
1025
1026                 case PA_SOURCE_UNLINKED:
1027                 case PA_SOURCE_INIT:
1028                 case PA_SOURCE_INVALID_STATE:
1029                     ;
1030             }
1031
1032             break;
1033     }
1034
1035     return pa_source_process_msg(o, code, data, offset, chunk);
1036 }
1037
1038 /* Called from main context */
1039 static int source_set_state_cb(pa_source *s, pa_source_state_t new_state) {
1040     pa_source_state_t old_state;
1041     struct userdata *u;
1042
1043     pa_source_assert_ref(s);
1044     pa_assert_se(u = s->userdata);
1045
1046     old_state = pa_source_get_state(u->source);
1047
1048     if (PA_SOURCE_IS_OPENED(old_state) && new_state == PA_SOURCE_SUSPENDED)
1049         reserve_done(u);
1050     else if (old_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(new_state))
1051         if (reserve_init(u, u->device_name) < 0)
1052             return -PA_ERR_BUSY;
1053
1054     return 0;
1055 }
1056
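/* Mixer change callback used when deferred volume is disabled; invoked from the main loop (via pa_alsa_fdlist) and re-reads volume and mute from the hardware. */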
1057 static int ctl_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1058     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1059
1060     pa_assert(u);
1061     pa_assert(u->mixer_handle);
1062
1063     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1064         return 0;
1065
1066     if (!PA_SOURCE_IS_LINKED(u->source->state))
1067         return 0;
1068
1069     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1070         return 0;
1071
1072     if (mask & SND_CTL_EVENT_MASK_VALUE) {
1073         pa_source_get_volume(u->source, TRUE);
1074         pa_source_get_mute(u->source, TRUE);
1075     }
1076
1077     return 0;
1078 }
1079
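/* Mixer change callback used with deferred volume; invoked from the IO thread (via the rtpoll mixer handler) and triggers a volume/mute sync. */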
1080 static int io_mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
1081     struct userdata *u = snd_mixer_elem_get_callback_private(elem);
1082
1083     pa_assert(u);
1084     pa_assert(u->mixer_handle);
1085
1086     if (mask == SND_CTL_EVENT_MASK_REMOVE)
1087         return 0;
1088
1089     if (u->source->suspend_cause & PA_SUSPEND_SESSION)
1090         return 0;
1091
1092     if (mask & SND_CTL_EVENT_MASK_VALUE)
1093         pa_source_update_volume_and_mute(u->source);
1094
1095     return 0;
1096 }
1097
1098 static void source_get_volume_cb(pa_source *s) {
1099     struct userdata *u = s->userdata;
1100     pa_cvolume r;
1101     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1102
1103     pa_assert(u);
1104     pa_assert(u->mixer_path);
1105     pa_assert(u->mixer_handle);
1106
1107     if (pa_alsa_path_get_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r) < 0)
1108         return;
1109
1110     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1111     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1112
1113     pa_log_debug("Read hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1114
1115     if (u->mixer_path->has_dB) {
1116         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1117
1118         pa_log_debug("               in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &r));
1119     }
1120
1121     if (pa_cvolume_equal(&u->hardware_volume, &r))
1122         return;
1123
1124     s->real_volume = u->hardware_volume = r;
1125
1126     /* Hmm, so the hardware volume changed, let's reset our software volume */
1127     if (u->mixer_path->has_dB)
1128         pa_source_set_soft_volume(s, NULL);
1129 }
1130
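/* Write the requested volume to the hardware as accurately as possible; with a dB scale the remaining difference is applied as software volume unless it is within VOLUME_ACCURACY. */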
1131 static void source_set_volume_cb(pa_source *s) {
1132     struct userdata *u = s->userdata;
1133     pa_cvolume r;
1134     char vol_str_pcnt[PA_CVOLUME_SNPRINT_MAX];
1135     pa_bool_t deferred_volume = !!(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1136
1137     pa_assert(u);
1138     pa_assert(u->mixer_path);
1139     pa_assert(u->mixer_handle);
1140
1141     /* Shift up by the base volume */
1142     pa_sw_cvolume_divide_scalar(&r, &s->real_volume, s->base_volume);
1143
1144     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &r, deferred_volume, !deferred_volume) < 0)
1145         return;
1146
1147     /* Shift down by the base volume, so that 0dB becomes maximum volume */
1148     pa_sw_cvolume_multiply_scalar(&r, &r, s->base_volume);
1149
1150     u->hardware_volume = r;
1151
1152     if (u->mixer_path->has_dB) {
1153         pa_cvolume new_soft_volume;
1154         pa_bool_t accurate_enough;
1155         char vol_str_db[PA_SW_CVOLUME_SNPRINT_DB_MAX];
1156
1157         /* Match exactly what the user requested by software */
1158         pa_sw_cvolume_divide(&new_soft_volume, &s->real_volume, &u->hardware_volume);
1159
1160         /* If the adjustment to do in software is only minimal we
1161          * can skip it. That saves us CPU at the expense of a bit of
1162          * accuracy */
1163         accurate_enough =
1164             (pa_cvolume_min(&new_soft_volume) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1165             (pa_cvolume_max(&new_soft_volume) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1166
1167         pa_log_debug("Requested volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &s->real_volume));
1168         pa_log_debug("           in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &s->real_volume));
1169         pa_log_debug("Got hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &u->hardware_volume));
1170         pa_log_debug("              in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &u->hardware_volume));
1171         pa_log_debug("Calculated software volume: %s (accurate-enough=%s)",
1172                      pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &new_soft_volume),
1173                      pa_yes_no(accurate_enough));
1174         pa_log_debug("                     in dB: %s", pa_sw_cvolume_snprint_dB(vol_str_db, sizeof(vol_str_db), &new_soft_volume));
1175
1176         if (!accurate_enough)
1177             s->soft_volume = new_soft_volume;
1178
1179     } else {
1180         pa_log_debug("Wrote hardware volume: %s", pa_cvolume_snprint(vol_str_pcnt, sizeof(vol_str_pcnt), &r));
1181
1182         /* We can't match exactly what the user requested, hence let's
1183          * at least tell the user about it */
1184
1185         s->real_volume = r;
1186     }
1187 }
1188
1189 static void source_write_volume_cb(pa_source *s) {
1190     struct userdata *u = s->userdata;
1191     pa_cvolume hw_vol = s->thread_info.current_hw_volume;
1192
1193     pa_assert(u);
1194     pa_assert(u->mixer_path);
1195     pa_assert(u->mixer_handle);
1196     pa_assert(s->flags & PA_SOURCE_DEFERRED_VOLUME);
1197
1198     /* Shift up by the base volume */
1199     pa_sw_cvolume_divide_scalar(&hw_vol, &hw_vol, s->base_volume);
1200
1201     if (pa_alsa_path_set_volume(u->mixer_path, u->mixer_handle, &s->channel_map, &hw_vol, TRUE, TRUE) < 0)
1202         pa_log_error("Writing HW volume failed");
1203     else {
1204         pa_cvolume tmp_vol;
1205         pa_bool_t accurate_enough;
1206
1207         /* Shift down by the base volume, so that 0dB becomes maximum volume */
1208         pa_sw_cvolume_multiply_scalar(&hw_vol, &hw_vol, s->base_volume);
1209
1210         pa_sw_cvolume_divide(&tmp_vol, &hw_vol, &s->thread_info.current_hw_volume);
1211         accurate_enough =
1212             (pa_cvolume_min(&tmp_vol) >= (PA_VOLUME_NORM - VOLUME_ACCURACY)) &&
1213             (pa_cvolume_max(&tmp_vol) <= (PA_VOLUME_NORM + VOLUME_ACCURACY));
1214
1215         if (!accurate_enough) {
1216             union {
1217                 char db[2][PA_SW_CVOLUME_SNPRINT_DB_MAX];
1218                 char pcnt[2][PA_CVOLUME_SNPRINT_MAX];
1219             } vol;
1220
1221             pa_log_debug("Written HW volume did not match with the request: %s (request) != %s",
1222                          pa_cvolume_snprint(vol.pcnt[0], sizeof(vol.pcnt[0]), &s->thread_info.current_hw_volume),
1223                          pa_cvolume_snprint(vol.pcnt[1], sizeof(vol.pcnt[1]), &hw_vol));
1224             pa_log_debug("                                           in dB: %s (request) != %s",
1225                          pa_sw_cvolume_snprint_dB(vol.db[0], sizeof(vol.db[0]), &s->thread_info.current_hw_volume),
1226                          pa_sw_cvolume_snprint_dB(vol.db[1], sizeof(vol.db[1]), &hw_vol));
1227         }
1228     }
1229 }
1230
1231 static void source_get_mute_cb(pa_source *s) {
1232     struct userdata *u = s->userdata;
1233     pa_bool_t b;
1234
1235     pa_assert(u);
1236     pa_assert(u->mixer_path);
1237     pa_assert(u->mixer_handle);
1238
1239     if (pa_alsa_path_get_mute(u->mixer_path, u->mixer_handle, &b) < 0)
1240         return;
1241
1242     s->muted = b;
1243 }
1244
1245 static void source_set_mute_cb(pa_source *s) {
1246     struct userdata *u = s->userdata;
1247
1248     pa_assert(u);
1249     pa_assert(u->mixer_path);
1250     pa_assert(u->mixer_handle);
1251
1252     pa_alsa_path_set_mute(u->mixer_path, u->mixer_handle, s->muted);
1253 }
1254
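/* Install the get/set/write volume and mute callbacks according to what the probed mixer path supports, and set up the base volume and number of volume steps. */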
1255 static void mixer_volume_init(struct userdata *u) {
1256     pa_assert(u);
1257
1258     if (!u->mixer_path->has_volume) {
1259         pa_source_set_write_volume_callback(u->source, NULL);
1260         pa_source_set_get_volume_callback(u->source, NULL);
1261         pa_source_set_set_volume_callback(u->source, NULL);
1262
1263         pa_log_info("Driver does not support hardware volume control, falling back to software volume control.");
1264     } else {
1265         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1266         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1267
1268         if (u->mixer_path->has_dB && u->deferred_volume) {
1269             pa_source_set_write_volume_callback(u->source, source_write_volume_cb);
1270             pa_log_info("Successfully enabled synchronous volume.");
1271         } else
1272             pa_source_set_write_volume_callback(u->source, NULL);
1273
1274         if (u->mixer_path->has_dB) {
1275             pa_source_enable_decibel_volume(u->source, TRUE);
1276             pa_log_info("Hardware volume ranges from %0.2f dB to %0.2f dB.", u->mixer_path->min_dB, u->mixer_path->max_dB);
1277
1278             u->source->base_volume = pa_sw_volume_from_dB(-u->mixer_path->max_dB);
1279             u->source->n_volume_steps = PA_VOLUME_NORM+1;
1280
1281             pa_log_info("Fixing base volume to %0.2f dB", pa_sw_volume_to_dB(u->source->base_volume));
1282         } else {
1283             pa_source_enable_decibel_volume(u->source, FALSE);
1284             pa_log_info("Hardware volume ranges from %li to %li.", u->mixer_path->min_volume, u->mixer_path->max_volume);
1285
1286             u->source->base_volume = PA_VOLUME_NORM;
1287             u->source->n_volume_steps = u->mixer_path->max_volume - u->mixer_path->min_volume + 1;
1288         }
1289
1290         pa_log_info("Using hardware volume control. Hardware dB scale %s.", u->mixer_path->has_dB ? "supported" : "not supported");
1291     }
1292
1293     if (!u->mixer_path->has_mute) {
1294         pa_source_set_get_mute_callback(u->source, NULL);
1295         pa_source_set_set_mute_callback(u->source, NULL);
1296         pa_log_info("Driver does not support hardware mute control, falling back to software mute control.");
1297     } else {
1298         pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1299         pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1300         pa_log_info("Using hardware mute control.");
1301     }
1302 }
1303
1304 static int source_set_port_cb(pa_source *s, pa_device_port *p) {
1305     struct userdata *u = s->userdata;
1306     pa_alsa_port_data *data;
1307
1308     pa_assert(u);
1309     pa_assert(p);
1310     pa_assert(u->mixer_handle);
1311
1312     data = PA_DEVICE_PORT_DATA(p);
1313
1314     pa_assert_se(u->mixer_path = data->path);
1315     pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1316
1317     mixer_volume_init(u);
1318
1319     if (data->setting)
1320         pa_alsa_setting_select(data->setting, u->mixer_handle);
1321
1322     if (s->set_mute)
1323         s->set_mute(s);
1324     if (s->set_volume)
1325         s->set_volume(s);
1326
1327     return 0;
1328 }
1329
1330 static void source_update_requested_latency_cb(pa_source *s) {
1331     struct userdata *u = s->userdata;
1332     pa_assert(u);
1333     pa_assert(u->use_tsched); /* only when timer scheduling is used
1334                                * can we dynamically adjust the
1335                                * latency */
1336
1337     if (!u->pcm_handle)
1338         return;
1339
1340     update_sw_params(u);
1341 }
1342
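/* The capture IO thread: start the PCM, read data with mmap_read()/unix_read(), keep the smoother up to date, program the rtpoll timer for timer scheduling and deferred volume, and handle poll events and errors. */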
1343 static void thread_func(void *userdata) {
1344     struct userdata *u = userdata;
1345     unsigned short revents = 0;
1346
1347     pa_assert(u);
1348
1349     pa_log_debug("Thread starting up");
1350
1351     if (u->core->realtime_scheduling)
1352         pa_make_realtime(u->core->realtime_priority);
1353
1354     pa_thread_mq_install(&u->thread_mq);
1355
1356     for (;;) {
1357         int ret;
1358         pa_usec_t rtpoll_sleep = 0;
1359
1360 #ifdef DEBUG_TIMING
1361         pa_log_debug("Loop");
1362 #endif
1363
1364         /* Read some data and pass it to the sources */
1365         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1366             int work_done;
1367             pa_usec_t sleep_usec = 0;
1368             pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
1369
1370             if (u->first) {
1371                 pa_log_info("Starting capture.");
1372                 snd_pcm_start(u->pcm_handle);
1373
1374                 pa_smoother_resume(u->smoother, pa_rtclock_now(), TRUE);
1375
1376                 u->first = FALSE;
1377             }
1378
1379             if (u->use_mmap)
1380                 work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1381             else
1382                 work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
1383
1384             if (work_done < 0)
1385                 goto fail;
1386
1387 /*             pa_log_debug("work_done = %i", work_done); */
1388
1389             if (work_done)
1390                 update_smoother(u);
1391
1392             if (u->use_tsched) {
1393                 pa_usec_t cusec;
1394
1395                 /* OK, the capture buffer is now empty, let's
1396                  * calculate when to wake up next */
1397
1398 /*                 pa_log_debug("Waking up in %0.2fms (sound card clock).", (double) sleep_usec / PA_USEC_PER_MSEC); */
1399
1400                 /* Convert from the sound card time domain to the
1401                  * system time domain */
1402                 cusec = pa_smoother_translate(u->smoother, pa_rtclock_now(), sleep_usec);
1403
1404 /*                 pa_log_debug("Waking up in %0.2fms (system clock).", (double) cusec / PA_USEC_PER_MSEC); */
1405
1406                 /* We don't trust the conversion, so we wake up at whichever comes first */
1407                 rtpoll_sleep = PA_MIN(sleep_usec, cusec);
1408             }
1409         }
1410
1411         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1412             pa_usec_t volume_sleep;
1413             pa_source_volume_change_apply(u->source, &volume_sleep);
1414             if (volume_sleep > 0)
1415                 rtpoll_sleep = PA_MIN(volume_sleep, rtpoll_sleep);
1416         }
1417
1418         if (rtpoll_sleep > 0)
1419             pa_rtpoll_set_timer_relative(u->rtpoll, rtpoll_sleep);
1420         else
1421             pa_rtpoll_set_timer_disabled(u->rtpoll);
1422
1423         /* Hmm, nothing to do. Let's sleep */
1424         if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
1425             goto fail;
1426
1427         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME)
1428             pa_source_volume_change_apply(u->source, NULL);
1429
1430         if (ret == 0)
1431             goto finish;
1432
1433         /* Tell ALSA about this and process its response */
1434         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
1435             struct pollfd *pollfd;
1436             int err;
1437             unsigned n;
1438
1439             pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
1440
1441             if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
1442                 pa_log("snd_pcm_poll_descriptors_revents() failed: %s", pa_alsa_strerror(err));
1443                 goto fail;
1444             }
1445
1446             if (revents & ~POLLIN) {
1447                 if (pa_alsa_recover_from_poll(u->pcm_handle, revents) < 0)
1448                     goto fail;
1449
1450                 u->first = TRUE;
1451                 revents = 0;
1452             } else if (revents && u->use_tsched && pa_log_ratelimit(PA_LOG_DEBUG))
1453                 pa_log_debug("Wakeup from ALSA!");
1454
1455         } else
1456             revents = 0;
1457     }
1458
1459 fail:
1460     /* If this was not a regular exit from the loop we have to continue
1461      * processing messages until we receive PA_MESSAGE_SHUTDOWN */
1462     pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
1463     pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
1464
1465 finish:
1466     pa_log_debug("Thread shutting down");
1467 }
1468
1469 static void set_source_name(pa_source_new_data *data, pa_modargs *ma, const char *device_id, const char *device_name, pa_alsa_mapping *mapping) {
1470     const char *n;
1471     char *t;
1472
1473     pa_assert(data);
1474     pa_assert(ma);
1475     pa_assert(device_name);
1476
1477     if ((n = pa_modargs_get_value(ma, "source_name", NULL))) {
1478         pa_source_new_data_set_name(data, n);
1479         data->namereg_fail = TRUE;
1480         return;
1481     }
1482
1483     if ((n = pa_modargs_get_value(ma, "name", NULL)))
1484         data->namereg_fail = TRUE;
1485     else {
1486         n = device_id ? device_id : device_name;
1487         data->namereg_fail = FALSE;
1488     }
1489
1490     if (mapping)
1491         t = pa_sprintf_malloc("alsa_input.%s.%s", n, mapping->name);
1492     else
1493         t = pa_sprintf_malloc("alsa_input.%s", n);
1494
1495     pa_source_new_data_set_name(data, t);
1496     pa_xfree(t);
1497 }
1498
1499 static void find_mixer(struct userdata *u, pa_alsa_mapping *mapping, const char *element, pa_bool_t ignore_dB) {
1500
1501     if (!mapping && !element)
1502         return;
1503
1504     if (!(u->mixer_handle = pa_alsa_open_mixer_for_pcm(u->pcm_handle, &u->control_device))) {
1505         pa_log_info("Failed to find a working mixer device.");
1506         return;
1507     }
1508
1509     if (element) {
1510
1511         if (!(u->mixer_path = pa_alsa_path_synthesize(element, PA_ALSA_DIRECTION_INPUT)))
1512             goto fail;
1513
1514         if (pa_alsa_path_probe(u->mixer_path, u->mixer_handle, ignore_dB) < 0)
1515             goto fail;
1516
1517         pa_log_debug("Probed mixer path %s:", u->mixer_path->name);
1518         pa_alsa_path_dump(u->mixer_path);
1519     } else {
1520
1521         if (!(u->mixer_path_set = pa_alsa_path_set_new(mapping, PA_ALSA_DIRECTION_INPUT)))
1522             goto fail;
1523
1524         pa_alsa_path_set_probe(u->mixer_path_set, u->mixer_handle, ignore_dB);
1525     }
1526
1527     return;
1528
1529 fail:
1530
1531     if (u->mixer_path_set) {
1532         pa_alsa_path_set_free(u->mixer_path_set);
1533         u->mixer_path_set = NULL;
1534     } else if (u->mixer_path) {
1535         pa_alsa_path_free(u->mixer_path);
1536         u->mixer_path = NULL;
1537     }
1538
1539     if (u->mixer_handle) {
1540         snd_mixer_close(u->mixer_handle);
1541         u->mixer_handle = NULL;
1542     }
1543 }
1544
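/* Select the mixer path matching the active port (or the single probed path), initialize volume/mute handling and register mixer-change callbacks if any path has volume or mute controls. */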
1545 static int setup_mixer(struct userdata *u, pa_bool_t ignore_dB) {
1546     pa_bool_t need_mixer_callback = FALSE;
1547
1548     pa_assert(u);
1549
1550     if (!u->mixer_handle)
1551         return 0;
1552
1553     if (u->source->active_port) {
1554         pa_alsa_port_data *data;
1555
1556         /* We have a list of supported paths, so let's activate the
1557          * one that has been chosen as active */
1558
1559         data = PA_DEVICE_PORT_DATA(u->source->active_port);
1560         u->mixer_path = data->path;
1561
1562         pa_alsa_path_select(data->path, u->mixer_handle);
1563
1564         if (data->setting)
1565             pa_alsa_setting_select(data->setting, u->mixer_handle);
1566
1567     } else {
1568
1569         if (!u->mixer_path && u->mixer_path_set)
1570             u->mixer_path = u->mixer_path_set->paths;
1571
1572         if (u->mixer_path) {
1573             /* Hmm, we have only a single path, then let's activate it */
1574
1575             pa_alsa_path_select(u->mixer_path, u->mixer_handle);
1576
1577             if (u->mixer_path->settings)
1578                 pa_alsa_setting_select(u->mixer_path->settings, u->mixer_handle);
1579         } else
1580             return 0;
1581     }
1582
1583     mixer_volume_init(u);
1584
1585     /* Will we need to register callbacks? */
1586     if (u->mixer_path_set && u->mixer_path_set->paths) {
1587         pa_alsa_path *p;
1588
1589         PA_LLIST_FOREACH(p, u->mixer_path_set->paths) {
1590             if (p->has_volume || p->has_mute)
1591                 need_mixer_callback = TRUE;
1592         }
1593     }
1594     else if (u->mixer_path)
1595         need_mixer_callback = u->mixer_path->has_volume || u->mixer_path->has_mute;
1596
1597     if (need_mixer_callback) {
1598         int (*mixer_callback)(snd_mixer_elem_t *, unsigned int);
1599         if (u->source->flags & PA_SOURCE_DEFERRED_VOLUME) {
1600             u->mixer_pd = pa_alsa_mixer_pdata_new();
1601             mixer_callback = io_mixer_callback;
1602
1603             if (pa_alsa_set_mixer_rtpoll(u->mixer_pd, u->mixer_handle, u->rtpoll) < 0) {
1604                 pa_log("Failed to initialize file descriptor monitoring");
1605                 return -1;
1606             }
1607         } else {
1608             u->mixer_fdl = pa_alsa_fdlist_new();
1609             mixer_callback = ctl_mixer_callback;
1610
1611             if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, u->core->mainloop) < 0) {
1612                 pa_log("Failed to initialize file descriptor monitoring");
1613                 return -1;
1614             }
1615         }
1616
1617         if (u->mixer_path_set)
1618             pa_alsa_path_set_set_callback(u->mixer_path_set, u->mixer_handle, mixer_callback, u);
1619         else
1620             pa_alsa_path_set_callback(u->mixer_path, u->mixer_handle, mixer_callback, u);
1621     }
1622
1623     return 0;
1624 }
1625
1626 pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, pa_card *card, pa_alsa_mapping *mapping) {
1627
1628     struct userdata *u = NULL;
1629     const char *dev_id = NULL;
1630     pa_sample_spec ss, requested_ss;
1631     pa_channel_map map;
1632     uint32_t nfrags, frag_size, buffer_size, tsched_size, tsched_watermark;
1633     snd_pcm_uframes_t period_frames, buffer_frames, tsched_frames;
1634     size_t frame_size;
1635     pa_bool_t use_mmap = TRUE, b, use_tsched = TRUE, d, ignore_dB = FALSE, namereg_fail = FALSE, deferred_volume = FALSE;
1636     pa_source_new_data data;
1637     pa_alsa_profile_set *profile_set = NULL;
1638
1639     pa_assert(m);
1640     pa_assert(ma);
1641
1642     ss = m->core->default_sample_spec;
1643     map = m->core->default_channel_map;
1644     if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
1645         pa_log("Failed to parse sample specification and channel map");
1646         goto fail;
1647     }
1648
1649     requested_ss = ss;
1650     frame_size = pa_frame_size(&ss);
1651
1652     nfrags = m->core->default_n_fragments;
1653     frag_size = (uint32_t) pa_usec_to_bytes(m->core->default_fragment_size_msec*PA_USEC_PER_MSEC, &ss);
1654     if (frag_size <= 0)
1655         frag_size = (uint32_t) frame_size;
1656     tsched_size = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_BUFFER_USEC, &ss);
1657     tsched_watermark = (uint32_t) pa_usec_to_bytes(DEFAULT_TSCHED_WATERMARK_USEC, &ss);
1658
1659     if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 ||
1660         pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0 ||
1661         pa_modargs_get_value_u32(ma, "tsched_buffer_size", &tsched_size) < 0 ||
1662         pa_modargs_get_value_u32(ma, "tsched_buffer_watermark", &tsched_watermark) < 0) {
1663         pa_log("Failed to parse buffer metrics");
1664         goto fail;
1665     }
1666
1667     buffer_size = nfrags * frag_size;
1668
1669     period_frames = frag_size/frame_size;
1670     buffer_frames = buffer_size/frame_size;
1671     tsched_frames = tsched_size/frame_size;
1672
1673     if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
1674         pa_log("Failed to parse mmap argument.");
1675         goto fail;
1676     }
1677
1678     if (pa_modargs_get_value_boolean(ma, "tsched", &use_tsched) < 0) {
1679         pa_log("Failed to parse tsched argument.");
1680         goto fail;
1681     }
1682
1683     if (pa_modargs_get_value_boolean(ma, "ignore_dB", &ignore_dB) < 0) {
1684         pa_log("Failed to parse ignore_dB argument.");
1685         goto fail;
1686     }
1687
1688     deferred_volume = m->core->deferred_volume;
1689     if (pa_modargs_get_value_boolean(ma, "deferred_volume", &deferred_volume) < 0) {
1690         pa_log("Failed to parse deferred_volume argument.");
1691         goto fail;
1692     }
1693
1694     use_tsched = pa_alsa_may_tsched(use_tsched);
1695
1696     u = pa_xnew0(struct userdata, 1);
1697     u->core = m->core;
1698     u->module = m;
1699     u->use_mmap = use_mmap;
1700     u->use_tsched = use_tsched;
1701     u->deferred_volume = deferred_volume;
1702     u->first = TRUE;
1703     u->rtpoll = pa_rtpoll_new();
1704     pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1705
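         /* Set up the smoother that is used to derive a smoothed time and
          * latency estimate from the PCM timing data collected in the I/O
          * thread. */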
1706     u->smoother = pa_smoother_new(
1707             SMOOTHER_ADJUST_USEC,
1708             SMOOTHER_WINDOW_USEC,
1709             TRUE,
1710             TRUE,
1711             5,
1712             pa_rtclock_now(),
1713             TRUE);
1714     u->smoother_interval = SMOOTHER_MIN_INTERVAL;
1715
1716     dev_id = pa_modargs_get_value(
1717             ma, "device_id",
1718             pa_modargs_get_value(ma, "device", DEFAULT_DEVICE));
1719
1720     if (reserve_init(u, dev_id) < 0)
1721         goto fail;
1722
1723     if (reserve_monitor_init(u, dev_id) < 0)
1724         goto fail;
1725
1726     b = use_mmap;
1727     d = use_tsched;
1728
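         /* Open the PCM device. Three cases: an explicit profile mapping, a
          * device_id with automatic mapping selection, or a plain ALSA device
          * string. The open helpers may clear 'b' (mmap) and 'd' (tsched) if
          * the device cannot support them; both are re-checked below. */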
1729     if (mapping) {
1730
1731         if (!(dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1732             pa_log("device_id= not set");
1733             goto fail;
1734         }
1735
1736         if (!(u->pcm_handle = pa_alsa_open_by_device_id_mapping(
1737                       dev_id,
1738                       &u->device_name,
1739                       &ss, &map,
1740                       SND_PCM_STREAM_CAPTURE,
1741                       &period_frames, &buffer_frames, tsched_frames,
1742                       &b, &d, mapping)))
1743             goto fail;
1744
1745     } else if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
1746
1747         if (!(profile_set = pa_alsa_profile_set_new(NULL, &map)))
1748             goto fail;
1749
1750         if (!(u->pcm_handle = pa_alsa_open_by_device_id_auto(
1751                       dev_id,
1752                       &u->device_name,
1753                       &ss, &map,
1754                       SND_PCM_STREAM_CAPTURE,
1755                       &period_frames, &buffer_frames, tsched_frames,
1756                       &b, &d, profile_set, &mapping)))
1757             goto fail;
1758
1759     } else {
1760
1761         if (!(u->pcm_handle = pa_alsa_open_by_device_string(
1762                       pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
1763                       &u->device_name,
1764                       &ss, &map,
1765                       SND_PCM_STREAM_CAPTURE,
1766                       &period_frames, &buffer_frames, tsched_frames,
1767                       &b, &d, FALSE)))
1768             goto fail;
1769     }
1770
1771     pa_assert(u->device_name);
1772     pa_log_info("Successfully opened device %s.", u->device_name);
1773
1774     if (pa_alsa_pcm_is_modem(u->pcm_handle)) {
1775         pa_log_notice("Device %s is a modem, refusing further initialization.", u->device_name);
1776         goto fail;
1777     }
1778
1779     if (mapping)
1780         pa_log_info("Selected mapping '%s' (%s).", mapping->description, mapping->name);
1781
1782     if (use_mmap && !b) {
1783         pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
1784         u->use_mmap = use_mmap = FALSE;
1785     }
1786
1787     if (use_tsched && (!b || !d)) {
1788         pa_log_info("Cannot enable timer-based scheduling, falling back to sound IRQ scheduling.");
1789         u->use_tsched = use_tsched = FALSE;
1790     }
1791
1792     if (u->use_mmap)
1793         pa_log_info("Successfully enabled mmap() mode.");
1794
1795     if (u->use_tsched)
1796         pa_log_info("Successfully enabled timer-based scheduling mode.");
1797
1798     /* ALSA might tweak the sample spec, so recalculate the frame size */
1799     frame_size = pa_frame_size(&ss);
1800
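         /* Look for a mixer for this device (optionally restricted by the
          * "control" module argument) so that hardware volume and mute can be
          * offered. */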
1801     find_mixer(u, mapping, pa_modargs_get_value(ma, "control", NULL), ignore_dB);
1802
1803     pa_source_new_data_init(&data);
1804     data.driver = driver;
1805     data.module = m;
1806     data.card = card;
1807     set_source_name(&data, ma, dev_id, u->device_name, mapping);
1808
1809     /* We need to give pa_modargs_get_value_boolean() a pointer to a local
1810      * variable instead of using &data.namereg_fail directly, because
1811      * data.namereg_fail is a bitfield and taking the address of a bitfield
1812      * variable is impossible. */
1813     namereg_fail = data.namereg_fail;
1814     if (pa_modargs_get_value_boolean(ma, "namereg_fail", &namereg_fail) < 0) {
1815         pa_log("Failed to parse namereg_fail argument.");
1816         pa_source_new_data_done(&data);
1817         goto fail;
1818     }
1819     data.namereg_fail = namereg_fail;
1820
1821     pa_source_new_data_set_sample_spec(&data, &ss);
1822     pa_source_new_data_set_channel_map(&data, &map);
1823
1824     pa_alsa_init_proplist_pcm(m->core, data.proplist, u->pcm_handle);
1825     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_STRING, u->device_name);
1826     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_BUFFER_SIZE, "%lu", (unsigned long) (buffer_frames * frame_size));
1827     pa_proplist_setf(data.proplist, PA_PROP_DEVICE_BUFFERING_FRAGMENT_SIZE, "%lu", (unsigned long) (period_frames * frame_size));
1828     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_ACCESS_MODE, u->use_tsched ? "mmap+timer" : (u->use_mmap ? "mmap" : "serial"));
1829
1830     if (mapping) {
1831         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, mapping->name);
1832         pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_DESCRIPTION, mapping->description);
1833     }
1834
1835     pa_alsa_init_description(data.proplist);
1836
1837     if (u->control_device)
1838         pa_alsa_init_proplist_ctl(data.proplist, u->control_device);
1839
1840     if (pa_modargs_get_proplist(ma, "source_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1841         pa_log("Invalid properties");
1842         pa_source_new_data_done(&data);
1843         goto fail;
1844     }
1845
1846     if (u->mixer_path_set)
1847         pa_alsa_add_ports(&data.ports, u->mixer_path_set);
1848
1849     u->source = pa_source_new(m->core, &data, PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|(u->use_tsched ? PA_SOURCE_DYNAMIC_LATENCY : 0));
1850     pa_source_new_data_done(&data);
1851
1852     if (!u->source) {
1853         pa_log("Failed to create source object");
1854         goto fail;
1855     }
1856
1857     if (pa_modargs_get_value_u32(ma, "deferred_volume_safety_margin",
1858                                  &u->source->thread_info.volume_change_safety_margin) < 0) {
1859         pa_log("Failed to parse deferred_volume_safety_margin parameter");
1860         goto fail;
1861     }
1862
1863     if (pa_modargs_get_value_s32(ma, "deferred_volume_extra_delay",
1864                                  &u->source->thread_info.volume_change_extra_delay) < 0) {
1865         pa_log("Failed to parse deferred_volume_extra_delay parameter");
1866         goto fail;
1867     }
1868
1869     u->source->parent.process_msg = source_process_msg;
1870     if (u->use_tsched)
1871         u->source->update_requested_latency = source_update_requested_latency_cb;
1872     u->source->set_state = source_set_state_cb;
1873     u->source->set_port = source_set_port_cb;
1874     u->source->userdata = u;
1875
1876     pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
1877     pa_source_set_rtpoll(u->source, u->rtpoll);
1878
1879     u->frame_size = frame_size;
1880     u->fragment_size = frag_size = (size_t) (period_frames * frame_size);
1881     u->hwbuf_size = buffer_size = (size_t) (buffer_frames * frame_size);
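         /* Start with a reset hardware volume; the actual value is synced
          * with the mixer further down, once the I/O thread is running. */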
1882     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
1883
1884     pa_log_info("Using %0.1f fragments of size %lu bytes (%0.2fms), buffer size is %lu bytes (%0.2fms)",
1885                 (double) u->hwbuf_size / (double) u->fragment_size,
1886                 (long unsigned) u->fragment_size,
1887                 (double) pa_bytes_to_usec(u->fragment_size, &ss) / PA_USEC_PER_MSEC,
1888                 (long unsigned) u->hwbuf_size,
1889                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
1890
1891     if (u->use_tsched) {
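             /* tsched_watermark was computed in bytes of the requested sample
              * spec; convert it to the sample spec the device actually gave
              * us. */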
1892         u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
1893
1894         u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
1895         u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
1896
1897         u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
1898         u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
1899
1900         fix_min_sleep_wakeup(u);
1901         fix_tsched_watermark(u);
1902
1903         pa_source_set_latency_range(u->source,
1904                                     0,
1905                                     pa_bytes_to_usec(u->hwbuf_size, &ss));
1906
1907         pa_log_info("Time scheduling watermark is %0.2fms",
1908                     (double) pa_bytes_to_usec(u->tsched_watermark, &ss) / PA_USEC_PER_MSEC);
1909     } else
1910         pa_source_set_fixed_latency(u->source, pa_bytes_to_usec(u->hwbuf_size, &ss));
1911
1912     reserve_update(u);
1913
1914     if (update_sw_params(u) < 0)
1915         goto fail;
1916
1917     if (setup_mixer(u, ignore_dB) < 0)
1918         goto fail;
1919
1920     pa_alsa_dump(PA_LOG_DEBUG, u->pcm_handle);
1921
1922     if (!(u->thread = pa_thread_new("alsa-source", thread_func, u))) {
1923         pa_log("Failed to create thread.");
1924         goto fail;
1925     }
1926
1927     /* Get initial mixer settings */
1928     if (data.volume_is_set) {
1929         if (u->source->set_volume)
1930             u->source->set_volume(u->source);
1931     } else {
1932         if (u->source->get_volume)
1933             u->source->get_volume(u->source);
1934     }
1935
1936     if (data.muted_is_set) {
1937         if (u->source->set_mute)
1938             u->source->set_mute(u->source);
1939     } else {
1940         if (u->source->get_mute)
1941             u->source->get_mute(u->source);
1942     }
1943
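         /* If an initial volume or mute value was applied above and the
          * source supports it, flush that value to the hardware now. */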
1944     if ((data.volume_is_set || data.muted_is_set) && u->source->write_volume)
1945         u->source->write_volume(u->source);
1946
1947     pa_source_put(u->source);
1948
1949     if (profile_set)
1950         pa_alsa_profile_set_free(profile_set);
1951
1952     return u->source;
1953
1954 fail:
1955
1956     if (u)
1957         userdata_free(u);
1958
1959     if (profile_set)
1960         pa_alsa_profile_set_free(profile_set);
1961
1962     return NULL;
1963 }
1964
1965 static void userdata_free(struct userdata *u) {
1966     pa_assert(u);
1967
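         /* Unlink the source first so that the core stops delivering new
          * streams and volume/mute updates to it while it is being torn
          * down. */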
1968     if (u->source)
1969         pa_source_unlink(u->source);
1970
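         /* Ask the I/O thread to shut down and wait for it to exit before any
          * of the resources it uses are freed below. */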
1971     if (u->thread) {
1972         pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1973         pa_thread_free(u->thread);
1974     }
1975
1976     pa_thread_mq_done(&u->thread_mq);
1977
1978     if (u->source)
1979         pa_source_unref(u->source);
1980
1981     if (u->mixer_pd)
1982         pa_alsa_mixer_pdata_free(u->mixer_pd);
1983
1984     if (u->alsa_rtpoll_item)
1985         pa_rtpoll_item_free(u->alsa_rtpoll_item);
1986
1987     if (u->rtpoll)
1988         pa_rtpoll_free(u->rtpoll);
1989
1990     if (u->pcm_handle) {
1991         snd_pcm_drop(u->pcm_handle);
1992         snd_pcm_close(u->pcm_handle);
1993     }
1994
1995     if (u->mixer_fdl)
1996         pa_alsa_fdlist_free(u->mixer_fdl);
1997
1998     if (u->mixer_path_set)
1999         pa_alsa_path_set_free(u->mixer_path_set);
2000     else if (u->mixer_path)
2001         pa_alsa_path_free(u->mixer_path);
2002
2003     if (u->mixer_handle)
2004         snd_mixer_close(u->mixer_handle);
2005
2006     if (u->smoother)
2007         pa_smoother_free(u->smoother);
2008
2009     reserve_done(u);
2010     monitor_done(u);
2011
2012     pa_xfree(u->device_name);
2013     pa_xfree(u->control_device);
2014     pa_xfree(u);
2015 }
2016
2017 void pa_alsa_source_free(pa_source *s) {
2018     struct userdata *u;
2019
2020     pa_source_assert_ref(s);
2021     pa_assert_se(u = s->userdata);
2022
2023     userdata_free(u);
2024 }