2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
/* Tuning constants for mixing and latency handling.
 * MAX_MIX_CHANNELS: max number of sink inputs mixed in one render pass.
 * MIX_BUFFER_LENGTH: preferred render chunk size (one memory page).
 * ABSOLUTE_MIN/MAX_LATENCY and DEFAULT_FIXED_LATENCY are in microseconds. */
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Register pa_sink as a public refcounted class derived from pa_msgobject,
 * and forward-declare the destructor installed in parent.parent.free. */
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
57 static void sink_free(pa_object *s);
/* Initialize a caller-provided pa_sink_new_data and give it a fresh,
 * empty property list.
 * NOTE(review): this listing elides lines between the embedded numbers
 * 59 and 63 (presumably the zeroing of *data) and the return — confirm
 * against the full source. */
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
63 data->proplist = pa_proplist_new();
/* Set (copy) the sink name into the new-data struct; ownership of the
 * duplicated string stays with the struct until pa_sink_new_data_done(). */
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
/* Copy the sample spec into the new-data struct; a NULL spec clears the
 * "is set" flag (note the intentional assignment inside the condition). */
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
/* Copy the channel map into the new-data struct; NULL clears the flag. */
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
/* Copy the initial volume into the new-data struct; NULL clears the flag. */
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Record the initial mute state; !! normalizes any nonzero value to TRUE. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
/* Replace the requested active port name, freeing any previous value.
 * pa_xfree(NULL) is a no-op, so this is safe on first call. */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: the proplist, any ports
 * still in the hashmap (stolen one by one, then freed), the name and the
 * active-port string. Called when the struct is no longer needed. */
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
/* Drain the port map before destroying it; entries are owned here unless
 * pa_sink_new() stole the whole map earlier. */
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
/* Allocate a device port plus 'extra' trailing bytes for the caller's
 * private data (the struct is aligned so the extra area starts aligned).
 * name/description are copied. NOTE(review): initialization of the
 * remaining fields is elided in this listing. */
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Free a device port created by pa_device_port_new().
 * NOTE(review): freeing of p->name and p itself is elided in this listing. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
/* Clear all implementor-provided callbacks so a sink starts (or is reset)
 * with no driver hooks installed. */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
/* Create and partially initialize a new sink from 'data':
 *  - registers the name, fires the NEW and FIXATE hooks (each may veto),
 *  - validates/normalizes sample spec, channel map, volume and mute,
 *  - fills in the main-thread and thread_info state,
 *  - creates the companion monitor source ("<name>.monitor").
 * Returns NULL on any validation/hook failure. The sink is not usable by
 * clients until pa_sink_put() is called.
 * NOTE(review): several original lines are elided in this listing (gaps in
 * the embedded numbering), including some declarations and error paths. */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name; failure here means a name clash. */
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
190 pa_sink_new_data_set_name(data, name);
/* Modules may veto creation from the SINK_NEW hook. */
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
194 pa_namereg_unregister(core, name);
198 /* FIXME, need to free s here on failure */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Derive a default channel map from the channel count if none was given. */
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT))
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
217 if (!data->muted_is_set)
/* Inherit properties from the owning card, then fill in standard
 * description/icon/role properties. */
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
/* Second hook: parameters are now fixed; modules get a last look. */
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
229 pa_namereg_unregister(core, name);
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
237 s->state = PA_SINK_INIT;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
247 s->priority = pa_device_init_priority(s->proplist);
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
252 s->inputs = pa_idxset_new(NULL, NULL);
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
267 /* As a minor optimization we just steal the list instead of
269 s->ports = data->ports;
272 s->active_port = NULL;
273 s->save_port = FALSE;
/* Prefer the explicitly requested port; fall back (below) to the
 * highest-priority one. */
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
279 if (!s->active_port && s->ports) {
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
291 pa_silence_memchunk_get(
292 &core->silence_cache,
/* Thread-shadow state: written here (IO thread not running yet), later
 * only touched from the IO thread. */
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Build the companion monitor source, mirroring the sink's spec, map and
 * latency-related flags. */
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
344 pa_source_new_data_done(&source_data);
346 if (!s->monitor_source) {
352 s->monitor_source->monitor_of = s;
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
361 /* Called from main context */
/* Transition the sink to 'state': ask the implementor first (set_state
 * callback), then tell the IO thread via SET_STATE; rolls the implementor
 * back if the message fails. Fires STATE_CHANGED hook + subscription event
 * unless we are entering UNLINKED (pa_sink_unlink handles those), and on a
 * suspend/resume edge notifies inputs and syncs the monitor source. */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
368 pa_assert_ctl_context();
370 if (s->state == state)
373 original_state = s->state;
/* True iff this transition crosses the SUSPENDED <-> OPENED boundary. */
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
380 if ((ret = s->set_state(s, state)) < 0)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* Undo the implementor-side transition if the IO thread refused. */
387 s->set_state(s, original_state);
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
399 if (suspend_change) {
403 /* We're suspending or resuming, tell everyone about it */
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
410 i->suspend(i, state == PA_SINK_SUSPENDED);
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
419 /* Called from main context */
/* Finish sink setup and make it live: fix up the volume flags, copy the
 * (possibly implementor-adjusted) real volume into reference/soft volume,
 * sanity-check the flag/latency invariants against the monitor source,
 * move to IDLE and announce the sink (subscription event + PUT hook). */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
424 pa_assert(s->state == PA_SINK_INIT);
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
/* Pure software volume is always exact, hence dB-capable. */
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
/* Invariant checks: volume semantics and latency flags must be mutually
 * consistent, and the monitor source must mirror the sink's latency setup. */
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
460 pa_source_put(s->monitor_source);
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
466 /* Called from main context */
/* Detach the sink from the core: fire the UNLINK hook, unregister the
 * name, drop it from the core/card idxsets, kill all remaining inputs,
 * switch to UNLINKED, unlink the monitor source, and post the REMOVE
 * event + UNLINK_POST hook. Idempotent by design (see comment below). */
467 void pa_sink_unlink(pa_sink* s) {
469 pa_sink_input *i, *j = NULL;
472 pa_assert_ctl_context();
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
482 linked = PA_SINK_IS_LINKED(s->state);
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill inputs one at a time; each kill removes the input from s->inputs. */
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
496 pa_sink_input_kill(i);
501 sink_set_state(s, PA_SINK_UNLINKED);
503 s->state = PA_SINK_UNLINKED;
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
516 /* Called from main context */
/* Destructor, invoked when the last reference is dropped: unlinks if still
 * linked, then releases the monitor source, input containers, silence
 * memblock, proplist and the port hashmap (ports freed individually). */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
525 if (PA_SINK_IS_LINKED(s->state))
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
535 pa_idxset_free(s->inputs, NULL, NULL);
/* thread_info.inputs holds its own references; drop them one by one. */
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
549 pa_proplist_free(s->proplist);
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
557 pa_hashmap_free(s->ports, NULL, NULL);
563 /* Called from main context, and not while the IO thread is active, please */
/* Install the async message queue used to talk to the IO thread, and
 * propagate it to the monitor source so both share the same queue. */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
574 /* Called from main context, and not while the IO thread is active, please */
/* Update a restricted subset of the sink's flags (only the latency flags
 * may change) and mirror the change onto the monitor source, translating
 * each PA_SINK_* latency flag to its PA_SOURCE_* counterpart. */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
585 s->flags = (s->flags & ~mask) | (value & mask);
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
/* BUG FIX: the value argument previously yielded PA_SINK_DYNAMIC_LATENCY
 * (the sink-side constant) instead of the source-side flag, so the
 * monitor source's dynamic-latency bit could be set to the wrong value. */
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
594 /* Called from IO context, or before _put() from main context */
/* Install the rtpoll object the IO thread runs on, and share it with the
 * monitor source. */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
599 s->thread_info.rtpoll = p;
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
605 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anyone uses the sink.
 * Does nothing while suspended. Returns the sink_set_state() result. */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
611 if (s->state == PA_SINK_SUSPENDED)
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
617 /* Called from main context */
/* Add or remove a suspend cause bit (mirrored on the monitor source) and
 * transition the sink accordingly: SUSPENDED while any cause remains,
 * otherwise RUNNING/IDLE depending on usage. Short-circuits when the
 * current state already matches the cause mask. */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
/* Already in the right state for the accumulated causes? Then no-op. */
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
643 /* Called from main context */
/* Begin moving all inputs away from this sink: each input that accepts
 * start_move is referenced and queued (queue logic partly elided in this
 * listing); inputs that refuse are unreffed again. Returns the queue. */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before acting on the current one, since
 * start_move detaches it from s->inputs. */
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
658 pa_sink_input_ref(i);
660 if (pa_sink_input_start_move(i) >= 0)
663 pa_sink_input_unref(i);
669 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): re-attach each
 * queued input to sink 's' (falling back to fail_move on error), drop the
 * reference taken at start, and free the now-empty queue. */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
682 pa_sink_input_unref(i);
685 pa_queue_free(q, NULL, NULL);
688 /* Called from main context */
/* Abort a pending move: fail every queued input, drop its reference, and
 * free the queue. */
689 void pa_sink_move_all_fail(pa_queue *q) {
692 pa_assert_ctl_context();
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
700 pa_queue_free(q, NULL, NULL);
703 /* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clears the pending-rewind state,
 * forwards the rewind to every attached input, and finally to the monitor
 * source. Skipped entirely if no rewind was requested and nbytes is 0. */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
712 /* If nobody requested this and this is actually no real rewind
713 * then we can short cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated in actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
726 pa_log_debug("Processing rewind...");
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
738 /* Called from IO thread context */
/* Peek up to 'maxinfo' input streams into the 'info' array for mixing.
 * Shrinks *length to the shortest chunk seen (mixlength); pure-silence
 * chunks are dropped immediately. Each collected entry holds a reference
 * to its sink input in info->userdata. Returns the number of entries
 * (return/advance logic partly elided in this listing). */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
743 size_t mixlength = *length;
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
/* Silence contributes nothing to the mix; skip it early. */
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
762 info->userdata = pa_sink_input_ref(i);
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
778 /* Called from IO thread context */
/* After a render: advance every input by result->length bytes, feed the
 * per-input chunk (pre-mix, with its own volume applied) to any direct
 * source outputs, drop the references/memblocks held by the mix-info
 * array, and finally post the mixed result to the monitor source.
 * NOTE(review): several bookkeeping lines (index search, n_unreffed
 * updates) are elided in this listing. */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
783 unsigned n_unreffed = 0;
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
791 /* We optimize for the case where the order of the inputs has not changed */
793 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
795 pa_mix_info* m = NULL;
797 pa_sink_input_assert_ref(i);
799 /* Let's try to find the matching entry info the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
802 if (info[p].userdata == i) {
813 pa_sink_input_drop(i, result->length);
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* Direct outputs get this input's own chunk (volume-adjusted),
 * not the full mix. */
822 if (m && m->chunk.memblock) {
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
843 pa_memblock_unref(c.memblock);
/* Release the matched mix-info entry's chunk and input reference. */
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
852 pa_sink_input_unref(m->userdata);
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
875 /* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result (a fresh or
 * shared memchunk). Fast paths: suspended -> silence; zero inputs ->
 * silence; one input -> reuse its chunk, applying soft volume/mute only
 * if needed; otherwise mix into a new memblock via pa_mix(). Ends by
 * calling inputs_drop() to advance inputs and feed the monitor source. */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
879 size_t block_size_max;
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
/* Clamp the request to a frame-aligned default and to the pool's
 * maximum block size. */
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
906 pa_assert(length > 0);
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, hand out (a slice of) the cached silence. */
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
915 if (result->length > length)
916 result->length = length;
/* n == 1: borrow the single input's chunk directly. */
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
924 if (result->length > length)
925 result->length = length;
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
929 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
930 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
931 pa_memblock_unref(result->memblock);
932 pa_silence_memchunk_get(&s->core->silence_cache,
938 pa_memchunk_make_writable(result, 0);
939 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* General case: mix all collected inputs into a new memblock. */
944 result->memblock = pa_memblock_new(s->core->mempool, length);
946 ptr = pa_memblock_acquire(result->memblock);
947 result->length = pa_mix(info, n,
950 &s->thread_info.soft_volume,
951 s->thread_info.soft_muted);
952 pa_memblock_release(result->memblock);
957 inputs_drop(s, info, n, result);
962 /* Called from IO thread context */
/* Render mixed audio directly into the caller-provided *target memchunk
 * (may shrink target->length). Same fast paths as pa_sink_render():
 * suspended or no inputs -> silence the target; one input -> copy its
 * (volume-adjusted) chunk; otherwise pa_mix() straight into the target's
 * memblock. Finishes with inputs_drop(). */
963 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
964 pa_mix_info info[MAX_MIX_CHANNELS];
966 size_t length, block_size_max;
968 pa_sink_assert_ref(s);
969 pa_sink_assert_io_context(s);
970 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
972 pa_assert(target->memblock);
973 pa_assert(target->length > 0);
974 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
976 pa_assert(!s->thread_info.rewind_requested);
977 pa_assert(s->thread_info.rewind_nbytes == 0);
979 if (s->thread_info.state == PA_SINK_SUSPENDED) {
980 pa_silence_memchunk(target, &s->sample_spec);
986 length = target->length;
987 block_size_max = pa_mempool_block_size_max(s->core->mempool);
988 if (length > block_size_max)
989 length = pa_frame_align(block_size_max, &s->sample_spec);
991 pa_assert(length > 0);
993 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: no live inputs — silence whatever fits. */
996 if (target->length > length)
997 target->length = length;
999 pa_silence_memchunk(target, &s->sample_spec);
1000 } else if (n == 1) {
1003 if (target->length > length)
1004 target->length = length;
1006 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1008 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1009 pa_silence_memchunk(target, &s->sample_spec);
/* Copy the single input's chunk, scaling only when volume != norm. */
1013 vchunk = info[0].chunk;
1014 pa_memblock_ref(vchunk.memblock);
1016 if (vchunk.length > length)
1017 vchunk.length = length;
1019 if (!pa_cvolume_is_norm(&volume)) {
1020 pa_memchunk_make_writable(&vchunk, 0);
1021 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1024 pa_memchunk_memcpy(target, &vchunk);
1025 pa_memblock_unref(vchunk.memblock);
/* General case: mix directly into the target memory. */
1031 ptr = pa_memblock_acquire(target->memblock);
1033 target->length = pa_mix(info, n,
1034 (uint8_t*) ptr + target->index, length,
1036 &s->thread_info.soft_volume,
1037 s->thread_info.soft_muted);
1039 pa_memblock_release(target->memblock);
1042 inputs_drop(s, info, n, target);
1047 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the target chunk is filled
 * completely, looping over partial renders (loop body partly elided in
 * this listing). Suspended sinks get pure silence. */
1048 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1052 pa_sink_assert_ref(s);
1053 pa_sink_assert_io_context(s);
1054 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1056 pa_assert(target->memblock);
1057 pa_assert(target->length > 0);
1058 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1060 pa_assert(!s->thread_info.rewind_requested);
1061 pa_assert(s->thread_info.rewind_nbytes == 0);
1063 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1064 pa_silence_memchunk(target, &s->sample_spec);
1077 pa_sink_render_into(s, &chunk);
1086 /* Called from IO thread context */
/* Render exactly 'length' bytes into *result: first a normal render, then
 * — if it came up short — make the chunk writable at full size and fill
 * the remainder in place via pa_sink_render_into_full(). */
1087 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1088 pa_sink_assert_ref(s);
1089 pa_sink_assert_io_context(s);
1090 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1091 pa_assert(length > 0);
1092 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1095 pa_assert(!s->thread_info.rewind_requested);
1096 pa_assert(s->thread_info.rewind_nbytes == 0);
1100 pa_sink_render(s, length, result);
1102 if (result->length < length) {
/* Ensure we own a block big enough for 'length', then top it up. */
1105 pa_memchunk_make_writable(result, length);
1107 chunk.memblock = result->memblock;
1108 chunk.index = result->index + result->length;
1109 chunk.length = length - result->length;
1111 pa_sink_render_into_full(s, &chunk);
1113 result->length = length;
1119 /* Called from main thread */
/* Query the sink's current latency by sending a synchronous GET_LATENCY
 * message to the IO thread. Returns early (elided lines) when suspended
 * or when the sink does not advertise PA_SINK_LATENCY. */
1120 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1123 pa_sink_assert_ref(s);
1124 pa_assert_ctl_context();
1125 pa_assert(PA_SINK_IS_LINKED(s->state));
1127 /* The returned value is supposed to be in the time domain of the sound card! */
1129 if (s->state == PA_SINK_SUSPENDED)
1132 if (!(s->flags & PA_SINK_LATENCY))
1135 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1140 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg directly
 * on this object instead of round-tripping through the message queue. */
1141 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1145 pa_sink_assert_ref(s);
1146 pa_sink_assert_io_context(s);
1147 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1149 /* The returned value is supposed to be in the time domain of the sound card! */
1151 if (s->thread_info.state == PA_SINK_SUSPENDED)
1154 if (!(s->flags & PA_SINK_LATENCY))
1157 o = PA_MSGOBJECT(s);
1159 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1161 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1167 /* Called from main context */
/* For a flat-volume sink, recompute every input's reference_ratio as
 * i->volume / s->reference_volume (per channel, remapped to the input's
 * channel map). Skips channels where the remapped sink volume is muted,
 * and leaves ratios untouched when they already reproduce i->volume. */
1168 static void compute_reference_ratios(pa_sink *s) {
1172 pa_sink_assert_ref(s);
1173 pa_assert_ctl_context();
1174 pa_assert(PA_SINK_IS_LINKED(s->state));
1175 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1177 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1179 pa_cvolume remapped;
1182 * Calculates the reference volume from the sink's reference
1183 * volume. This basically calculates:
1185 * i->reference_ratio = i->volume / s->reference_volume
1188 remapped = s->reference_volume;
1189 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1191 i->reference_ratio.channels = i->sample_spec.channels;
1193 for (c = 0; c < i->sample_spec.channels; c++) {
1195 /* We don't update when the sink volume is 0 anyway */
1196 if (remapped.values[c] <= PA_VOLUME_MUTED)
1199 /* Don't update the reference ratio unless necessary */
1200 if (pa_sw_volume_multiply(
1201 i->reference_ratio.values[c],
1202 remapped.values[c]) == i->volume.values[c])
1205 i->reference_ratio.values[c] = pa_sw_volume_divide(
1206 i->volume.values[c],
1207 remapped.values[c]);
1212 /* Called from main context */
/* For a flat-volume sink, recompute each input's real_ratio
 * (i->volume / s->real_volume, per channel) and from it the soft_volume
 * (real_ratio * volume_factor) that the IO thread will actually apply.
 * Channels where the remapped sink volume is muted get soft MUTED while
 * real_ratio is left alone. */
1213 static void compute_real_ratios(pa_sink *s) {
1217 pa_sink_assert_ref(s);
1218 pa_assert_ctl_context();
1219 pa_assert(PA_SINK_IS_LINKED(s->state));
1220 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1222 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1224 pa_cvolume remapped;
1227 * This basically calculates:
1229 * i->real_ratio := i->volume / s->real_volume
1230 * i->soft_volume := i->real_ratio * i->volume_factor
1233 remapped = s->real_volume;
1234 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1236 i->real_ratio.channels = i->sample_spec.channels;
1237 i->soft_volume.channels = i->sample_spec.channels;
1239 for (c = 0; c < i->sample_spec.channels; c++) {
1241 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1242 /* We leave i->real_ratio untouched */
1243 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1247 /* Don't lose accuracy unless necessary */
1248 if (pa_sw_volume_multiply(
1249 i->real_ratio.values[c],
1250 remapped.values[c]) != i->volume.values[c])
1252 i->real_ratio.values[c] = pa_sw_volume_divide(
1253 i->volume.values[c],
1254 remapped.values[c]);
1256 i->soft_volume.values[c] = pa_sw_volume_multiply(
1257 i->real_ratio.values[c],
1258 i->volume_factor.values[c]);
1261 /* We don't copy the soft_volume to the thread_info data
1262 * here. That must be done by the caller */
1266 /* Called from main thread */
/* For a flat-volume sink, set s->real_volume to the per-channel maximum
 * of all input volumes (remapped to the sink's map), then refresh all
 * inputs' real ratios/soft volumes. With no inputs, real_volume simply
 * tracks reference_volume. */
1267 static void compute_real_volume(pa_sink *s) {
1271 pa_sink_assert_ref(s);
1272 pa_assert_ctl_context();
1273 pa_assert(PA_SINK_IS_LINKED(s->state));
1274 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1276 /* This determines the maximum volume of all streams and sets
1277 * s->real_volume accordingly. */
1279 if (pa_idxset_isempty(s->inputs)) {
1280 /* In the special case that we have no sink input we leave the
1281 * volume unmodified. */
1282 s->real_volume = s->reference_volume;
/* Start from silence and merge (max) every input's remapped volume in. */
1286 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1288 /* First let's determine the new maximum volume of all inputs
1289 * connected to this sink */
1290 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1291 pa_cvolume remapped;
1293 remapped = i->volume;
1294 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1295 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1298 /* Then, let's update the real ratios/soft volumes of all inputs
1299 * connected to this sink */
1300 compute_real_ratios(s);
1303 /* Called from main thread */
/* For a flat-volume sink whose reference volume changed from "outside"
 * (not via a sink-input change), recompute each input's volume as
 * s->reference_volume * i->reference_ratio (remapped), and notify
 * callbacks/subscribers for inputs whose volume actually changed. */
1304 static void propagate_reference_volume(pa_sink *s) {
1308 pa_sink_assert_ref(s);
1309 pa_assert_ctl_context();
1310 pa_assert(PA_SINK_IS_LINKED(s->state));
1311 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1313 /* This is called whenever the sink volume changes that is not
1314 * caused by a sink input volume change. We need to fix up the
1315 * sink input volumes accordingly */
1317 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1318 pa_cvolume old_volume, remapped;
1320 old_volume = i->volume;
1322 /* This basically calculates:
1324 * i->volume := s->reference_volume * i->reference_ratio */
1326 remapped = s->reference_volume;
1327 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1328 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1330 /* The volume changed, let's tell people so */
1331 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1333 if (i->volume_changed)
1334 i->volume_changed(i);
1336 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1341 /* Called from main thread */
1342 void pa_sink_set_volume(
/* Set (or, with volume == NULL, resynchronize) the sink's reference
 * volume.  With a non-NULL volume the reference volume is updated and,
 * in flat-volume mode, propagated to the inputs; with NULL the reference
 * volume is rebuilt from the stream volumes.  Finally the soft volume is
 * updated and the IO thread is told via PA_SINK_MESSAGE_SET_VOLUME.
 * NOTE(review): the parameter list and several braces appear elided in
 * this extract (e.g. the pa_sink *s and save parameters). */
1344 const pa_cvolume *volume,
1348 pa_cvolume old_reference_volume;
1349 pa_bool_t reference_changed;
1351 pa_sink_assert_ref(s);
1352 pa_assert_ctl_context();
1353 pa_assert(PA_SINK_IS_LINKED(s->state));
1354 pa_assert(!volume || pa_cvolume_valid(volume));
1355 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1356 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1358 /* As a special exception we accept mono volumes on all sinks --
1359 * even on those with more complex channel maps */
1361 /* If volume is NULL we synchronize the sink's real and reference
1362 * volumes with the stream volumes. If it is not NULL we update
1363 * the reference_volume with it. */
1365 old_reference_volume = s->reference_volume;
/* A channel-compatible volume is taken verbatim; a mono volume scales
 * all channels to its single value. */
1369 if (pa_cvolume_compatible(volume, &s->sample_spec))
1370 s->reference_volume = *volume;
1372 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1374 if (s->flags & PA_SINK_FLAT_VOLUME) {
1375 /* OK, propagate this volume change back to the inputs */
1376 propagate_reference_volume(s);
1378 /* And now recalculate the real volume */
1379 compute_real_volume(s);
1381 s->real_volume = s->reference_volume;
1384 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1386 /* Ok, let's determine the new real volume */
1387 compute_real_volume(s);
1389 /* Let's 'push' the reference volume if necessary */
1390 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1392 /* We need to fix the reference ratios of all streams now that
1393 * we changed the reference volume */
1394 compute_reference_ratios(s);
/* Only persist the volume when it changed or the caller asked to save. */
1397 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1398 s->save_volume = (!reference_changed && s->save_volume) || save;
1400 if (s->set_volume) {
1401 /* If we have a function set_volume(), then we do not apply a
1402 * soft volume by default. However, set_volume() is free to
1403 * apply one to s->soft_volume */
1405 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1409 /* If we have no function set_volume(), then the soft volume
1410 * becomes the virtual volume */
1411 s->soft_volume = s->real_volume;
1413 /* This tells the sink that soft and/or virtual volume changed */
1415 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1417 if (reference_changed)
1418 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1421 /* Called from main thread. Only to be called by sink implementor */
1422 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
/* Sink-implementor API: set the software (attenuation) volume directly.
 * NULL resets it to neutral.  If the sink is linked, the change is
 * forwarded to the IO thread; otherwise thread_info is updated inline
 * (safe only before the IO thread starts). */
1423 pa_sink_assert_ref(s);
1424 pa_assert_ctl_context();
1427 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1429 s->soft_volume = *volume;
1431 if (PA_SINK_IS_LINKED(s->state))
1432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1434 s->thread_info.soft_volume = s->soft_volume;
1437 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
/* React to a hardware-driven volume change: adopt s->real_volume as the
 * new reference volume and rebuild the per-input stream volumes from
 * their reference ratios.  No-op if the real volume did not change.
 * Marks save_volume since hw changes are almost certainly user actions. */
1440 pa_cvolume old_reference_volume;
1442 pa_sink_assert_ref(s);
1443 pa_assert_ctl_context();
1444 pa_assert(PA_SINK_IS_LINKED(s->state));
1446 /* This is called when the hardware's real volume changes due to
1447 * some external event. We copy the real volume into our
1448 * reference volume and then rebuild the stream volumes based on
1449 * i->real_ratio which should stay fixed. */
1451 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1454 old_reference_volume = s->reference_volume;
1456 /* 1. Make the real volume the reference volume */
1457 s->reference_volume = s->real_volume;
1459 if (s->flags & PA_SINK_FLAT_VOLUME) {
1461 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1462 pa_cvolume old_volume, remapped;
1464 old_volume = i->volume;
1466 /* 2. Since the sink's reference and real volumes are equal
1467 * now our ratios should be too. */
1468 i->reference_ratio = i->real_ratio;
1470 /* 3. Recalculate the new stream reference volume based on the
1471 * reference ratio and the sink's reference volume.
1473 * This basically calculates:
1475 * i->volume = s->reference_volume * i->reference_ratio
1477 * This is identical to propagate_reference_volume() */
1478 remapped = s->reference_volume;
1479 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1480 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1482 /* Notify if something changed */
1483 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1485 if (i->volume_changed)
1486 i->volume_changed(i);
1488 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1493 /* Something got changed in the hardware. It probably makes sense
1494 * to save changed hw settings given that hw volume changes not
1495 * triggered by PA are almost certainly done by the user. */
1496 s->save_volume = TRUE;
1498 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1502 /* Called from main thread */
1503 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
/* Return the sink's reference volume.  When refresh_volume is set or
 * force_refresh is requested, first query the IO thread (and through it
 * the driver) for the current real volume and propagate any hw change. */
1504 pa_sink_assert_ref(s);
1505 pa_assert_ctl_context();
1506 pa_assert(PA_SINK_IS_LINKED(s->state));
1508 if (s->refresh_volume || force_refresh) {
1509 struct pa_cvolume old_real_volume;
1511 old_real_volume = s->real_volume;
/* GET_VOLUME lets the implementor update s->real_volume from hw. */
1516 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1518 propagate_real_volume(s, &old_real_volume);
1521 return &s->reference_volume;
1524 /* Called from main thread */
1525 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
/* Sink-implementor API: announce that the hardware volume changed to
 * new_real_volume; propagates the change into reference/stream volumes. */
1526 pa_cvolume old_real_volume;
1528 pa_sink_assert_ref(s);
1529 pa_assert_ctl_context();
1530 pa_assert(PA_SINK_IS_LINKED(s->state));
1532 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1534 old_real_volume = s->real_volume;
1535 s->real_volume = *new_real_volume;
1537 propagate_real_volume(s, &old_real_volume);
1540 /* Called from main thread */
1541 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
/* Set the sink's mute state, persist it per the save policy, inform the
 * IO thread, and post a change event if the state actually flipped.
 * NOTE(review): the line assigning s->muted from the mute parameter
 * appears elided in this extract — confirm against upstream. */
1542 pa_bool_t old_muted;
1544 pa_sink_assert_ref(s);
1545 pa_assert_ctl_context();
1546 pa_assert(PA_SINK_IS_LINKED(s->state));
1548 old_muted = s->muted;
1550 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1555 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1557 if (old_muted != s->muted)
1558 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1561 /* Called from main thread */
1562 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
/* Return the sink's mute state, optionally refreshing it from the
 * driver first via GET_MUTE.  A hw-side change marks save_muted and
 * posts a change event; SET_MUTE then resyncs the soft mute state. */
1564 pa_sink_assert_ref(s);
1565 pa_assert_ctl_context();
1566 pa_assert(PA_SINK_IS_LINKED(s->state));
1568 if (s->refresh_muted || force_refresh) {
1569 pa_bool_t old_muted = s->muted;
1574 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1576 if (old_muted != s->muted) {
1577 s->save_muted = TRUE;
1579 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1581 /* Make sure the soft mute status stays in sync */
1582 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1589 /* Called from main thread */
1590 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
/* Sink-implementor API: announce a hw-driven mute change.  No-op when
 * unchanged; otherwise adopt it, mark it savable, and post an event. */
1591 pa_sink_assert_ref(s);
1592 pa_assert_ctl_context();
1593 pa_assert(PA_SINK_IS_LINKED(s->state));
1595 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1597 if (s->muted == new_muted)
1600 s->muted = new_muted;
1601 s->save_muted = TRUE;
1603 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1606 /* Called from main thread */
1607 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
/* Merge proplist p into the sink's proplist using the given update mode,
 * then (if linked) fire the proplist-changed hook and post an event. */
1608 pa_sink_assert_ref(s);
1609 pa_assert_ctl_context();
1612 pa_proplist_update(s->proplist, mode, p);
1614 if (PA_SINK_IS_LINKED(s->state)) {
1615 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1616 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1622 /* Called from main thread */
1623 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1624 void pa_sink_set_description(pa_sink *s, const char *description) {
/* Set (or with NULL, clear) PA_PROP_DEVICE_DESCRIPTION, mirror the new
 * description onto the monitor source, and notify listeners.  Early
 * returns when nothing would change. */
1626 pa_sink_assert_ref(s);
1627 pa_assert_ctl_context();
1629 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1632 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1634 if (old && description && pa_streq(old, description))
1638 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1640 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1642 if (s->monitor_source) {
/* Keep the monitor source's description derived from ours. */
1645 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1646 pa_source_set_description(s->monitor_source, n);
1650 if (PA_SINK_IS_LINKED(s->state)) {
1651 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1652 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1656 /* Called from main thread */
1657 unsigned pa_sink_linked_by(pa_sink *s) {
/* Count everything linked to this sink: all sink inputs plus everything
 * linked to its monitor source (unlike pa_sink_used_by()). */
1660 pa_sink_assert_ref(s);
1661 pa_assert_ctl_context();
1662 pa_assert(PA_SINK_IS_LINKED(s->state));
1664 ret = pa_idxset_size(s->inputs);
1666 /* We add in the number of streams connected to us here. Please
1667 * note the asymmmetry to pa_sink_used_by()! */
1669 if (s->monitor_source)
1670 ret += pa_source_linked_by(s->monitor_source);
1675 /* Called from main thread */
1676 unsigned pa_sink_used_by(pa_sink *s) {
/* Count actively playing (non-corked) sink inputs.  Monitor-source
 * streams deliberately do not count here. */
1679 pa_sink_assert_ref(s);
1680 pa_assert_ctl_context();
1681 pa_assert(PA_SINK_IS_LINKED(s->state));
1683 ret = pa_idxset_size(s->inputs);
1684 pa_assert(ret >= s->n_corked);
1686 /* Streams connected to our monitor source do not matter for
1687 * pa_sink_used_by()!.*/
1689 return ret - s->n_corked;
1692 /* Called from main thread */
1693 unsigned pa_sink_check_suspend(pa_sink *s) {
/* Count the streams (own inputs plus monitor-source clients) that
 * should inhibit auto-suspend: corked inputs and inputs flagged
 * DONT_INHIBIT_AUTO_SUSPEND are skipped.
 * NOTE(review): the counting statements and early-return for unlinked
 * sinks appear elided in this extract. */
1698 pa_sink_assert_ref(s);
1699 pa_assert_ctl_context();
1701 if (!PA_SINK_IS_LINKED(s->state))
1706 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1707 pa_sink_input_state_t st;
1709 st = pa_sink_input_get_state(i);
1710 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1712 if (st == PA_SINK_INPUT_CORKED)
1715 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1721 if (s->monitor_source)
1722 ret += pa_source_check_suspend(s->monitor_source);
1727 /* Called from the IO thread */
1728 static void sync_input_volumes_within_thread(pa_sink *s) {
/* IO thread: copy each input's main-thread soft_volume into its
 * thread_info copy, requesting a rewind for any input that changed so
 * the new volume takes effect on already-rendered audio. */
1732 pa_sink_assert_ref(s);
1733 pa_sink_assert_io_context(s);
1735 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1736 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1739 i->thread_info.soft_volume = i->soft_volume;
1740 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1744 /* Called from IO thread, except when it is not */
1745 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
/* Central message dispatcher for the sink, normally run in the IO
 * thread (some messages arrive before the IO thread exists).  Handles
 * input add/remove/move, volume/mute sync, state changes, latency and
 * rewind/request limits.  NOTE(review): many break/return statements
 * and closing braces appear elided in this extract — the case
 * boundaries below follow the visible labels only. */
1746 pa_sink *s = PA_SINK(o);
1747 pa_sink_assert_ref(s);
1749 switch ((pa_sink_message_t) code) {
1751 case PA_SINK_MESSAGE_ADD_INPUT: {
1752 pa_sink_input *i = PA_SINK_INPUT(userdata);
1754 /* If you change anything here, make sure to change the
1755 * sink input handling a few lines down at
1756 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1758 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1760 /* Since the caller sleeps in pa_sink_input_put(), we can
1761 * safely access data outside of thread_info even though
/* Wire up the synchronized-stream doubly linked list mirrors. */
1764 if ((i->thread_info.sync_prev = i->sync_prev)) {
1765 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1766 pa_assert(i->sync_prev->sync_next == i);
1767 i->thread_info.sync_prev->thread_info.sync_next = i;
1770 if ((i->thread_info.sync_next = i->sync_next)) {
1771 pa_assert(i->sink == i->thread_info.sync_next->sink);
1772 pa_assert(i->sync_next->sync_prev == i);
1773 i->thread_info.sync_next->thread_info.sync_prev = i;
1776 pa_assert(!i->thread_info.attached);
1777 i->thread_info.attached = TRUE;
1782 pa_sink_input_set_state_within_thread(i, i->state);
1784 /* The requested latency of the sink input needs to be
1785 * fixed up and then configured on the sink */
1787 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1788 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1790 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1791 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1793 /* We don't rewind here automatically. This is left to the
1794 * sink input implementor because some sink inputs need a
1795 * slow start, i.e. need some time to buffer client
1796 * samples before beginning streaming. */
1798 /* In flat volume mode we need to update the volume as
1800 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1803 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1804 pa_sink_input *i = PA_SINK_INPUT(userdata);
1806 /* If you change anything here, make sure to change the
1807 * sink input handling a few lines down at
1808 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1813 pa_sink_input_set_state_within_thread(i, i->state);
1815 pa_assert(i->thread_info.attached);
1816 i->thread_info.attached = FALSE;
1818 /* Since the caller sleeps in pa_sink_input_unlink(),
1819 * we can safely access data outside of thread_info even
1820 * though it is mutable */
1822 pa_assert(!i->sync_prev);
1823 pa_assert(!i->sync_next);
/* Unhook this input from the thread-side sync list. */
1825 if (i->thread_info.sync_prev) {
1826 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1827 i->thread_info.sync_prev = NULL;
1830 if (i->thread_info.sync_next) {
1831 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1832 i->thread_info.sync_next = NULL;
1835 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1836 pa_sink_input_unref(i);
1838 pa_sink_invalidate_requested_latency(s, TRUE);
1839 pa_sink_request_rewind(s, (size_t) -1);
1841 /* In flat volume mode we need to update the volume as
1843 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1846 case PA_SINK_MESSAGE_START_MOVE: {
1847 pa_sink_input *i = PA_SINK_INPUT(userdata);
1849 /* We don't support moving synchronized streams. */
1850 pa_assert(!i->sync_prev);
1851 pa_assert(!i->sync_next);
1852 pa_assert(!i->thread_info.sync_next);
1853 pa_assert(!i->thread_info.sync_prev);
1855 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1857 size_t sink_nbytes, total_nbytes;
1859 /* Get the latency of the sink */
1860 usec = pa_sink_get_latency_within_thread(s);
1861 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1862 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1864 if (total_nbytes > 0) {
/* Rewrite everything still in flight so it can replay on the new sink. */
1865 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1866 i->thread_info.rewrite_flush = TRUE;
1867 pa_sink_input_process_rewind(i, sink_nbytes);
1874 pa_assert(i->thread_info.attached);
1875 i->thread_info.attached = FALSE;
1877 /* Let's remove the sink input ...*/
1878 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1879 pa_sink_input_unref(i);
1881 pa_sink_invalidate_requested_latency(s, TRUE);
1883 pa_log_debug("Requesting rewind due to started move");
1884 pa_sink_request_rewind(s, (size_t) -1);
1886 /* In flat volume mode we need to update the volume as
1888 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1891 case PA_SINK_MESSAGE_FINISH_MOVE: {
1892 pa_sink_input *i = PA_SINK_INPUT(userdata);
1894 /* We don't support moving synchronized streams. */
1895 pa_assert(!i->sync_prev);
1896 pa_assert(!i->sync_next);
1897 pa_assert(!i->thread_info.sync_next);
1898 pa_assert(!i->thread_info.sync_prev);
1900 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1902 pa_assert(!i->thread_info.attached);
1903 i->thread_info.attached = TRUE;
1908 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1909 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1911 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1912 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1914 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1918 /* Get the latency of the sink */
1919 usec = pa_sink_get_latency_within_thread(s);
1920 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
/* Skip what the old sink already played, then rewind the new one. */
1923 pa_sink_input_drop(i, nbytes);
1925 pa_log_debug("Requesting rewind due to finished move");
1926 pa_sink_request_rewind(s, nbytes);
1929 /* In flat volume mode we need to update the volume as
1931 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1934 case PA_SINK_MESSAGE_SET_VOLUME:
1936 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1937 s->thread_info.soft_volume = s->soft_volume;
1938 pa_sink_request_rewind(s, (size_t) -1);
1941 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1944 /* Fall through ... */
1946 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1947 sync_input_volumes_within_thread(s);
1950 case PA_SINK_MESSAGE_GET_VOLUME:
1953 case PA_SINK_MESSAGE_SET_MUTE:
1955 if (s->thread_info.soft_muted != s->muted) {
1956 s->thread_info.soft_muted = s->muted;
1957 pa_sink_request_rewind(s, (size_t) -1);
1962 case PA_SINK_MESSAGE_GET_MUTE:
1965 case PA_SINK_MESSAGE_SET_STATE: {
/* suspend_change is true on any transition into or out of SUSPENDED. */
1967 pa_bool_t suspend_change =
1968 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1969 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1971 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1973 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1974 s->thread_info.rewind_nbytes = 0;
1975 s->thread_info.rewind_requested = FALSE;
1978 if (suspend_change) {
1982 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1983 if (i->suspend_within_thread)
1984 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1990 case PA_SINK_MESSAGE_DETACH:
1992 /* Detach all streams */
1993 pa_sink_detach_within_thread(s);
1996 case PA_SINK_MESSAGE_ATTACH:
1998 /* Reattach all streams */
1999 pa_sink_attach_within_thread(s);
2002 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2004 pa_usec_t *usec = userdata;
2005 *usec = pa_sink_get_requested_latency_within_thread(s);
2007 /* Yes, that's right, the IO thread will see -1 when no
2008 * explicit requested latency is configured, the main
2009 * thread will see max_latency */
2010 if (*usec == (pa_usec_t) -1)
2011 *usec = s->thread_info.max_latency;
2016 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2017 pa_usec_t *r = userdata;
2019 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2024 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2025 pa_usec_t *r = userdata;
2027 r[0] = s->thread_info.min_latency;
2028 r[1] = s->thread_info.max_latency;
2033 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2035 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2038 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2040 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2043 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2045 *((size_t*) userdata) = s->thread_info.max_rewind;
2048 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2050 *((size_t*) userdata) = s->thread_info.max_request;
2053 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2055 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2058 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2060 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2063 case PA_SINK_MESSAGE_GET_LATENCY:
2064 case PA_SINK_MESSAGE_MAX:
2071 /* Called from main thread */
2072 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
/* Apply pa_sink_suspend() with the given cause to every sink in the
 * core; a failing sink records the error but iteration continues. */
2077 pa_core_assert_ref(c);
2078 pa_assert_ctl_context();
2079 pa_assert(cause != 0);
2081 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2084 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2091 /* Called from main thread */
2092 void pa_sink_detach(pa_sink *s) {
/* Main thread: synchronously ask the IO thread to detach all streams. */
2093 pa_sink_assert_ref(s);
2094 pa_assert_ctl_context();
2095 pa_assert(PA_SINK_IS_LINKED(s->state));
2097 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2100 /* Called from main thread */
2101 void pa_sink_attach(pa_sink *s) {
/* Main thread: synchronously ask the IO thread to reattach all streams. */
2102 pa_sink_assert_ref(s);
2103 pa_assert_ctl_context();
2104 pa_assert(PA_SINK_IS_LINKED(s->state));
2106 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2109 /* Called from IO thread */
2110 void pa_sink_detach_within_thread(pa_sink *s) {
/* IO thread: detach every input, then the monitor source.
 * NOTE(review): the loop body (per-input detach call) appears elided in
 * this extract. */
2114 pa_sink_assert_ref(s);
2115 pa_sink_assert_io_context(s);
2116 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2118 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2122 if (s->monitor_source)
2123 pa_source_detach_within_thread(s->monitor_source);
2126 /* Called from IO thread */
2127 void pa_sink_attach_within_thread(pa_sink *s) {
/* IO thread: attach every input, then the monitor source.
 * NOTE(review): the loop body (per-input attach call) appears elided in
 * this extract. */
2131 pa_sink_assert_ref(s);
2132 pa_sink_assert_io_context(s);
2133 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2135 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2139 if (s->monitor_source)
2140 pa_source_attach_within_thread(s->monitor_source);
2143 /* Called from IO thread */
2144 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
/* IO thread: record a rewind request of nbytes ((size_t)-1 means "as
 * much as possible"), clamped to max_rewind, and notify the sink
 * implementor.  Ignored while suspended or when a larger request is
 * already pending. */
2145 pa_sink_assert_ref(s);
2146 pa_sink_assert_io_context(s);
2147 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2149 if (s->thread_info.state == PA_SINK_SUSPENDED)
2152 if (nbytes == (size_t) -1)
2153 nbytes = s->thread_info.max_rewind;
2155 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2157 if (s->thread_info.rewind_requested &&
2158 nbytes <= s->thread_info.rewind_nbytes)
2161 s->thread_info.rewind_nbytes = nbytes;
2162 s->thread_info.rewind_requested = TRUE;
2164 if (s->request_rewind)
2165 s->request_rewind(s);
2168 /* Called from IO thread */
2169 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
/* IO thread: return the effective requested latency — the minimum of
 * all inputs' and the monitor source's requests, clamped to the sink's
 * latency range; (pa_usec_t)-1 when nothing requested anything.  The
 * result is cached (requested_latency_valid) once the sink is linked. */
2170 pa_usec_t result = (pa_usec_t) -1;
2173 pa_usec_t monitor_latency;
2175 pa_sink_assert_ref(s);
2176 pa_sink_assert_io_context(s);
2178 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2179 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2181 if (s->thread_info.requested_latency_valid)
2182 return s->thread_info.requested_latency;
2184 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2185 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2186 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2187 result = i->thread_info.requested_sink_latency;
2189 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2191 if (monitor_latency != (pa_usec_t) -1 &&
2192 (result == (pa_usec_t) -1 || result > monitor_latency))
2193 result = monitor_latency;
2195 if (result != (pa_usec_t) -1)
2196 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2198 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2199 /* Only cache if properly initialized */
2200 s->thread_info.requested_latency = result;
2201 s->thread_info.requested_latency_valid = TRUE;
2207 /* Called from main thread */
2208 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
/* Main thread: fetch the requested latency from the IO thread via
 * GET_REQUESTED_LATENCY (which maps "unset" to max_latency).  While the
 * sink is suspended no query is made. */
2211 pa_sink_assert_ref(s);
2212 pa_assert_ctl_context();
2213 pa_assert(PA_SINK_IS_LINKED(s->state));
2215 if (s->state == PA_SINK_SUSPENDED)
2218 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2222 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2223 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
/* IO thread (or main thread before IO start): update max_rewind and
 * forward the new limit to all inputs and the monitor source. */
2227 pa_sink_assert_ref(s);
2228 pa_sink_assert_io_context(s);
2230 if (max_rewind == s->thread_info.max_rewind)
2233 s->thread_info.max_rewind = max_rewind;
2235 if (PA_SINK_IS_LINKED(s->thread_info.state))
2236 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2237 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2239 if (s->monitor_source)
2240 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2243 /* Called from main thread */
2244 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
/* Main thread: set max_rewind, routing through the IO thread when
 * linked, directly otherwise (before the IO thread starts). */
2245 pa_sink_assert_ref(s);
2246 pa_assert_ctl_context();
2248 if (PA_SINK_IS_LINKED(s->state))
2249 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2251 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2254 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2255 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
/* IO thread (or main thread before IO start): update max_request and
 * forward the new limit to all inputs. */
2258 pa_sink_assert_ref(s);
2259 pa_sink_assert_io_context(s);
2261 if (max_request == s->thread_info.max_request)
2264 s->thread_info.max_request = max_request;
2266 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2269 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2270 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2274 /* Called from main thread */
2275 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
/* Main thread: set max_request, routing through the IO thread when
 * linked, directly otherwise (before the IO thread starts). */
2276 pa_sink_assert_ref(s);
2277 pa_assert_ctl_context();
2279 if (PA_SINK_IS_LINKED(s->state))
2280 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2282 pa_sink_set_max_request_within_thread(s, max_request);
2285 /* Called from IO thread */
2286 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
/* IO thread: drop the cached requested latency (dynamic-latency sinks
 * only) and notify the sink implementor and all inputs so they can
 * recompute.  NOTE(review): the use of the `dynamic` parameter and an
 * early return appear elided in this extract. */
2290 pa_sink_assert_ref(s);
2291 pa_sink_assert_io_context(s);
2293 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2294 s->thread_info.requested_latency_valid = FALSE;
2298 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2300 if (s->update_requested_latency)
2301 s->update_requested_latency(s);
2303 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2304 if (i->update_sink_requested_latency)
2305 i->update_sink_requested_latency(i);
2309 /* Called from main thread */
2310 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* Main thread: set the sink's dynamic-latency range, clamping both ends
 * to the ABSOLUTE_{MIN,MAX}_LATENCY bounds (0 means "no limit").
 * Routed through the IO thread when linked. */
2311 pa_sink_assert_ref(s);
2312 pa_assert_ctl_context();
2314 /* min_latency == 0: no limit
2315 * min_latency anything else: specified limit
2317 * Similar for max_latency */
2319 if (min_latency < ABSOLUTE_MIN_LATENCY)
2320 min_latency = ABSOLUTE_MIN_LATENCY;
2322 if (max_latency <= 0 ||
2323 max_latency > ABSOLUTE_MAX_LATENCY)
2324 max_latency = ABSOLUTE_MAX_LATENCY;
2326 pa_assert(min_latency <= max_latency);
2328 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2329 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2330 max_latency == ABSOLUTE_MAX_LATENCY) ||
2331 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2333 if (PA_SINK_IS_LINKED(s->state)) {
2339 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2341 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2344 /* Called from main thread */
2345 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
/* Main thread: read the latency range — via the IO thread when linked,
 * straight from thread_info otherwise. */
2346 pa_sink_assert_ref(s);
2347 pa_assert_ctl_context();
2348 pa_assert(min_latency);
2349 pa_assert(max_latency);
2351 if (PA_SINK_IS_LINKED(s->state)) {
2352 pa_usec_t r[2] = { 0, 0 };
2354 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2356 *min_latency = r[0];
2357 *max_latency = r[1];
2359 *min_latency = s->thread_info.min_latency;
2360 *max_latency = s->thread_info.max_latency;
2364 /* Called from IO thread */
2365 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
/* IO thread: store a new latency range, notify inputs that care,
 * invalidate the cached requested latency, and mirror the range onto
 * the monitor source.  No-op when the range is unchanged. */
2366 pa_sink_assert_ref(s);
2367 pa_sink_assert_io_context(s);
2369 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2370 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2371 pa_assert(min_latency <= max_latency);
2373 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2374 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2375 max_latency == ABSOLUTE_MAX_LATENCY) ||
2376 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2378 if (s->thread_info.min_latency == min_latency &&
2379 s->thread_info.max_latency == max_latency)
2382 s->thread_info.min_latency = min_latency;
2383 s->thread_info.max_latency = max_latency;
2385 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2389 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2390 if (i->update_sink_latency_range)
2391 i->update_sink_latency_range(i);
2394 pa_sink_invalidate_requested_latency(s, FALSE);
2396 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2399 /* Called from main thread */
2400 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
/* Main thread: set the fixed latency of a non-dynamic-latency sink
 * (dynamic sinks must pass 0), clamped to the absolute bounds.  Routed
 * through the IO thread when linked; mirrored onto the monitor source. */
2401 pa_sink_assert_ref(s);
2402 pa_assert_ctl_context();
2404 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2405 pa_assert(latency == 0);
2409 if (latency < ABSOLUTE_MIN_LATENCY)
2410 latency = ABSOLUTE_MIN_LATENCY;
2412 if (latency > ABSOLUTE_MAX_LATENCY)
2413 latency = ABSOLUTE_MAX_LATENCY;
2415 if (PA_SINK_IS_LINKED(s->state))
2416 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2418 s->thread_info.fixed_latency = latency;
2420 pa_source_set_fixed_latency(s->monitor_source, latency);
2423 /* Called from main thread */
2424 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
/* Main thread: return the fixed latency (dynamic-latency sinks have
 * none).  Queries the IO thread when linked, reads thread_info
 * directly otherwise. */
2427 pa_sink_assert_ref(s);
2428 pa_assert_ctl_context();
2430 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2433 if (PA_SINK_IS_LINKED(s->state))
2434 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2436 latency = s->thread_info.fixed_latency;
2441 /* Called from IO thread */
2442 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
/* IO thread: store a new fixed latency, notify inputs, invalidate the
 * requested-latency cache, and mirror onto the monitor source.  No-op
 * when unchanged; dynamic-latency sinks must pass 0. */
2443 pa_sink_assert_ref(s);
2444 pa_sink_assert_io_context(s);
2446 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2447 pa_assert(latency == 0);
2451 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2452 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2454 if (s->thread_info.fixed_latency == latency)
2457 s->thread_info.fixed_latency = latency;
2459 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2463 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2464 if (i->update_sink_fixed_latency)
2465 i->update_sink_fixed_latency(i);
2468 pa_sink_invalidate_requested_latency(s, FALSE);
2470 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2473 /* Called from main context */
2474 size_t pa_sink_get_max_rewind(pa_sink *s) {
/* Main thread: return max_rewind — directly when unlinked, else by
 * querying the IO thread. */
2476 pa_sink_assert_ref(s);
2477 pa_assert_ctl_context();
2479 if (!PA_SINK_IS_LINKED(s->state))
2480 return s->thread_info.max_rewind;
2482 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2487 /* Called from main context */
2488 size_t pa_sink_get_max_request(pa_sink *s) {
/* Main thread: return max_request — directly when unlinked, else by
 * querying the IO thread. */
2490 pa_sink_assert_ref(s);
2491 pa_assert_ctl_context();
2493 if (!PA_SINK_IS_LINKED(s->state))
2494 return s->thread_info.max_request;
2496 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2501 /* Called from main context */
2502 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2503 pa_device_port *port;
2505 pa_sink_assert_ref(s);
2506 pa_assert_ctl_context();
2509 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2510 return -PA_ERR_NOTIMPLEMENTED;
2514 return -PA_ERR_NOENTITY;
2516 if (!(port = pa_hashmap_get(s->ports, name)))
2517 return -PA_ERR_NOENTITY;
2519 if (s->active_port == port) {
2520 s->save_port = s->save_port || save;
2524 if ((s->set_port(s, port)) < 0)
2525 return -PA_ERR_NOENTITY;
2527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2529 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2531 s->active_port = port;
2532 s->save_port = save;
2537 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2538 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2542 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2545 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2547 if (pa_streq(ff, "microphone"))
2548 t = "audio-input-microphone";
2549 else if (pa_streq(ff, "webcam"))
2551 else if (pa_streq(ff, "computer"))
2553 else if (pa_streq(ff, "handset"))
2555 else if (pa_streq(ff, "portable"))
2556 t = "multimedia-player";
2557 else if (pa_streq(ff, "tv"))
2558 t = "video-display";
2561 * The following icons are not part of the icon naming spec,
2562 * because Rodney Dawes sucks as the maintainer of that spec.
2564 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2566 else if (pa_streq(ff, "headset"))
2567 t = "audio-headset";
2568 else if (pa_streq(ff, "headphone"))
2569 t = "audio-headphones";
2570 else if (pa_streq(ff, "speaker"))
2571 t = "audio-speakers";
2572 else if (pa_streq(ff, "hands-free"))
2573 t = "audio-handsfree";
2577 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2578 if (pa_streq(c, "modem"))
2585 t = "audio-input-microphone";
2588 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2589 if (strstr(profile, "analog"))
2591 else if (strstr(profile, "iec958"))
2593 else if (strstr(profile, "hdmi"))
2597 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2599 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2604 pa_bool_t pa_device_init_description(pa_proplist *p) {
2605 const char *s, *d = NULL, *k;
2608 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2611 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2612 if (pa_streq(s, "internal"))
2613 d = _("Internal Audio");
2616 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2617 if (pa_streq(s, "modem"))
2621 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2626 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2629 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2631 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2636 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2640 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2643 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2644 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2645 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2652 unsigned pa_device_init_priority(pa_proplist *p) {
2654 unsigned priority = 0;
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2660 if (pa_streq(s, "sound"))
2662 else if (!pa_streq(s, "modem"))
2666 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2668 if (pa_streq(s, "internal"))
2670 else if (pa_streq(s, "speaker"))
2672 else if (pa_streq(s, "headphone"))
2676 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2678 if (pa_streq(s, "pci"))
2680 else if (pa_streq(s, "usb"))
2682 else if (pa_streq(s, "bluetooth"))
2686 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2688 if (pa_startswith(s, "analog-"))
2690 else if (pa_startswith(s, "iec958-"))