2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
/* Mixing/latency tunables for the sink core. */
49 #define MAX_MIX_CHANNELS 32
/* Mix buffer sized to one page; frame-aligned before use in pa_sink_render(). */
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Latency clamp bounds — presumably in usec, matching PA_USEC_* below; TODO confirm. */
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Default latency for sinks without PA_SINK_DYNAMIC_LATENCY (see pa_sink_new()). */
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Declare pa_sink as a public message-object class (refcounting + msg dispatch). */
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Destructor installed into parent.parent.free in pa_sink_new(); defined below. */
57 static void sink_free(pa_object *s);
/* Initialize a pa_sink_new_data struct for later use with pa_sink_new();
 * allocates a fresh, empty property list. (Some lines elided in this view.) */
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
63 data->proplist = pa_proplist_new();
/* Set (copy) the sink name into the new-data struct. */
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
/* Store the sample spec if non-NULL; the _is_set flag records whether one was given. */
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
/* Store the channel map if non-NULL; mirrors pa_sink_new_data_set_sample_spec(). */
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
/* Store the initial volume if non-NULL; _is_set records whether a volume was supplied. */
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Set the initial mute state; unlike the pointer setters this always marks it set. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
/* Normalize to 0/1. */
100 data->muted = !!mute;
/* Set the requested active port name, replacing (and freeing) any previous value. */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
/* Release everything owned by a pa_sink_new_data: proplist, port list, name and
 * active-port strings. (Some lines elided in this view.) */
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
/* Drain and free each port before destroying the hashmap itself. */
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
/* Allocate a device port with `extra` trailing bytes for implementor-private data;
 * the struct is aligned so the extra area starts at PA_ALIGN'd offset. */
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Free a device port created by pa_device_port_new().
 * NOTE(review): only the description free is visible here; the name free and the
 * struct free are presumably in the elided lines — verify against full source. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
/* Clear all implementor-supplied callback pointers on the sink (volume control,
 * rewind, latency update, ...). (Some callback resets elided in this view.) */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
/* Create a new sink from the fixated new-data: registers the name, fires the
 * NEW/FIXATE hooks, validates sample spec / channel map / volume, fills in the
 * pa_sink fields and thread_info defaults, and creates the companion monitor
 * source ("<name>.monitor"). Returns NULL on any validation/hook failure.
 * (A number of lines are elided in this view.) */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name in the name registry; bail out if already taken. */
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
190 pa_sink_new_data_set_name(data, name);
/* Give modules a chance to veto/modify the new sink before fixation. */
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
194 pa_namereg_unregister(core, name);
198 /* FIXME, need to free s here on failure */
/* Validate all caller-supplied strings and specs; each failure returns NULL. */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Derive a default channel map from the channel count when none was given. */
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
/* Default volume: norm on every channel. */
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
217 if (!data->muted_is_set)
/* Merge the card's properties into the sink's proplist, then fill in
 * description/icon/role defaults derived from those properties. */
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
/* Final veto point before the data is considered fixated. */
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
229 pa_namereg_unregister(core, name);
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
237 s->state = PA_SINK_INIT;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
247 s->priority = pa_device_init_priority(s->proplist);
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
252 s->inputs = pa_idxset_new(NULL, NULL);
/* reference_volume is what clients see; real_volume is what the device runs at. */
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
267 /* As a minor optimization we just steal the list instead of
269 s->ports = data->ports;
272 s->active_port = NULL;
273 s->save_port = FALSE;
/* Prefer the explicitly requested port if it exists in the port list ... */
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
/* ... otherwise fall back to the highest-priority port. */
279 if (!s->active_port && s->ports) {
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
291 pa_silence_memchunk_get(
292 &core->silence_cache,
/* IO-thread-side mirror of the state; synced again in pa_sink_put(). */
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
/* Dynamic-latency sinks start with no fixed latency; others get the default. */
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Build the monitor source that mirrors this sink's output. */
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* Propagate the sink's latency capabilities onto the monitor source's flags. */
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
344 pa_source_new_data_done(&source_data);
346 if (!s->monitor_source) {
352 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency/rewind parameters in lockstep with the sink. */
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
361 /* Called from main context */
/* Transition the sink to `state`: calls the implementor's set_state() callback,
 * synchronously informs the IO thread, and on success fires change hooks and
 * handles suspend/resume side effects (killing/suspending inputs, syncing the
 * monitor source). Returns 0 on success, negative on failure; rolls the
 * callback back if the IO-thread message fails. (Some lines elided.) */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
368 pa_assert_ctl_context();
370 if (s->state == state)
373 original_state = s->state;
/* True when crossing the SUSPENDED <-> OPENED boundary in either direction. */
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
380 if ((ret = s->set_state(s, state)) < 0)
/* Tell the IO thread; on failure undo the implementor-side state change. */
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
387 s->set_state(s, original_state);
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
399 if (suspend_change) {
403 /* We're suspending or resuming, tell everyone about it */
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
/* Inputs that opted in are killed instead of suspended. */
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
410 i->suspend(i, state == PA_SINK_SUSPENDED);
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
419 /* Called from main context */
/* Finish sink construction started by pa_sink_new(): fix up volume-related
 * flags, sync soft volume/mute to thread_info, sanity-check the monitor
 * source's latency mirror, move the sink to IDLE, put the monitor source, and
 * announce the new sink via subscription event and PUT hook. */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
424 pa_assert(s->state == PA_SINK_INIT);
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
/* Pure software volume is always expressible in dB. */
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
436 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
/* Consistency checks tying flags to volume/latency invariants. */
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
/* The monitor source must mirror the sink's latency parameters exactly. */
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
460 pa_source_put(s->monitor_source);
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
466 /* Called from main context */
/* Detach the sink from the core: fires the UNLINK hook, unregisters the name,
 * removes the sink from the core/card idxsets, kills all remaining inputs,
 * enters UNLINKED state, unlinks the monitor source, and posts REMOVE events.
 * Idempotent by design (see comment below). (Some lines elided.) */
467 void pa_sink_unlink(pa_sink* s) {
469 pa_sink_input *i, *j = NULL;
472 pa_assert_ctl_context();
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
482 linked = PA_SINK_IS_LINKED(s->state);
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill inputs one by one; each kill removes the input from s->inputs. */
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
496 pa_sink_input_kill(i);
501 sink_set_state(s, PA_SINK_UNLINKED);
503 s->state = PA_SINK_UNLINKED;
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
516 /* Called from main context */
/* Final destructor, invoked when the refcount drops to zero (installed as
 * parent.parent.free in pa_sink_new()). Unlinks if still linked, then releases
 * the monitor source, input sets, silence memblock, proplist and ports.
 * (Some lines elided in this view.) */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
525 if (PA_SINK_IS_LINKED(s->state))
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
535 pa_idxset_free(s->inputs, NULL, NULL);
/* Drop the IO-thread-side references held on inputs. */
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
549 pa_proplist_free(s->proplist);
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
557 pa_hashmap_free(s->ports, NULL, NULL);
563 /* Called from main context, and not while the IO thread is active, please */
/* Install the async message queue used to talk to the IO thread, and forward
 * it to the monitor source. (The s->asyncmsgq assignment is elided here.) */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
585 s->flags = (s->flags & ~mask) | (value & mask);
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
594 /* Called from IO context, or before _put() from main context */
/* Install the rtpoll object the IO thread runs on, and share it with the
 * monitor source so both poll on the same loop. */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
599 s->thread_info.rtpoll = p;
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
605 /* Called from main context */
/* Re-evaluate whether the sink should be RUNNING or IDLE based on whether any
 * inputs use it; suspended sinks are left untouched. Returns the result of
 * sink_set_state() (0 on success). */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
611 if (s->state == PA_SINK_SUSPENDED)
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
617 /* Called from main context */
/* Add or remove a suspend cause bit (mirrored onto the monitor source) and
 * transition the sink to SUSPENDED when any cause remains, or back to
 * RUNNING/IDLE when none do. Returns sink_set_state()'s result, or the
 * early-out value when the effective state is unchanged (line elided). */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
/* No-op when the suspended-ness already matches the accumulated causes. */
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
643 /* Called from main context */
/* Begin moving all inputs away from this sink: each input that successfully
 * starts its move is queued (queue push elided in this view) with a reference
 * held; the queue is later consumed by pa_sink_move_all_finish()/_fail(). */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before acting on the current one, since a successful
 * start_move removes it from s->inputs. */
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
658 pa_sink_input_ref(i);
660 if (pa_sink_input_start_move(i) >= 0)
663 pa_sink_input_unref(i);
669 /* Called from main context */
/* Complete a move started by pa_sink_move_all_start(): attach each queued
 * input to sink `s` (falling back to fail_move on error), drop the queue's
 * references, and free the queue. */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
682 pa_sink_input_unref(i);
685 pa_queue_free(q, NULL, NULL);
688 /* Called from main context */
/* Abort a pending move: fail every queued input, drop the queue's references,
 * and free the queue. */
689 void pa_sink_move_all_fail(pa_queue *q) {
692 pa_assert_ctl_context();
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
700 pa_queue_free(q, NULL, NULL);
703 /* Called from IO thread context */
/* Process a rewind of `nbytes` bytes: forwards the rewind to every attached
 * input and to the monitor source. Skips out early when no rewind was
 * requested and nbytes is zero, or when suspended. */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
712 /* If nobody requested this and this is actually no real rewind
713 * then we can short cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated in actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
/* Clear the pending-rewind bookkeeping before fanning out. */
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
726 pa_log_debug("Processing rewind...");
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
738 /* Called from IO thread context */
/* Peek up to `maxinfo` inputs into the pa_mix_info array: each non-silent
 * chunk gets an entry holding a ref on the input; *length is narrowed to the
 * shortest chunk seen (via mixlength). Returns the number of entries filled
 * (return elided in this view). */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
743 size_t mixlength = *length;
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
/* Pure silence contributes nothing to the mix; drop it immediately. */
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
/* Keep a reference on the input for the lifetime of this mix entry;
 * released later in inputs_drop(). */
762 info->userdata = pa_sink_input_ref(i);
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
778 /* Called from IO thread context */
/* After rendering: advance every input by result->length, feed per-input data
 * to any direct outputs on the monitor source, release the refs/memblocks held
 * in the pa_mix_info array, and finally post the mixed result to the monitor
 * source. (Several lines elided in this view.) */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
783 unsigned n_unreffed = 0;
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
791 /* We optimize for the case where the order of the inputs has not changed */
793 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
795 pa_mix_info* m = NULL;
797 pa_sink_input_assert_ref(i);
799 /* Let's try to find the matching entry info the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
802 if (info[p].userdata == i) {
/* Consume the rendered bytes from this input. */
813 pa_sink_input_drop(i, result->length);
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* If we have this input's own chunk, re-apply its volume so direct
 * outputs hear the input as mixed ... */
822 if (m && m->chunk.memblock) {
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* ... otherwise fall back to the chunk selected in the elided branch. */
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
843 pa_memblock_unref(c.memblock);
/* Release this entry's memblock and input reference. */
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
852 pa_sink_input_unref(m->userdata);
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
875 /* Called from IO thread context */
/* Render up to `length` bytes of mixed audio into *result (a new or shared
 * memchunk). Suspended sinks return silence; zero inputs return the cached
 * silence chunk; one input may be returned zero-copy with volume applied;
 * multiple inputs are mixed with pa_mix(). (Several lines elided.) */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
879 size_t block_size_max;
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand out (a slice of) the cached silence block. */
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
/* Default length (when caller passed 0, presumably — elided) and clamp to the
 * mempool's maximum block size, keeping frame alignment. */
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
906 pa_assert(length > 0);
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, return the silence chunk. */
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
915 if (result->length > length)
916 result->length = length;
/* n == 1: pass the single input's chunk through, applying soft volume/mute. */
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
924 if (result->length > length)
925 result->length = length;
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
929 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
930 pa_memblock_unref(result->memblock);
931 pa_silence_memchunk_get(&s->core->silence_cache,
936 } else if (!pa_cvolume_is_norm(&volume)) {
937 pa_memchunk_make_writable(result, 0);
938 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: mix all inputs into a freshly allocated block. */
942 result->memblock = pa_memblock_new(s->core->mempool, length);
944 ptr = pa_memblock_acquire(result->memblock);
945 result->length = pa_mix(info, n,
948 &s->thread_info.soft_volume,
949 s->thread_info.soft_muted);
950 pa_memblock_release(result->memblock);
955 inputs_drop(s, info, n, result);
960 /* Called from IO thread context */
/* Like pa_sink_render(), but mixes into the caller-provided chunk *target
 * (possibly shortening target->length). Suspended sinks silence the target.
 * (Several lines elided in this view.) */
961 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
962 pa_mix_info info[MAX_MIX_CHANNELS];
964 size_t length, block_size_max;
966 pa_sink_assert_ref(s);
967 pa_sink_assert_io_context(s);
968 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
970 pa_assert(target->memblock);
971 pa_assert(target->length > 0);
972 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
974 pa_assert(!s->thread_info.rewind_requested);
975 pa_assert(s->thread_info.rewind_nbytes == 0);
977 if (s->thread_info.state == PA_SINK_SUSPENDED) {
978 pa_silence_memchunk(target, &s->sample_spec);
/* Clamp the render length to the mempool's maximum block size. */
984 length = target->length;
985 block_size_max = pa_mempool_block_size_max(s->core->mempool);
986 if (length > block_size_max)
987 length = pa_frame_align(block_size_max, &s->sample_spec);
989 pa_assert(length > 0);
991 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: silence the target. */
994 if (target->length > length)
995 target->length = length;
997 pa_silence_memchunk(target, &s->sample_spec);
/* n == 1: copy the single input's (volume-adjusted) chunk into the target. */
1001 if (target->length > length)
1002 target->length = length;
1004 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1006 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1007 pa_silence_memchunk(target, &s->sample_spec);
1011 vchunk = info[0].chunk;
1012 pa_memblock_ref(vchunk.memblock);
1014 if (vchunk.length > length)
1015 vchunk.length = length;
1017 if (!pa_cvolume_is_norm(&volume)) {
1018 pa_memchunk_make_writable(&vchunk, 0);
1019 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1022 pa_memchunk_memcpy(target, &vchunk);
1023 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix directly into the target's memory. */
1029 ptr = pa_memblock_acquire(target->memblock);
1031 target->length = pa_mix(info, n,
1032 (uint8_t*) ptr + target->index, length,
1034 &s->thread_info.soft_volume,
1035 s->thread_info.soft_muted);
1037 pa_memblock_release(target->memblock);
1040 inputs_drop(s, info, n, target);
1045 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the WHOLE target chunk is filled,
 * calling pa_sink_render_into() repeatedly on the remainder (loop partially
 * elided in this view). Suspended sinks silence the target in one step. */
1046 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1050 pa_sink_assert_ref(s);
1051 pa_sink_assert_io_context(s);
1052 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1054 pa_assert(target->memblock);
1055 pa_assert(target->length > 0);
1056 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1058 pa_assert(!s->thread_info.rewind_requested);
1059 pa_assert(s->thread_info.rewind_nbytes == 0);
1061 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1062 pa_silence_memchunk(target, &s->sample_spec);
1075 pa_sink_render_into(s, &chunk);
1084 /* Called from IO thread context */
/* Render EXACTLY `length` bytes into *result: first a normal pa_sink_render(),
 * then, if that returned short, top up the remainder in place with
 * pa_sink_render_into_full(). */
1085 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1086 pa_sink_assert_ref(s);
1087 pa_sink_assert_io_context(s);
1088 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1089 pa_assert(length > 0);
1090 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1093 pa_assert(!s->thread_info.rewind_requested);
1094 pa_assert(s->thread_info.rewind_nbytes == 0);
1098 pa_sink_render(s, length, result);
1100 if (result->length < length) {
/* Grow the chunk to the full length, then render into the tail. */
1103 pa_memchunk_make_writable(result, length);
1105 chunk.memblock = result->memblock;
1106 chunk.index = result->index + result->length;
1107 chunk.length = length - result->length;
1109 pa_sink_render_into_full(s, &chunk);
1111 result->length = length;
1117 /* Called from main thread */
/* Query the sink's current playback latency by sending a synchronous
 * GET_LATENCY message to the IO thread. Returns early (elided) when the sink
 * is suspended or does not report latency. */
1118 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1121 pa_sink_assert_ref(s);
1122 pa_assert_ctl_context();
1123 pa_assert(PA_SINK_IS_LINKED(s->state));
1125 /* The returned value is supposed to be in the time domain of the sound card! */
1127 if (s->state == PA_SINK_SUSPENDED)
1130 if (!(s->flags & PA_SINK_LATENCY))
1133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1138 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): invokes process_msg() directly
 * instead of going through the asyncmsgq. */
1139 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1143 pa_sink_assert_ref(s);
1144 pa_sink_assert_io_context(s);
1145 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1147 /* The returned value is supposed to be in the time domain of the sound card! */
1149 if (s->thread_info.state == PA_SINK_SUSPENDED)
1152 if (!(s->flags & PA_SINK_LATENCY))
1155 o = PA_MSGOBJECT(s);
1157 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1159 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
/* Remap volume v from channel map `from` to `to` with minimal impact: reuse
 * `template` when it is already a valid remapping of v, otherwise flatten to
 * an all-channel max volume (see comment below for the rationale). Returns v,
 * or NULL on incompatible inputs. (Some lines elided in this view.) */
1165 static pa_cvolume* cvolume_remap_minimal_impact(
1167 const pa_cvolume *template,
1168 const pa_channel_map *from,
1169 const pa_channel_map *to) {
1174 pa_assert(template);
1178 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(v, from), NULL);
1179 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(template, to), NULL);
1181 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1182 * mapping from sink input to sink volumes:
1184 * If template is a possible remapping from v it is used instead
1185 * of remapping anew.
1187 * If the channel maps don't match we set an all-channel volume on
1188 * the sink to ensure that changing a volume on one stream has no
1189 * effect that cannot be compensated for in another stream that
1190 * does not have the same channel map as the sink. */
/* Identical maps: nothing to remap (early return elided). */
1192 if (pa_channel_map_equal(from, to))
/* template maps back onto v exactly -> adopt template. */
1196 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1201 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1205 /* Called from main context */
/* For flat-volume sinks: recompute each input's reference_ratio =
 * i->volume / s->reference_volume (per channel, in the input's channel map). */
1206 static void compute_reference_ratios(pa_sink *s) {
1210 pa_sink_assert_ref(s);
1211 pa_assert_ctl_context();
1212 pa_assert(PA_SINK_IS_LINKED(s->state));
1213 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1215 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1217 pa_cvolume remapped;
1220 * Calculates the reference volume from the sink's reference
1221 * volume. This basically calculates:
1223 * i->reference_ratio = i->volume / s->reference_volume
1226 remapped = s->reference_volume;
1227 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1229 i->reference_ratio.channels = i->sample_spec.channels;
1231 for (c = 0; c < i->sample_spec.channels; c++) {
1233 /* We don't update when the sink volume is 0 anyway */
1234 if (remapped.values[c] <= PA_VOLUME_MUTED)
1237 /* Don't update the reference ratio unless necessary */
1238 if (pa_sw_volume_multiply(
1239 i->reference_ratio.values[c],
1240 remapped.values[c]) == i->volume.values[c])
1243 i->reference_ratio.values[c] = pa_sw_volume_divide(
1244 i->volume.values[c],
1245 remapped.values[c]);
1250 /* Called from main context */
/* For flat-volume sinks: recompute each input's real_ratio
 * (= i->volume / s->real_volume) and soft_volume
 * (= real_ratio * i->volume_factor), per channel in the input's map. */
1251 static void compute_real_ratios(pa_sink *s) {
1255 pa_sink_assert_ref(s);
1256 pa_assert_ctl_context();
1257 pa_assert(PA_SINK_IS_LINKED(s->state));
1258 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1260 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1262 pa_cvolume remapped;
1265 * This basically calculates:
1267 * i->real_ratio := i->volume / s->real_volume
1268 * i->soft_volume := i->real_ratio * i->volume_factor
1271 remapped = s->real_volume;
1272 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1274 i->real_ratio.channels = i->sample_spec.channels;
1275 i->soft_volume.channels = i->sample_spec.channels;
1277 for (c = 0; c < i->sample_spec.channels; c++) {
/* Sink channel muted: soft-mute the input channel and keep the old ratio. */
1279 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1280 /* We leave i->real_ratio untouched */
1281 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1285 /* Don't lose accuracy unless necessary */
1286 if (pa_sw_volume_multiply(
1287 i->real_ratio.values[c],
1288 remapped.values[c]) != i->volume.values[c])
1290 i->real_ratio.values[c] = pa_sw_volume_divide(
1291 i->volume.values[c],
1292 remapped.values[c]);
1294 i->soft_volume.values[c] = pa_sw_volume_multiply(
1295 i->real_ratio.values[c],
1296 i->volume_factor.values[c]);
1299 /* We don't copy the soft_volume to the thread_info data
1300 * here. That must be done by the caller */
1304 /* Called from main thread */
/* For flat-volume sinks: set s->real_volume to the per-channel maximum over
 * all input volumes (remapped with minimal impact), then refresh each input's
 * real ratios/soft volumes. With no inputs, real_volume tracks
 * reference_volume unchanged. */
1305 static void compute_real_volume(pa_sink *s) {
1309 pa_sink_assert_ref(s);
1310 pa_assert_ctl_context();
1311 pa_assert(PA_SINK_IS_LINKED(s->state));
1312 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1314 /* This determines the maximum volume of all streams and sets
1315 * s->real_volume accordingly. */
1317 if (pa_idxset_isempty(s->inputs)) {
1318 /* In the special case that we have no sink input we leave the
1319 * volume unmodified. */
1320 s->real_volume = s->reference_volume;
/* Start from mute and merge (max) each input's remapped volume in. */
1324 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1326 /* First let's determine the new maximum volume of all inputs
1327 * connected to this sink */
1328 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1329 pa_cvolume remapped;
1331 remapped = i->volume;
1332 cvolume_remap_minimal_impact(&remapped, &s->real_volume, &i->channel_map, &s->channel_map);
1333 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1336 /* Then, let's update the real ratios/soft volumes of all inputs
1337 * connected to this sink */
1338 compute_real_ratios(s);
1341 /* Called from main thread */
/* Pushes a sink-level reference volume change down to every connected
 * sink input: each input's volume is recomputed from the sink's reference
 * volume and the input's stored reference_ratio, and a change event is
 * posted for every input whose volume actually changed. */
1342 static void propagate_reference_volume(pa_sink *s) {
1346 pa_sink_assert_ref(s);
1347 pa_assert_ctl_context();
1348 pa_assert(PA_SINK_IS_LINKED(s->state));
1349 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1351 /* This is called whenever the sink volume changes that is not
1352 * caused by a sink input volume change. We need to fix up the
1353 * sink input volumes accordingly */
1355 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1356 pa_cvolume old_volume, remapped;
1358 old_volume = i->volume;
1360 /* This basically calculates:
1362 * i->volume := s->reference_volume * i->reference_ratio */
/* Remap the sink's reference volume into the input's channel map first,
 * then multiply channel-wise with the input's reference ratio. */
1364 remapped = s->reference_volume;
1365 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1366 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1368 /* The volume changed, let's tell people so */
1369 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
/* Notify the input's implementor callback (if any) before posting the
 * subscription event so clients observe a consistent state. */
1371 if (i->volume_changed)
1372 i->volume_changed(i);
1374 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1379 /* Called from main thread */
/* Sets the sink's reference volume (or, when volume is NULL in flat volume
 * mode, re-derives reference/real volume from the connected stream volumes).
 * Refuses to change the volume while a PASSTHROUGH input is connected.
 * Notifies the IO thread via PA_SINK_MESSAGE_SET_VOLUME and posts a change
 * event when the reference volume actually changed. */
1380 void pa_sink_set_volume(
1382 const pa_cvolume *volume,
1386 pa_cvolume old_reference_volume;
1387 pa_bool_t reference_changed;
1389 pa_sink_assert_ref(s);
1390 pa_assert_ctl_context();
1391 pa_assert(PA_SINK_IS_LINKED(s->state));
1392 pa_assert(!volume || pa_cvolume_valid(volume));
1393 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1394 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1396 /* make sure we don't change the volume when a PASSTHROUGH input is connected */
1397 if (s->flags & PA_SINK_PASSTHROUGH) {
1398 pa_sink_input *alt_i;
1401 /* one and only one PASSTHROUGH input can possibly be connected */
1402 if (pa_idxset_size(s->inputs) == 1) {
1404 alt_i = pa_idxset_first(s->inputs, &idx);
1406 if (alt_i->flags & PA_SINK_INPUT_PASSTHROUGH) {
1407 /* FIXME: Need to notify client that volume control is disabled */
1408 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
1414 /* As a special exception we accept mono volumes on all sinks --
1415 * even on those with more complex channel maps */
1417 /* If volume is NULL we synchronize the sink's real and reference
1418 * volumes with the stream volumes. If it is not NULL we update
1419 * the reference_volume with it. */
1421 old_reference_volume = s->reference_volume;
/* A mono volume is scaled onto all channels; a fully compatible cvolume
 * is copied verbatim. */
1425 if (pa_cvolume_compatible(volume, &s->sample_spec))
1426 s->reference_volume = *volume;
1428 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1430 if (s->flags & PA_SINK_FLAT_VOLUME) {
1431 /* OK, propagate this volume change back to the inputs */
1432 propagate_reference_volume(s);
1434 /* And now recalculate the real volume */
1435 compute_real_volume(s);
/* Non-flat sinks: real volume simply follows the reference volume. */
1437 s->real_volume = s->reference_volume;
/* volume == NULL branch: only valid in flat volume mode (asserted above). */
1440 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1442 /* Ok, let's determine the new real volume */
1443 compute_real_volume(s);
1445 /* Let's 'push' the reference volume if necessary */
1446 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1448 /* We need to fix the reference ratios of all streams now that
1449 * we changed the reference volume */
1450 compute_reference_ratios(s);
/* Only overwrite save_volume when something actually changed, unless the
 * caller explicitly asked for saving. */
1453 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1454 s->save_volume = (!reference_changed && s->save_volume) || save;
1456 if (s->set_volume) {
1457 /* If we have a function set_volume(), then we do not apply a
1458 * soft volume by default. However, set_volume() is free to
1459 * apply one to s->soft_volume */
1461 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1465 /* If we have no function set_volume(), then the soft volume
1466 * becomes the virtual volume */
1467 s->soft_volume = s->real_volume;
1469 /* This tells the sink that soft and/or virtual volume changed */
1471 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1473 if (reference_changed)
1474 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1477 /* Called from main thread. Only to be called by sink implementor */
/* Overrides the sink's software volume directly. NULL resets soft_volume
 * to neutral. For a linked sink the new value is handed to the IO thread
 * synchronously; before linking it is copied into thread_info directly. */
1478 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1479 pa_sink_assert_ref(s);
1480 pa_assert_ctl_context();
1483 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1485 s->soft_volume = *volume;
1487 if (PA_SINK_IS_LINKED(s->state))
1488 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
/* Not linked yet: no IO thread is running, so write thread_info directly. */
1490 s->thread_info.soft_volume = s->soft_volume;
/* Called from main thread. Adopts an externally-changed hardware (real)
 * volume: copies real_volume into reference_volume, then, in flat volume
 * mode, rebuilds every input's reference_ratio and stream volume so that
 * the per-stream real_ratios stay fixed. Marks the volume for saving since
 * external hw changes are almost certainly user-initiated. */
1493 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1496 pa_cvolume old_reference_volume;
1498 pa_sink_assert_ref(s);
1499 pa_assert_ctl_context();
1500 pa_assert(PA_SINK_IS_LINKED(s->state));
1502 /* This is called when the hardware's real volume changes due to
1503 * some external event. We copy the real volume into our
1504 * reference volume and then rebuild the stream volumes based on
1505 * i->real_ratio which should stay fixed. */
/* Nothing to do if the real volume did not actually change. */
1507 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1510 old_reference_volume = s->reference_volume;
1512 /* 1. Make the real volume the reference volume */
1513 s->reference_volume = s->real_volume;
1515 if (s->flags & PA_SINK_FLAT_VOLUME) {
1517 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1518 pa_cvolume old_volume, remapped;
1520 old_volume = i->volume;
1522 /* 2. Since the sink's reference and real volumes are equal
1523 * now our ratios should be too. */
1524 i->reference_ratio = i->real_ratio;
1526 /* 3. Recalculate the new stream reference volume based on the
1527 * reference ratio and the sink's reference volume.
1529 * This basically calculates:
1531 * i->volume = s->reference_volume * i->reference_ratio
1533 * This is identical to propagate_reference_volume() */
1534 remapped = s->reference_volume;
1535 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1536 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1538 /* Notify if something changed */
1539 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1541 if (i->volume_changed)
1542 i->volume_changed(i);
1544 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1549 /* Something got changed in the hardware. It probably makes sense
1550 * to save changed hw settings given that hw volume changes not
1551 * triggered by PA are almost certainly done by the user. */
1552 s->save_volume = TRUE;
1554 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1555 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1558 /* Called from main thread */
/* Returns the sink's reference volume. If the sink wants volume refreshes
 * (refresh_volume) or the caller forces one, the real volume is first
 * re-read from the IO thread via PA_SINK_MESSAGE_GET_VOLUME and any
 * externally-changed hardware volume is propagated into the reference
 * volume and stream volumes. */
1559 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1560 pa_sink_assert_ref(s);
1561 pa_assert_ctl_context();
1562 pa_assert(PA_SINK_IS_LINKED(s->state));
1564 if (s->refresh_volume || force_refresh) {
1565 struct pa_cvolume old_real_volume;
1567 old_real_volume = s->real_volume;
1572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1574 propagate_real_volume(s, &old_real_volume);
1577 return &s->reference_volume;
1580 /* Called from main thread */
/* Entry point for sink implementors: records a hardware-initiated real
 * volume change and propagates it to reference volume and stream volumes. */
1581 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1582 pa_cvolume old_real_volume;
1584 pa_sink_assert_ref(s);
1585 pa_assert_ctl_context();
1586 pa_assert(PA_SINK_IS_LINKED(s->state));
1588 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1590 old_real_volume = s->real_volume;
1591 s->real_volume = *new_real_volume;
1593 propagate_real_volume(s, &old_real_volume);
1596 /* Called from main thread */
/* Sets the sink's mute state, updates the save flag, pushes the change to
 * the IO thread and posts a change event if the state actually flipped. */
1597 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1598 pa_bool_t old_muted;
1600 pa_sink_assert_ref(s);
1601 pa_assert_ctl_context();
1602 pa_assert(PA_SINK_IS_LINKED(s->state));
1604 old_muted = s->muted;
/* Keep the existing save flag only while the state is unchanged, unless
 * the caller explicitly requests saving. */
1606 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1611 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1613 if (old_muted != s->muted)
1614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1617 /* Called from main thread */
/* Returns the sink's mute state, optionally re-reading it from the
 * hardware first (refresh_muted or force_refresh). If the hardware state
 * diverged, it is adopted, marked for saving, announced, and the soft
 * mute in the IO thread is resynchronized. */
1618 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1620 pa_sink_assert_ref(s);
1621 pa_assert_ctl_context();
1622 pa_assert(PA_SINK_IS_LINKED(s->state));
1624 if (s->refresh_muted || force_refresh) {
1625 pa_bool_t old_muted = s->muted;
1630 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1632 if (old_muted != s->muted) {
/* External (hardware) change detected — assume user intent, save it. */
1633 s->save_muted = TRUE;
1635 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1637 /* Make sure the soft mute status stays in sync */
1638 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1645 /* Called from main thread */
/* Entry point for sink implementors: records an externally-changed mute
 * state, marks it for saving and posts a change event. No-op when the
 * state is unchanged. */
1646 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1647 pa_sink_assert_ref(s);
1648 pa_assert_ctl_context();
1649 pa_assert(PA_SINK_IS_LINKED(s->state));
1651 /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
1653 if (s->muted == new_muted)
1656 s->muted = new_muted;
1657 s->save_muted = TRUE;
1659 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1662 /* Called from main thread */
/* Merges a property list into the sink's proplist using the given update
 * mode and, for a linked sink, fires the proplist-changed hook and posts a
 * change event. */
1663 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1664 pa_sink_assert_ref(s);
1665 pa_assert_ctl_context();
1668 pa_proplist_update(s->proplist, mode, p);
1670 if (PA_SINK_IS_LINKED(s->state)) {
1671 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1672 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1678 /* Called from main thread */
1679 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Sets (or, with NULL, unsets) the sink's human-readable description
 * property, keeps the monitor source's description in sync, and announces
 * the change for linked sinks. No-op when the description is unchanged. */
1680 void pa_sink_set_description(pa_sink *s, const char *description) {
1682 pa_sink_assert_ref(s);
1683 pa_assert_ctl_context();
1685 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1688 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1690 if (old && description && pa_streq(old, description))
1694 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1696 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1698 if (s->monitor_source) {
/* Derive the monitor source's description from ours (fall back to the
 * sink name when the description was unset). */
1701 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1702 pa_source_set_description(s->monitor_source, n);
1706 if (PA_SINK_IS_LINKED(s->state)) {
1707 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1708 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1712 /* Called from main thread */
/* Returns the total number of streams linked to this sink: all sink
 * inputs plus everything connected to the monitor source. */
1713 unsigned pa_sink_linked_by(pa_sink *s) {
1716 pa_sink_assert_ref(s);
1717 pa_assert_ctl_context();
1718 pa_assert(PA_SINK_IS_LINKED(s->state));
1720 ret = pa_idxset_size(s->inputs);
1722 /* We add in the number of streams connected to us here. Please
1723 * note the asymmetry to pa_sink_used_by()! */
1725 if (s->monitor_source)
1726 ret += pa_source_linked_by(s->monitor_source);
1731 /* Called from main thread */
/* Returns the number of actively playing (non-corked) sink inputs.
 * Unlike pa_sink_linked_by(), monitor-source streams are ignored. */
1732 unsigned pa_sink_used_by(pa_sink *s) {
1735 pa_sink_assert_ref(s);
1736 pa_assert_ctl_context();
1737 pa_assert(PA_SINK_IS_LINKED(s->state));
1739 ret = pa_idxset_size(s->inputs);
1740 pa_assert(ret >= s->n_corked);
1742 /* Streams connected to our monitor source do not matter for
1743 * pa_sink_used_by()!.*/
1745 return ret - s->n_corked;
1748 /* Called from main thread */
/* Counts the streams that inhibit auto-suspend of this sink: linked,
 * non-corked inputs without the DONT_INHIBIT_AUTO_SUSPEND flag, plus the
 * monitor source's own suspend-inhibiting streams. A return of 0 means
 * the sink may be auto-suspended. */
1749 unsigned pa_sink_check_suspend(pa_sink *s) {
1754 pa_sink_assert_ref(s);
1755 pa_assert_ctl_context();
1757 if (!PA_SINK_IS_LINKED(s->state))
1762 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1763 pa_sink_input_state_t st;
1765 st = pa_sink_input_get_state(i);
1767 /* We do not assert here. It is perfectly valid for a sink input to
1768 * be in the INIT state (i.e. created, marked done but not yet put)
1769 * and we should not care if it's unlinked as it won't contribute
1770 * towards our busy status.
1772 if (!PA_SINK_INPUT_IS_LINKED(st))
1775 if (st == PA_SINK_INPUT_CORKED)
1778 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1784 if (s->monitor_source)
1785 ret += pa_source_check_suspend(s->monitor_source);
1790 /* Called from the IO thread */
/* Copies each input's main-thread soft_volume into its thread_info copy
 * and requests a rewind so the new volume applies retroactively to already
 * rendered audio. When a volume ramp is pending (before_ramping_v), the
 * new value is parked in future_soft_volume instead of being applied
 * immediately. */
1791 static void sync_input_volumes_within_thread(pa_sink *s) {
1795 pa_sink_assert_ref(s);
1796 pa_sink_assert_io_context(s);
1798 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1799 if (pa_atomic_load(&i->before_ramping_v))
1800 i->thread_info.future_soft_volume = i->soft_volume;
/* Skip inputs whose thread-side volume is already up to date. */
1802 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1805 if (!pa_atomic_load(&i->before_ramping_v))
1806 i->thread_info.soft_volume = i->soft_volume;
1807 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1811 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq. Dispatches sink
 * messages: input add/remove, stream moves, volume/mute synchronization,
 * state changes, attach/detach and latency/rewind parameter queries and
 * updates. Returns 0 on success (via the hidden per-case returns). */
1812 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1813 pa_sink *s = PA_SINK(o);
1814 pa_sink_assert_ref(s);
1816 switch ((pa_sink_message_t) code) {
1818 case PA_SINK_MESSAGE_ADD_INPUT: {
1819 pa_sink_input *i = PA_SINK_INPUT(userdata);
1821 /* If you change anything here, make sure to change the
1822 * sink input handling a few lines down at
1823 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1825 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1827 /* Since the caller sleeps in pa_sink_input_put(), we can
1828 * safely access data outside of thread_info even though
/* Mirror the main-thread sync_prev/sync_next links into thread_info for
 * synchronized stream groups. */
1831 if ((i->thread_info.sync_prev = i->sync_prev)) {
1832 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1833 pa_assert(i->sync_prev->sync_next == i);
1834 i->thread_info.sync_prev->thread_info.sync_next = i;
1837 if ((i->thread_info.sync_next = i->sync_next)) {
1838 pa_assert(i->sink == i->thread_info.sync_next->sink);
1839 pa_assert(i->sync_next->sync_prev == i);
1840 i->thread_info.sync_next->thread_info.sync_prev = i;
1843 pa_assert(!i->thread_info.attached);
1844 i->thread_info.attached = TRUE;
1849 pa_sink_input_set_state_within_thread(i, i->state);
1851 /* The requested latency of the sink input needs to be
1852 * fixed up and then configured on the sink */
1854 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1855 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1857 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1858 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1860 /* We don't rewind here automatically. This is left to the
1861 * sink input implementor because some sink inputs need a
1862 * slow start, i.e. need some time to buffer client
1863 * samples before beginning streaming. */
1865 /* In flat volume mode we need to update the volume as
1867 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1870 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1871 pa_sink_input *i = PA_SINK_INPUT(userdata);
1873 /* If you change anything here, make sure to change the
1874 * sink input handling a few lines down at
1875 * PA_SINK_MESSAGE_START_MOVE, too. */
1880 pa_sink_input_set_state_within_thread(i, i->state);
1882 pa_assert(i->thread_info.attached);
1883 i->thread_info.attached = FALSE;
1885 /* Since the caller sleeps in pa_sink_input_unlink(),
1886 * we can safely access data outside of thread_info even
1887 * though it is mutable */
1889 pa_assert(!i->sync_prev);
1890 pa_assert(!i->sync_next);
/* Unhook this input from any synchronized stream group on the IO side. */
1892 if (i->thread_info.sync_prev) {
1893 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1894 i->thread_info.sync_prev = NULL;
1897 if (i->thread_info.sync_next) {
1898 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1899 i->thread_info.sync_next = NULL;
1902 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1903 pa_sink_input_unref(i);
1905 pa_sink_invalidate_requested_latency(s, TRUE);
1906 pa_sink_request_rewind(s, (size_t) -1);
1908 /* In flat volume mode we need to update the volume as
1910 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1913 case PA_SINK_MESSAGE_START_MOVE: {
1914 pa_sink_input *i = PA_SINK_INPUT(userdata);
1916 /* We don't support moving synchronized streams. */
1917 pa_assert(!i->sync_prev);
1918 pa_assert(!i->sync_next);
1919 pa_assert(!i->thread_info.sync_next);
1920 pa_assert(!i->thread_info.sync_prev);
1922 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1924 size_t sink_nbytes, total_nbytes;
1926 /* Get the latency of the sink */
1927 usec = pa_sink_get_latency_within_thread(s);
1928 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
/* Everything still queued in the sink plus the input's own render queue
 * must be rewritten after the move. */
1929 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1931 if (total_nbytes > 0) {
1932 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1933 i->thread_info.rewrite_flush = TRUE;
1934 pa_sink_input_process_rewind(i, sink_nbytes);
1941 pa_assert(i->thread_info.attached);
1942 i->thread_info.attached = FALSE;
1944 /* Let's remove the sink input ...*/
1945 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1946 pa_sink_input_unref(i);
1948 pa_sink_invalidate_requested_latency(s, TRUE);
1950 pa_log_debug("Requesting rewind due to started move");
1951 pa_sink_request_rewind(s, (size_t) -1);
1953 /* In flat volume mode we need to update the volume as
1955 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1958 case PA_SINK_MESSAGE_FINISH_MOVE: {
1959 pa_sink_input *i = PA_SINK_INPUT(userdata);
1961 /* We don't support moving synchronized streams. */
1962 pa_assert(!i->sync_prev);
1963 pa_assert(!i->sync_next);
1964 pa_assert(!i->thread_info.sync_next);
1965 pa_assert(!i->thread_info.sync_prev);
1967 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1969 pa_assert(!i->thread_info.attached);
1970 i->thread_info.attached = TRUE;
1975 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1976 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1978 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1979 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1981 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1985 /* Get the latency of the sink */
1986 usec = pa_sink_get_latency_within_thread(s);
1987 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
/* Drop data already rendered for the old sink; rewind the new one so
 * playback continues seamlessly at the right position. */
1990 pa_sink_input_drop(i, nbytes);
1992 pa_log_debug("Requesting rewind due to finished move");
1993 pa_sink_request_rewind(s, nbytes);
1996 /* In flat volume mode we need to update the volume as
1998 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
2001 case PA_SINK_MESSAGE_SET_VOLUME:
/* Adopt the new soft volume on the IO side and rewind so it applies to
 * data already mixed. */
2003 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2004 s->thread_info.soft_volume = s->soft_volume;
2005 pa_sink_request_rewind(s, (size_t) -1);
2008 if (!(s->flags & PA_SINK_FLAT_VOLUME))
2011 /* Fall through ... */
2013 case PA_SINK_MESSAGE_SYNC_VOLUMES:
2014 sync_input_volumes_within_thread(s);
2017 case PA_SINK_MESSAGE_GET_VOLUME:
2020 case PA_SINK_MESSAGE_SET_MUTE:
2022 if (s->thread_info.soft_muted != s->muted) {
2023 s->thread_info.soft_muted = s->muted;
2024 pa_sink_request_rewind(s, (size_t) -1);
2029 case PA_SINK_MESSAGE_GET_MUTE:
2032 case PA_SINK_MESSAGE_SET_STATE: {
/* True when transitioning into or out of SUSPENDED. */
2034 pa_bool_t suspend_change =
2035 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2036 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2038 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2040 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2041 s->thread_info.rewind_nbytes = 0;
2042 s->thread_info.rewind_requested = FALSE;
2045 if (suspend_change) {
/* Let every input react to the suspend state flip. */
2049 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2050 if (i->suspend_within_thread)
2051 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2057 case PA_SINK_MESSAGE_DETACH:
2059 /* Detach all streams */
2060 pa_sink_detach_within_thread(s);
2063 case PA_SINK_MESSAGE_ATTACH:
2065 /* Reattach all streams */
2066 pa_sink_attach_within_thread(s);
2069 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2071 pa_usec_t *usec = userdata;
2072 *usec = pa_sink_get_requested_latency_within_thread(s);
2074 /* Yes, that's right, the IO thread will see -1 when no
2075 * explicit requested latency is configured, the main
2076 * thread will see max_latency */
2077 if (*usec == (pa_usec_t) -1)
2078 *usec = s->thread_info.max_latency;
2083 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2084 pa_usec_t *r = userdata;
2086 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2091 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2092 pa_usec_t *r = userdata;
2094 r[0] = s->thread_info.min_latency;
2095 r[1] = s->thread_info.max_latency;
2100 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2102 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2105 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2107 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2110 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2112 *((size_t*) userdata) = s->thread_info.max_rewind;
2115 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2117 *((size_t*) userdata) = s->thread_info.max_request;
2120 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2122 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2125 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2127 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2130 case PA_SINK_MESSAGE_GET_LATENCY:
2131 case PA_SINK_MESSAGE_MAX:
2138 /* Called from main thread */
/* Suspends or resumes every sink of the core for the given cause,
 * collecting errors from the individual pa_sink_suspend() calls. */
2139 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2144 pa_core_assert_ref(c);
2145 pa_assert_ctl_context();
2146 pa_assert(cause != 0);
2148 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2151 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2158 /* Called from main thread */
/* Synchronously asks the IO thread to detach all streams from the sink. */
2159 void pa_sink_detach(pa_sink *s) {
2160 pa_sink_assert_ref(s);
2161 pa_assert_ctl_context();
2162 pa_assert(PA_SINK_IS_LINKED(s->state));
2164 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2167 /* Called from main thread */
/* Synchronously asks the IO thread to reattach all streams to the sink. */
2168 void pa_sink_attach(pa_sink *s) {
2169 pa_sink_assert_ref(s);
2170 pa_assert_ctl_context();
2171 pa_assert(PA_SINK_IS_LINKED(s->state));
2173 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2176 /* Called from IO thread */
/* Detaches every connected sink input and the monitor source from the
 * IO-thread side of this sink. */
2177 void pa_sink_detach_within_thread(pa_sink *s) {
2181 pa_sink_assert_ref(s);
2182 pa_sink_assert_io_context(s);
2183 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2185 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2189 if (s->monitor_source)
2190 pa_source_detach_within_thread(s->monitor_source);
2193 /* Called from IO thread */
/* Reattaches every connected sink input and the monitor source on the
 * IO-thread side of this sink. */
2194 void pa_sink_attach_within_thread(pa_sink *s) {
2198 pa_sink_assert_ref(s);
2199 pa_sink_assert_io_context(s);
2200 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2202 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2206 if (s->monitor_source)
2207 pa_source_attach_within_thread(s->monitor_source);
2210 /* Called from IO thread */
/* Requests that the sink rewrite up to nbytes of already-rendered audio.
 * (size_t) -1 means "as much as possible" (max_rewind). The request is
 * clamped to max_rewind, ignored while suspended, coalesced with an
 * already-pending larger request, and finally forwarded to the sink
 * implementor's request_rewind() callback if one is set. */
2211 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2212 pa_sink_assert_ref(s);
2213 pa_sink_assert_io_context(s);
2214 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2216 if (s->thread_info.state == PA_SINK_SUSPENDED)
2219 if (nbytes == (size_t) -1)
2220 nbytes = s->thread_info.max_rewind;
2222 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
/* A pending request that already covers at least this many bytes makes
 * this one redundant. */
2224 if (s->thread_info.rewind_requested &&
2225 nbytes <= s->thread_info.rewind_nbytes)
2228 s->thread_info.rewind_nbytes = nbytes;
2229 s->thread_info.rewind_requested = TRUE;
2231 if (s->request_rewind)
2232 s->request_rewind(s);
2235 /* Called from IO thread */
/* Computes the effective requested latency for this sink: the minimum of
 * all input-requested latencies and the monitor source's request, clamped
 * to [min_latency, max_latency]. Returns (pa_usec_t) -1 when nothing has
 * requested a latency. For fixed-latency sinks the clamped fixed latency
 * is returned directly. The result is cached once the sink is linked. */
2236 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2237 pa_usec_t result = (pa_usec_t) -1;
2240 pa_usec_t monitor_latency;
2242 pa_sink_assert_ref(s);
2243 pa_sink_assert_io_context(s);
2245 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2246 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2248 if (s->thread_info.requested_latency_valid)
2249 return s->thread_info.requested_latency;
/* Take the smallest latency requested by any input ... */
2251 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2252 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2253 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2254 result = i->thread_info.requested_sink_latency;
/* ... and fold in the monitor source's request as well. */
2256 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2258 if (monitor_latency != (pa_usec_t) -1 &&
2259 (result == (pa_usec_t) -1 || result > monitor_latency))
2260 result = monitor_latency;
2262 if (result != (pa_usec_t) -1)
2263 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2265 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2266 /* Only cache if properly initialized */
2267 s->thread_info.requested_latency = result;
2268 s->thread_info.requested_latency_valid = TRUE;
2274 /* Called from main thread */
/* Main-thread accessor for the sink's requested latency; queries the IO
 * thread via PA_SINK_MESSAGE_GET_REQUESTED_LATENCY. Note that the message
 * handler substitutes max_latency when no latency was requested, so -1 is
 * never seen here. Suspended sinks take the (hidden) early path instead. */
2275 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2278 pa_sink_assert_ref(s);
2279 pa_assert_ctl_context();
2280 pa_assert(PA_SINK_IS_LINKED(s->state));
2282 if (s->state == PA_SINK_SUSPENDED)
2285 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2289 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates the sink's max_rewind and propagates the new value to all
 * connected inputs (when linked) and to the monitor source. No-op when
 * unchanged. */
2290 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2294 pa_sink_assert_ref(s);
2295 pa_sink_assert_io_context(s);
2297 if (max_rewind == s->thread_info.max_rewind)
2300 s->thread_info.max_rewind = max_rewind;
2302 if (PA_SINK_IS_LINKED(s->thread_info.state))
2303 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2304 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2306 if (s->monitor_source)
2307 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2310 /* Called from main thread */
/* Sets max_rewind: for a linked sink this is routed through the IO thread
 * via PA_SINK_MESSAGE_SET_MAX_REWIND; before linking it is applied
 * directly. */
2311 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2312 pa_sink_assert_ref(s);
2313 pa_assert_ctl_context();
2315 if (PA_SINK_IS_LINKED(s->state))
2316 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2318 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2321 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates the sink's max_request and propagates the new value to all
 * connected inputs when the sink is linked. No-op when unchanged. */
2322 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2325 pa_sink_assert_ref(s);
2326 pa_sink_assert_io_context(s);
2328 if (max_request == s->thread_info.max_request)
2331 s->thread_info.max_request = max_request;
2333 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2336 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2337 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2341 /* Called from main thread */
/* Sets max_request: routed through the IO thread for a linked sink,
 * applied directly before linking. */
2342 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2343 pa_sink_assert_ref(s);
2344 pa_assert_ctl_context();
2346 if (PA_SINK_IS_LINKED(s->state))
2347 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2349 pa_sink_set_max_request_within_thread(s, max_request);
2352 /* Called from IO thread */
/* Invalidates the cached requested latency (dynamic-latency sinks only)
 * and notifies the sink implementor and all inputs so they can
 * recompute/react. The `dynamic` parameter presumably gates the (hidden)
 * early-return path for fixed-latency sinks -- confirm against the elided
 * lines. */
2353 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2357 pa_sink_assert_ref(s);
2358 pa_sink_assert_io_context(s);
2360 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2361 s->thread_info.requested_latency_valid = FALSE;
2365 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2367 if (s->update_requested_latency)
2368 s->update_requested_latency(s);
2370 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2371 if (i->update_sink_requested_latency)
2372 i->update_sink_requested_latency(i);
2376 /* Called from main thread */
/* Configures the sink's dynamic latency range. Zero values mean "no
 * limit" and are clamped to the absolute bounds. For a linked sink the
 * range is handed to the IO thread via PA_SINK_MESSAGE_SET_LATENCY_RANGE;
 * before linking it is applied directly. */
2377 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2378 pa_sink_assert_ref(s);
2379 pa_assert_ctl_context();
2381 /* min_latency == 0: no limit
2382 * min_latency anything else: specified limit
2384 * Similar for max_latency */
2386 if (min_latency < ABSOLUTE_MIN_LATENCY)
2387 min_latency = ABSOLUTE_MIN_LATENCY;
2389 if (max_latency <= 0 ||
2390 max_latency > ABSOLUTE_MAX_LATENCY)
2391 max_latency = ABSOLUTE_MAX_LATENCY;
2393 pa_assert(min_latency <= max_latency);
2395 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2396 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2397 max_latency == ABSOLUTE_MAX_LATENCY) ||
2398 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2400 if (PA_SINK_IS_LINKED(s->state)) {
2406 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2408 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2411 /* Called from main thread */
/* Retrieves the sink's latency range: via the IO thread for a linked
 * sink, directly from thread_info before linking. */
2412 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2413 pa_sink_assert_ref(s);
2414 pa_assert_ctl_context();
2415 pa_assert(min_latency);
2416 pa_assert(max_latency);
2418 if (PA_SINK_IS_LINKED(s->state)) {
2419 pa_usec_t r[2] = { 0, 0 };
2421 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2423 *min_latency = r[0];
2424 *max_latency = r[1];
2426 *min_latency = s->thread_info.min_latency;
2427 *max_latency = s->thread_info.max_latency;
2431 /* Called from IO thread */
/* Applies a new latency range on the IO side, notifies all inputs that
 * declare an update_sink_latency_range() callback, invalidates the cached
 * requested latency and forwards the range to the monitor source. No-op
 * when unchanged. */
2432 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2433 pa_sink_assert_ref(s);
2434 pa_sink_assert_io_context(s);
2436 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2437 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2438 pa_assert(min_latency <= max_latency);
2440 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2441 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2442 max_latency == ABSOLUTE_MAX_LATENCY) ||
2443 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2445 if (s->thread_info.min_latency == min_latency &&
2446 s->thread_info.max_latency == max_latency)
2449 s->thread_info.min_latency = min_latency;
2450 s->thread_info.max_latency = max_latency;
2452 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2456 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2457 if (i->update_sink_latency_range)
2458 i->update_sink_latency_range(i);
2461 pa_sink_invalidate_requested_latency(s, FALSE);
2463 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2466 /* Called from main thread */
/* Configures the fixed latency of a non-dynamic-latency sink, clamped to
 * the absolute bounds. For dynamic-latency sinks only 0 is accepted (and
 * ignored). Routed through the IO thread when linked; the monitor source
 * is kept in sync. */
2467 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2468 pa_sink_assert_ref(s);
2469 pa_assert_ctl_context();
2471 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2472 pa_assert(latency == 0);
2476 if (latency < ABSOLUTE_MIN_LATENCY)
2477 latency = ABSOLUTE_MIN_LATENCY;
2479 if (latency > ABSOLUTE_MAX_LATENCY)
2480 latency = ABSOLUTE_MAX_LATENCY;
2482 if (PA_SINK_IS_LINKED(s->state))
2483 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2485 s->thread_info.fixed_latency = latency;
2487 pa_source_set_fixed_latency(s->monitor_source, latency);
2490 /* Called from main thread */
2491 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2494 pa_sink_assert_ref(s);
2495 pa_assert_ctl_context();
2497 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2500 if (PA_SINK_IS_LINKED(s->state))
2501 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2503 latency = s->thread_info.fixed_latency;
2508 /* Called from IO thread */
2509 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2510 pa_sink_assert_ref(s);
2511 pa_sink_assert_io_context(s);
2513 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2514 pa_assert(latency == 0);
2518 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2519 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2521 if (s->thread_info.fixed_latency == latency)
2524 s->thread_info.fixed_latency = latency;
2526 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2530 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2531 if (i->update_sink_fixed_latency)
2532 i->update_sink_fixed_latency(i);
2535 pa_sink_invalidate_requested_latency(s, FALSE);
2537 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2540 /* Called from main context */
2541 size_t pa_sink_get_max_rewind(pa_sink *s) {
2543 pa_sink_assert_ref(s);
2544 pa_assert_ctl_context();
2546 if (!PA_SINK_IS_LINKED(s->state))
2547 return s->thread_info.max_rewind;
2549 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2554 /* Called from main context */
2555 size_t pa_sink_get_max_request(pa_sink *s) {
2557 pa_sink_assert_ref(s);
2558 pa_assert_ctl_context();
2560 if (!PA_SINK_IS_LINKED(s->state))
2561 return s->thread_info.max_request;
2563 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2568 /* Called from main context */
2569 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2570 pa_device_port *port;
2572 pa_sink_assert_ref(s);
2573 pa_assert_ctl_context();
2576 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2577 return -PA_ERR_NOTIMPLEMENTED;
2581 return -PA_ERR_NOENTITY;
2583 if (!(port = pa_hashmap_get(s->ports, name)))
2584 return -PA_ERR_NOENTITY;
2586 if (s->active_port == port) {
2587 s->save_port = s->save_port || save;
2591 if ((s->set_port(s, port)) < 0)
2592 return -PA_ERR_NOENTITY;
2594 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2596 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2598 s->active_port = port;
2599 s->save_port = save;
2604 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2605 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2609 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2612 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2614 if (pa_streq(ff, "microphone"))
2615 t = "audio-input-microphone";
2616 else if (pa_streq(ff, "webcam"))
2618 else if (pa_streq(ff, "computer"))
2620 else if (pa_streq(ff, "handset"))
2622 else if (pa_streq(ff, "portable"))
2623 t = "multimedia-player";
2624 else if (pa_streq(ff, "tv"))
2625 t = "video-display";
2628 * The following icons are not part of the icon naming spec,
2629 * because Rodney Dawes sucks as the maintainer of that spec.
2631 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2633 else if (pa_streq(ff, "headset"))
2634 t = "audio-headset";
2635 else if (pa_streq(ff, "headphone"))
2636 t = "audio-headphones";
2637 else if (pa_streq(ff, "speaker"))
2638 t = "audio-speakers";
2639 else if (pa_streq(ff, "hands-free"))
2640 t = "audio-handsfree";
2644 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2645 if (pa_streq(c, "modem"))
2652 t = "audio-input-microphone";
2655 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2656 if (strstr(profile, "analog"))
2658 else if (strstr(profile, "iec958"))
2660 else if (strstr(profile, "hdmi"))
2664 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2666 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2671 pa_bool_t pa_device_init_description(pa_proplist *p) {
2672 const char *s, *d = NULL, *k;
2675 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2678 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2679 if (pa_streq(s, "internal"))
2680 d = _("Internal Audio");
2683 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2684 if (pa_streq(s, "modem"))
2688 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2693 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2696 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2698 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2703 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2707 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2710 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2711 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2712 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2719 unsigned pa_device_init_priority(pa_proplist *p) {
2721 unsigned priority = 0;
2725 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2727 if (pa_streq(s, "sound"))
2729 else if (!pa_streq(s, "modem"))
2733 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2735 if (pa_streq(s, "internal"))
2737 else if (pa_streq(s, "speaker"))
2739 else if (pa_streq(s, "headphone"))
2743 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2745 if (pa_streq(s, "pci"))
2747 else if (pa_streq(s, "usb"))
2749 else if (pa_streq(s, "bluetooth"))
2753 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2755 if (pa_startswith(s, "analog-"))
2757 else if (pa_startswith(s, "iec958-"))