2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
/* Maximum number of sink inputs mixed in a single render pass (see fill_mix_info()). */
49 #define MAX_MIX_CHANNELS 32
/* Size of the internal mix buffer: one memory page. */
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard latency clamp; the max uses PA_USEC_PER_SEC, so these are presumably in usec (500 us .. 10 s) — confirm against pulsecore headers. */
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency reported for sinks without PA_SINK_DYNAMIC_LATENCY: 250 ms. */
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Runtime type-check support: makes pa_sink a checked subtype of pa_msgobject
 * (enables the PA_SINK() cast macro and pa_sink_assert_ref()). */
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
/* Forward declaration: destructor invoked when the last reference is dropped. */
57 static void sink_free(pa_object *s);
/* Initialize a caller-provided pa_sink_new_data: zero all fields and attach a
 * fresh, empty property list. Presumably returns 'data' per the declared
 * return type — the return statement is not visible in this extraction. */
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
/* Store a copy of 'name' on the new-data struct. NOTE(review): the free of a
 * previously set name is not visible in this extraction — verify upstream. */
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
/* Copy an optional sample spec into the new-data struct; sample_spec_is_set
 * records whether a non-NULL spec was supplied. */
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
/* Copy an optional channel map into the new-data struct; channel_map_is_set
 * records whether a non-NULL map was supplied. */
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
/* Copy an optional initial volume into the new-data struct; volume_is_set
 * records whether a non-NULL volume was supplied. */
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Record an explicit initial mute state; '!!' normalizes to 0/1. Unlike the
 * pointer-based setters, a value is always considered "set" here. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
/* Replace the requested active-port name, freeing any previous value. */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
/* Release everything a pa_sink_new_data owns: the proplist, all device ports
 * still in the hashmap (stolen one by one and freed), the hashmap itself, the
 * name and the active-port string. Counterpart of pa_sink_new_data_init(). */
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
/* Allocate a pa_device_port with 'extra' trailing bytes for implementor data
 * (the base size is aligned via PA_ALIGN so the trailing area is usable).
 * Copies name and description. NOTE(review): initialization of remaining
 * fields and the return statement are not visible in this extraction. */
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Free a pa_device_port. NOTE(review): only the description free is visible
 * here; the frees of p->name and of p itself are elided in this extraction. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
/* Clear the implementor-provided callback slots so a sink starts (or returns
 * to) a state with no backend hooks installed. */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
/* Construct a new sink from the fully-filled pa_sink_new_data:
 *  - registers the name in the namereg and fires the SINK_NEW/SINK_FIXATE hooks
 *    (either hook may veto, in which case the name is unregistered and NULL is
 *    presumably returned — the return paths are elided in this extraction);
 *  - validates driver/name UTF-8, sample spec, channel map and volume
 *    (channel map and volume get defaults when not explicitly set);
 *  - initializes the pa_sink fields, steals the port list from 'data' and
 *    picks an active port (requested port first, otherwise highest priority);
 *  - inserts the sink into core->sinks (and the card's sink list);
 *  - creates the companion "<name>.monitor" source mirroring the sink's
 *    sample spec, channel map and latency flags.
 * NOTE(review): many interior lines (error returns, assertions, closing
 * braces) are missing from this extraction; do not infer full control flow
 * from what is shown. */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
181 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name; failure means a name collision or invalid name. */
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
188 pa_sink_new_data_set_name(data, name);
/* Policy hook: modules may veto or adjust the new sink's data. */
190 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
192 pa_namereg_unregister(core, name);
196 /* FIXME, need to free s here on failure */
/* Validate and fixate the caller-supplied parameters. */
198 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
199 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
201 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
203 if (!data->channel_map_is_set)
204 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
206 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
207 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
209 if (!data->volume_is_set)
210 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
212 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
213 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
215 if (!data->muted_is_set)
/* Inherit card properties, then fill in standard device metadata. */
219 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221 pa_device_init_description(data->proplist);
222 pa_device_init_icon(data->proplist, TRUE);
223 pa_device_init_intended_roles(data->proplist);
/* Last chance for modules to tweak the (now validated) data. */
225 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
227 pa_namereg_unregister(core, name);
/* Wire up the object vtable and copy the fixated data into the sink. */
231 s->parent.parent.free = sink_free;
232 s->parent.process_msg = pa_sink_process_msg;
235 s->state = PA_SINK_INIT;
237 s->suspend_cause = 0;
238 s->name = pa_xstrdup(name);
239 s->proplist = pa_proplist_copy(data->proplist);
240 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
241 s->module = data->module;
242 s->card = data->card;
244 s->sample_spec = data->sample_spec;
245 s->channel_map = data->channel_map;
247 s->inputs = pa_idxset_new(NULL, NULL);
250 s->reference_volume = s->virtual_volume = data->volume;
251 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
252 s->base_volume = PA_VOLUME_NORM;
253 s->n_volume_steps = PA_VOLUME_NORM+1;
254 s->muted = data->muted;
255 s->refresh_volume = s->refresh_muted = FALSE;
257 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
265 /* As a minor optimization we just steal the list instead of
267 s->ports = data->ports;
/* Port selection: prefer the explicitly requested port; otherwise fall back
 * to the highest-priority port in the map. */
270 s->active_port = NULL;
271 s->save_port = FALSE;
273 if (data->active_port && s->ports)
274 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
275 s->save_port = data->save_port;
277 if (!s->active_port && s->ports) {
281 PA_HASHMAP_FOREACH(p, s->ports, state)
282 if (!s->active_port || p->priority > s->active_port->priority)
286 s->save_volume = data->save_volume;
287 s->save_muted = data->save_muted;
289 pa_silence_memchunk_get(
290 &core->silence_cache,
/* IO-thread shadow state, initialized from the main-thread values. */
296 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
297 s->thread_info.soft_volume = s->soft_volume;
298 s->thread_info.soft_muted = s->muted;
299 s->thread_info.state = s->state;
300 s->thread_info.rewind_nbytes = 0;
301 s->thread_info.rewind_requested = FALSE;
302 s->thread_info.max_rewind = 0;
303 s->thread_info.max_request = 0;
304 s->thread_info.requested_latency_valid = FALSE;
305 s->thread_info.requested_latency = 0;
306 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
307 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
/* Register with the core (assigns s->index) and with the owning card. */
309 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
312 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
314 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
315 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
318 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
319 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors this sink's rendered output. */
323 pa_source_new_data_init(&source_data);
324 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
325 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
326 source_data.name = pa_sprintf_malloc("%s.monitor", name);
327 source_data.driver = data->driver;
328 source_data.module = data->module;
329 source_data.card = data->card;
331 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
332 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
333 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
335 s->monitor_source = pa_source_new(core, &source_data,
336 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
337 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
339 pa_source_new_data_done(&source_data);
341 if (!s->monitor_source) {
347 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency window and rewind limit in sync with ours. */
349 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
350 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
355 /* Called from main context */
/* Transition the sink to 'state'. Asks the implementor first (s->set_state),
 * then tells the IO thread synchronously via SET_STATE; if the IO-thread side
 * fails the implementor state change is rolled back. On success, fires the
 * STATE_CHANGED hook/subscription event (unless entering UNLINKED), and on a
 * suspend/resume edge notifies every sink input (killing those flagged
 * FAIL_ON_SUSPEND) and syncs the monitor source's suspend state.
 * NOTE(review): early-return and error-path lines are elided in this
 * extraction. */
356 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
358 pa_bool_t suspend_change;
359 pa_sink_state_t original_state;
/* No-op if we are already in the requested state. */
363 if (s->state == state)
366 original_state = s->state;
/* True when this transition crosses the suspended/opened boundary. */
369 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
370 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
373 if ((ret = s->set_state(s, state)) < 0)
377 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* Roll back the implementor-side change if the IO thread refused. */
380 s->set_state(s, original_state);
387 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
388 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
389 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
392 if (suspend_change) {
396 /* We're suspending or resuming, tell everyone about it */
398 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
399 if (s->state == PA_SINK_SUSPENDED &&
400 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
401 pa_sink_input_kill(i);
403 i->suspend(i, state == PA_SINK_SUSPENDED);
405 if (s->monitor_source)
406 pa_source_sync_suspend(s->monitor_source);
412 /* Called from main context */
/* Finish sink construction after the implementor filled in asyncmsgq, rtpoll,
 * latency range and volume flags: derives DECIBEL/FLAT volume flags, syncs
 * the thread-side volume/mute shadows, asserts the implementor kept all flag
 * and latency invariants consistent with the monitor source, moves the sink
 * to IDLE, puts the monitor source, and announces the new sink via
 * subscription event and SINK_PUT hook. */
413 void pa_sink_put(pa_sink* s) {
414 pa_sink_assert_ref(s);
416 pa_assert(s->state == PA_SINK_INIT);
418 /* The following fields must be initialized properly when calling _put() */
419 pa_assert(s->asyncmsgq);
420 pa_assert(s->rtpoll);
421 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
423 /* Generally, flags should be initialized via pa_sink_new(). As a
424 * special exception we allow volume related flags to be set
425 * between _new() and _put(). */
/* Pure software volume is always in dB; flat volume additionally requires
 * the core-wide flat_volumes setting. */
427 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
428 s->flags |= PA_SINK_DECIBEL_VOLUME;
430 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
431 s->flags |= PA_SINK_FLAT_VOLUME;
433 s->thread_info.soft_volume = s->soft_volume;
434 s->thread_info.soft_muted = s->muted;
/* Invariant checks: volume metadata, latency mode, and monitor-source flags
 * must all agree with this sink's configuration. */
436 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
437 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
438 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
439 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
440 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
442 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
443 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
444 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
446 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
448 pa_source_put(s->monitor_source);
450 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
451 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
454 /* Called from main context */
/* Detach the sink from the core: fires the UNLINK hook, unregisters the name,
 * removes the sink from the core and card sets, kills all remaining inputs,
 * transitions to UNLINKED, unlinks the monitor source, and posts the REMOVE
 * subscription event plus the UNLINK_POST hook. Idempotent by design. */
455 void pa_sink_unlink(pa_sink* s) {
457 pa_sink_input *i, *j = NULL;
461 /* Please note that pa_sink_unlink() does more than simply
462 * reversing pa_sink_put(). It also undoes the registrations
463 * already done in pa_sink_new()! */
465 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
466 * may be called multiple times on the same sink without bad
469 linked = PA_SINK_IS_LINKED(s->state);
472 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
474 if (s->state != PA_SINK_UNLINKED)
475 pa_namereg_unregister(s->core, s->name);
476 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
479 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill inputs until the set is empty; each kill removes the input. */
481 while ((i = pa_idxset_first(s->inputs, NULL))) {
483 pa_sink_input_kill(i);
488 sink_set_state(s, PA_SINK_UNLINKED);
/* Force the state in case sink_set_state() could not run (see 'linked'). */
490 s->state = PA_SINK_UNLINKED;
494 if (s->monitor_source)
495 pa_source_unlink(s->monitor_source);
498 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
499 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
503 /* Called from main context */
/* Destructor, invoked when the refcount hits zero: ensures the sink is
 * unlinked, drops the monitor source reference, frees the input containers
 * (unreffing inputs still held by the IO-thread hashmap), the silence
 * memblock, the proplist, and the port map. NOTE(review): frees of name,
 * driver and the sink struct itself are elided in this extraction. */
504 static void sink_free(pa_object *o) {
505 pa_sink *s = PA_SINK(o);
509 pa_assert(pa_sink_refcnt(s) == 0);
511 if (PA_SINK_IS_LINKED(s->state))
514 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
516 if (s->monitor_source) {
517 pa_source_unref(s->monitor_source);
518 s->monitor_source = NULL;
521 pa_idxset_free(s->inputs, NULL, NULL);
/* The thread_info map holds its own references to the inputs. */
523 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
524 pa_sink_input_unref(i);
526 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
528 if (s->silence.memblock)
529 pa_memblock_unref(s->silence.memblock);
535 pa_proplist_free(s->proplist);
540 while ((p = pa_hashmap_steal_first(s->ports)))
541 pa_device_port_free(p);
543 pa_hashmap_free(s->ports, NULL, NULL);
549 /* Called from main context */
/* Install the async message queue used to talk to the IO thread; the
 * monitor source is kept in sync with the same queue. */
550 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
551 pa_sink_assert_ref(s);
555 if (s->monitor_source)
556 pa_source_set_asyncmsgq(s->monitor_source, q);
559 /* Called from main context */
/* Install the rtpoll object of the IO thread; mirrored onto the monitor
 * source so both run off the same poll loop. */
560 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
561 pa_sink_assert_ref(s);
565 if (s->monitor_source)
566 pa_source_set_rtpoll(s->monitor_source, p);
569 /* Called from main context */
/* Re-evaluate RUNNING vs IDLE based on whether anything uses the sink.
 * Does nothing while suspended (suspend is managed via pa_sink_suspend()). */
570 int pa_sink_update_status(pa_sink*s) {
571 pa_sink_assert_ref(s);
572 pa_assert(PA_SINK_IS_LINKED(s->state));
574 if (s->state == PA_SINK_SUSPENDED)
577 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
580 /* Called from main context */
/* Add or remove 'cause' from the suspend-cause bitmask (mirrored on the
 * monitor source). If the resulting desired suspended-ness already matches
 * the current state nothing happens; otherwise the sink is moved to
 * SUSPENDED, or back to RUNNING/IDLE depending on use. */
581 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_bool_t cause) {
582 pa_sink_assert_ref(s);
583 pa_assert(PA_SINK_IS_LINKED(s->state));
584 pa_assert(cause != 0);
587 s->suspend_cause |= cause;
588 s->monitor_source->suspend_cause |= cause;
590 s->suspend_cause &= ~cause;
591 s->monitor_source->suspend_cause &= ~cause;
/* Already in the desired suspend state? Then nothing to do. */
594 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
597 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
599 if (s->suspend_cause)
600 return sink_set_state(s, PA_SINK_SUSPENDED);
602 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
605 /* Called from main context */
/* Begin moving all inputs away from this sink: each input that successfully
 * starts its move is kept referenced (presumably pushed onto queue 'q' — the
 * push line is elided in this extraction); inputs that fail to start are
 * unreffed and left in place. Returns the queue for a later
 * pa_sink_move_all_finish()/..._fail(). */
606 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
607 pa_sink_input *i, *n;
610 pa_sink_assert_ref(s);
611 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before touching 'i', since a started move removes
 * the input from s->inputs. */
616 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
617 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
619 pa_sink_input_ref(i);
621 if (pa_sink_input_start_move(i) >= 0)
624 pa_sink_input_unref(i);
630 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): re-attach each
 * queued input to sink 's' (killing inputs whose move fails), drop the
 * reference taken at start time, and free the queue. */
631 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
634 pa_sink_assert_ref(s);
635 pa_assert(PA_SINK_IS_LINKED(s->state));
638 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
639 if (pa_sink_input_finish_move(i, s, save) < 0)
640 pa_sink_input_kill(i);
642 pa_sink_input_unref(i);
645 pa_queue_free(q, NULL, NULL);
648 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): give the MOVE_FAIL
 * hook a chance to rescue each input; if no hook handled it (PA_HOOK_OK),
 * kill the input and drop the start-time reference. Frees the queue. */
649 void pa_sink_move_all_fail(pa_queue *q) {
653 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
654 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
655 pa_sink_input_kill(i);
656 pa_sink_input_unref(i);
660 pa_queue_free(q, NULL, NULL);
663 /* Called from IO thread context */
/* Process a rewind of 'nbytes' in the playback stream: clears the pending
 * rewind request, propagates the rewind to every attached sink input and,
 * if linked, to the monitor source. Skipped entirely while suspended or when
 * no rewind was requested and nbytes is zero. */
664 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
667 pa_sink_assert_ref(s);
668 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
670 /* If nobody requested this and this is actually no real rewind
671 * then we can short cut this */
672 if (!s->thread_info.rewind_requested && nbytes <= 0)
675 s->thread_info.rewind_nbytes = 0;
676 s->thread_info.rewind_requested = FALSE;
678 if (s->thread_info.state == PA_SINK_SUSPENDED)
682 pa_log_debug("Processing rewind...");
684 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
685 pa_sink_input_assert_ref(i);
686 pa_sink_input_process_rewind(i, nbytes);
690 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
691 pa_source_process_rewind(s->monitor_source, nbytes);
694 /* Called from IO thread context */
/* Peek up to 'maxinfo' (bounded by MAX_MIX_CHANNELS at call sites) attached
 * inputs into the pa_mix_info array: each entry gets the input's next chunk
 * and volume, and holds a reference to the input in ->userdata. Pure-silence
 * chunks are dropped (their memblock unreffed) rather than mixed. *length is
 * presumably narrowed to the shortest contributing chunk via 'mixlength', and
 * the number of filled entries returned — the final store/return lines are
 * elided in this extraction. */
695 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
699 size_t mixlength = *length;
701 pa_sink_assert_ref(s);
704 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
705 pa_sink_input_assert_ref(i);
707 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
709 if (mixlength == 0 || info->chunk.length < mixlength)
710 mixlength = info->chunk.length;
712 if (pa_memblock_is_silence(info->chunk.memblock)) {
713 pa_memblock_unref(info->chunk.memblock);
/* Keep a reference so the input stays alive until inputs_drop(). */
717 info->userdata = pa_sink_input_ref(i);
719 pa_assert(info->chunk.memblock);
720 pa_assert(info->chunk.length > 0);
733 /* Called from IO thread context */
/* After mixing: advance every attached input by result->length bytes, feed
 * per-input data to any direct outputs on the monitor source (re-applying
 * the input's own volume when it was mixed), release the references and
 * memblocks held in the pa_mix_info array (including entries whose input has
 * since disappeared), and finally post the mixed result to the monitor
 * source. NOTE(review): several bookkeeping lines (the 'p' index init, the
 * n_unreffed accounting, loop braces) are elided in this extraction. */
734 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
738 unsigned n_unreffed = 0;
740 pa_sink_assert_ref(s);
742 pa_assert(result->memblock);
743 pa_assert(result->length > 0);
745 /* We optimize for the case where the order of the inputs has not changed */
747 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
749 pa_mix_info* m = NULL;
751 pa_sink_input_assert_ref(i);
753 /* Let's try to find the matching entry info the pa_mix_info array */
754 for (j = 0; j < n; j ++) {
756 if (info[p].userdata == i) {
/* Consume what we just played from this input. */
767 pa_sink_input_drop(i, result->length);
769 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
771 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* If the input contributed a chunk, hand its own (volume-adjusted)
 * data to the direct outputs; otherwise use the shared result. */
776 if (m && m->chunk.memblock) {
778 pa_memblock_ref(c.memblock);
779 pa_assert(result->length <= c.length);
780 c.length = result->length;
782 pa_memchunk_make_writable(&c, 0);
783 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
786 pa_memblock_ref(c.memblock);
787 pa_assert(result->length <= c.length);
788 c.length = result->length;
791 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
792 pa_source_output_assert_ref(o);
793 pa_assert(o->direct_on_input == i);
794 pa_source_post_direct(s->monitor_source, o, &c);
797 pa_memblock_unref(c.memblock);
/* Release the matched entry's chunk and input reference. */
802 if (m->chunk.memblock)
803 pa_memblock_unref(m->chunk.memblock);
804 pa_memchunk_reset(&m->chunk);
806 pa_sink_input_unref(m->userdata);
813 /* Now drop references to entries that are included in the
814 * pa_mix_info array but don't exist anymore */
816 if (n_unreffed < n) {
817 for (; n > 0; info++, n--) {
819 pa_sink_input_unref(info->userdata);
820 if (info->chunk.memblock)
821 pa_memblock_unref(info->chunk.memblock);
825 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
826 pa_source_post(s->monitor_source, result);
829 /* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result (a new reference
 * the caller must release). Suspended sinks produce silence; zero inputs
 * yield the cached silence chunk; exactly one input short-circuits the mixer
 * and applies soft volume/mute directly; otherwise pa_mix() combines all
 * peeked chunks into a fresh memblock. Ends with inputs_drop() to advance
 * the inputs and feed the monitor source. NOTE(review): branch delimiters
 * and several declarations (n, volume, ptr) are elided in this extraction. */
830 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
831 pa_mix_info info[MAX_MIX_CHANNELS];
833 size_t block_size_max;
835 pa_sink_assert_ref(s);
836 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
837 pa_assert(pa_frame_aligned(length, &s->sample_spec));
/* Rendering while a rewind is pending would lose the rewound data. */
842 pa_assert(!s->thread_info.rewind_requested);
843 pa_assert(s->thread_info.rewind_nbytes == 0);
845 if (s->thread_info.state == PA_SINK_SUSPENDED) {
846 result->memblock = pa_memblock_ref(s->silence.memblock);
847 result->index = s->silence.index;
848 result->length = PA_MIN(s->silence.length, length);
/* Clamp the request to the mix buffer and the mempool's block size,
 * keeping frame alignment. */
853 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
855 block_size_max = pa_mempool_block_size_max(s->core->mempool);
856 if (length > block_size_max)
857 length = pa_frame_align(block_size_max, &s->sample_spec);
859 pa_assert(length > 0);
861 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, hand out (a slice of) the silence chunk. */
865 *result = s->silence;
866 pa_memblock_ref(result->memblock);
868 if (result->length > length)
869 result->length = length;
/* n == 1: pass the single chunk through, applying soft volume/mute. */
874 *result = info[0].chunk;
875 pa_memblock_ref(result->memblock);
877 if (result->length > length)
878 result->length = length;
880 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
882 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
883 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
884 pa_memblock_unref(result->memblock);
885 pa_silence_memchunk_get(&s->core->silence_cache,
891 pa_memchunk_make_writable(result, 0);
892 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: real mix into a freshly allocated memblock. */
897 result->memblock = pa_memblock_new(s->core->mempool, length);
899 ptr = pa_memblock_acquire(result->memblock);
900 result->length = pa_mix(info, n,
903 &s->thread_info.soft_volume,
904 s->thread_info.soft_muted);
905 pa_memblock_release(result->memblock);
910 inputs_drop(s, info, n, result);
915 /* Called from IO thread context */
/* Render mixed audio directly into the caller-provided chunk 'target',
 * possibly shortening target->length (never past the mempool block-size
 * clamp). Suspended or input-less sinks write silence; a single input is
 * copied (with soft volume applied on a private writable copy first);
 * multiple inputs are mixed in place via pa_mix(). Ends with inputs_drop().
 * NOTE(review): branch delimiters and declarations (n, volume, vchunk, ptr)
 * are elided in this extraction. */
916 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
917 pa_mix_info info[MAX_MIX_CHANNELS];
919 size_t length, block_size_max;
921 pa_sink_assert_ref(s);
922 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
924 pa_assert(target->memblock);
925 pa_assert(target->length > 0);
926 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
930 pa_assert(!s->thread_info.rewind_requested);
931 pa_assert(s->thread_info.rewind_nbytes == 0);
933 if (s->thread_info.state == PA_SINK_SUSPENDED) {
934 pa_silence_memchunk(target, &s->sample_spec);
938 length = target->length;
939 block_size_max = pa_mempool_block_size_max(s->core->mempool);
940 if (length > block_size_max)
941 length = pa_frame_align(block_size_max, &s->sample_spec);
943 pa_assert(length > 0);
945 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: write silence into the (possibly shortened) target. */
948 if (target->length > length)
949 target->length = length;
951 pa_silence_memchunk(target, &s->sample_spec);
/* n == 1: copy the single chunk, applying soft volume/mute beforehand. */
955 if (target->length > length)
956 target->length = length;
958 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
960 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
961 pa_silence_memchunk(target, &s->sample_spec);
965 vchunk = info[0].chunk;
966 pa_memblock_ref(vchunk.memblock);
968 if (vchunk.length > length)
969 vchunk.length = length;
/* Only copy-on-write and scale when the volume is not unity. */
971 if (!pa_cvolume_is_norm(&volume)) {
972 pa_memchunk_make_writable(&vchunk, 0);
973 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
976 pa_memchunk_memcpy(target, &vchunk);
977 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix straight into the target memblock. */
983 ptr = pa_memblock_acquire(target->memblock);
985 target->length = pa_mix(info, n,
986 (uint8_t*) ptr + target->index, length,
988 &s->thread_info.soft_volume,
989 s->thread_info.soft_muted);
991 pa_memblock_release(target->memblock);
994 inputs_drop(s, info, n, target);
999 /* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the FULL target chunk is
 * filled: presumably loops calling pa_sink_render_into() on successive
 * sub-chunks until target->length bytes are produced — the loop structure is
 * elided in this extraction. */
1000 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1004 pa_sink_assert_ref(s);
1005 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1007 pa_assert(target->memblock);
1008 pa_assert(target->length > 0);
1009 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1013 pa_assert(!s->thread_info.rewind_requested);
1014 pa_assert(s->thread_info.rewind_nbytes == 0);
1023 pa_sink_render_into(s, &chunk);
1032 /* Called from IO thread context */
/* Render EXACTLY 'length' bytes into *result. Performs a first mix pass
 * (like pa_sink_render()) over up to 'length1st' bytes; if that produced
 * fewer than 'length' bytes, the remainder is filled by rendering into the
 * tail of the (made-writable) result block via pa_sink_render_into().
 * NOTE(review): branch delimiters and declarations (n, volume, ptr, chunk,
 * l, d) are elided in this extraction. */
1033 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1034 pa_mix_info info[MAX_MIX_CHANNELS];
1035 size_t length1st = length;
1038 pa_sink_assert_ref(s);
1039 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1040 pa_assert(length > 0);
1041 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1046 pa_assert(!s->thread_info.rewind_requested);
1047 pa_assert(s->thread_info.rewind_nbytes == 0);
1049 pa_assert(length > 0);
1051 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
/* n == 0: hand out cached silence. */
1054 pa_silence_memchunk_get(&s->core->silence_cache,
1059 } else if (n == 1) {
/* n == 1: pass the chunk through with soft volume/mute applied. */
1062 *result = info[0].chunk;
1063 pa_memblock_ref(result->memblock);
1065 if (result->length > length)
1066 result->length = length;
1068 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1070 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1071 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1072 pa_memblock_unref(result->memblock);
1073 pa_silence_memchunk_get(&s->core->silence_cache,
1079 pa_memchunk_make_writable(result, length);
1080 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: mix the first pass into a fresh memblock of the full length. */
1087 result->memblock = pa_memblock_new(s->core->mempool, length);
1089 ptr = pa_memblock_acquire(result->memblock);
1091 result->length = pa_mix(info, n,
1092 (uint8_t*) ptr + result->index, length1st,
1094 &s->thread_info.soft_volume,
1095 s->thread_info.soft_muted);
1097 pa_memblock_release(result->memblock);
1100 inputs_drop(s, info, n, result);
/* Second pass: top up the remainder so exactly 'length' bytes come back. */
1102 if (result->length < length) {
1105 pa_memchunk_make_writable(result, length);
1107 l = length - result->length;
1108 d = result->index + result->length;
1114 pa_sink_render_into(s, &chunk);
1119 result->length = length;
1125 /* Called from main thread */
/* Query the current sink latency synchronously from the IO thread via
 * GET_LATENCY. Returns early (presumably 0 — return lines elided) when the
 * sink is suspended or does not advertise PA_SINK_LATENCY. */
1126 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1129 pa_sink_assert_ref(s);
1130 pa_assert(PA_SINK_IS_LINKED(s->state));
1132 /* The returned value is supposed to be in the time domain of the sound card! */
1134 if (s->state == PA_SINK_SUSPENDED)
1137 if (!(s->flags & PA_SINK_LATENCY))
1140 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1145 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): dispatches GET_LATENCY
 * directly through the object's process_msg instead of the async queue.
 * Same early-outs for suspended sinks and sinks without PA_SINK_LATENCY. */
1146 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1150 pa_sink_assert_ref(s);
1151 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1153 /* The returned value is supposed to be in the time domain of the sound card! */
1155 if (s->thread_info.state == PA_SINK_SUSPENDED)
1158 if (!(s->flags & PA_SINK_LATENCY))
1161 o = PA_MSGOBJECT(s);
1163 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1165 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
/* Recompute a sink input's relative and soft volume against a prospective
 * sink volume (already remapped to the input's channel map):
 *   i->relative_volume := i->virtual_volume / new_volume  (linear domain)
 *   i->soft_volume     := i->relative_volume * i->volume_factor
 * A muted sink channel forces the soft volume of that channel to MUTED while
 * leaving relative_volume untouched (avoids division by ~zero). Fires the
 * SINK_INPUT_SET_VOLUME hook, which may adjust soft_volume further. The
 * caller is responsible for syncing thread_info afterwards. */
1171 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1174 pa_sink_input_assert_ref(i);
1175 pa_assert(new_volume->channels == i->sample_spec.channels);
1178 * This basically calculates:
1180 * i->relative_volume := i->virtual_volume / new_volume
1181 * i->soft_volume := i->relative_volume * i->volume_factor
1184 /* The new sink volume passed in here must already be remapped to
1185 * the sink input's channel map! */
1187 i->soft_volume.channels = i->sample_spec.channels;
1189 for (c = 0; c < i->sample_spec.channels; c++)
1191 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1192 /* We leave i->relative_volume untouched */
1193 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1195 i->relative_volume[c] =
1196 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1197 pa_sw_volume_to_linear(new_volume->values[c]);
1199 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1200 i->relative_volume[c] *
1201 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1204 /* Hooks have the ability to play games with i->soft_volume */
1205 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1207 /* We don't copy the soft_volume to the thread_info data
1208 * here. That must be done by the caller */
1211 /* Called from main thread */
/* Flat-volume bookkeeping: compute what the sink volume SHOULD become as the
 * per-channel maximum of all inputs' virtual volumes (remapped into the
 * sink's channel map), and recompute every input's soft volume against that
 * prospective value. Does NOT apply the volume itself — the caller follows
 * up with pa_sink_set_volume(). With no inputs, the reference volume is
 * returned unchanged. */
1212 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1216 pa_sink_assert_ref(s);
1217 pa_assert(new_volume);
1218 pa_assert(PA_SINK_IS_LINKED(s->state));
1219 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1221 /* This is called whenever a sink input volume changes or a sink
1222 * input is added/removed and we might need to fix up the sink
1223 * volume accordingly. Please note that we don't actually update
1224 * the sinks volume here, we only return how it needs to be
1225 * updated. The caller should then call pa_sink_set_volume().*/
1227 if (pa_idxset_isempty(s->inputs)) {
1228 /* In the special case that we have no sink input we leave the
1229 * volume unmodified. */
1230 *new_volume = s->reference_volume;
/* Start from silence and take the per-channel max over all inputs. */
1234 pa_cvolume_mute(new_volume, s->channel_map.channels);
1236 /* First let's determine the new maximum volume of all inputs
1237 * connected to this sink */
1238 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1240 pa_cvolume remapped_volume;
1242 remapped_volume = i->virtual_volume;
1243 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1245 for (c = 0; c < new_volume->channels; c++)
1246 if (remapped_volume.values[c] > new_volume->values[c])
1247 new_volume->values[c] = remapped_volume.values[c];
1250 /* Then, let's update the soft volumes of all inputs connected
1252 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1253 pa_cvolume remapped_new_volume;
1255 remapped_new_volume = *new_volume;
1256 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1257 compute_new_soft_volume(i, &remapped_new_volume);
1259 /* We don't copy soft_volume to the thread_info data here
1260 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1261 * want the update to be atomically with the sink volume
1262 * update, hence we do it within the pa_sink_set_volume() call
1267 /* Called from main thread */
/* Inverse of pa_sink_update_flat_volume(): after the sink's virtual volume
 * changed for a reason other than an input change, push the change down —
 * each input's virtual volume becomes relative_volume * sink volume (in the
 * linear domain, remapped to the input's channel map). Changed inputs get a
 * recomputed soft volume and a CHANGE subscription event; finally all
 * thread-side soft volumes are synced in one SYNC_VOLUMES message. */
1268 void pa_sink_propagate_flat_volume(pa_sink *s) {
1272 pa_sink_assert_ref(s);
1273 pa_assert(PA_SINK_IS_LINKED(s->state));
1274 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1276 /* This is called whenever the sink volume changes that is not
1277 * caused by a sink input volume change. We need to fix up the
1278 * sink input volumes accordingly */
1280 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1281 pa_cvolume sink_volume, new_virtual_volume;
1284 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1286 sink_volume = s->virtual_volume;
1287 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1289 for (c = 0; c < i->sample_spec.channels; c++)
1290 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1291 i->relative_volume[c] *
1292 pa_sw_volume_to_linear(sink_volume.values[c]));
1294 new_virtual_volume.channels = i->sample_spec.channels;
1296 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1297 i->virtual_volume = new_virtual_volume;
1299 /* Hmm, the soft volume might no longer actually match
1300 * what has been chosen as new virtual volume here,
1301 * especially when the old volume was
1302 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1304 compute_new_soft_volume(i, &sink_volume);
1306 /* The virtual volume changed, let's tell people so */
1307 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1311 /* If the soft_volume of any of the sink inputs got changed, let's
1312 * make sure the thread copies are synced up. */
1313 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1316 /* Called from main thread */
/* Apply a new virtual volume to the sink.
 *  propagate        - also push the change to inputs when FLAT_VOLUME is set
 *  sendmsg          - presumably gates the SET_VOLUME message to the IO
 *                     thread (the conditional around line 1353 is elided in
 *                     this extraction — confirm upstream)
 *  become_reference - also store the value as the new reference volume
 *  save             - mark the volume for persistent storage
 * Sinks with a set_volume() callback apply hardware volume (soft volume
 * reset, callback may reinstate one); otherwise the virtual volume becomes
 * the soft volume. A real change posts a CHANGE subscription event. */
1317 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1318 pa_bool_t virtual_volume_changed;
1320 pa_sink_assert_ref(s);
1321 pa_assert(PA_SINK_IS_LINKED(s->state));
1323 pa_assert(pa_cvolume_valid(volume));
1324 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1326 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1327 s->virtual_volume = *volume;
/* Keep an existing save flag unless the volume actually changed. */
1328 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1330 if (become_reference)
1331 s->reference_volume = s->virtual_volume;
1333 /* Propagate this volume change back to the inputs */
1334 if (virtual_volume_changed)
1335 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1336 pa_sink_propagate_flat_volume(s);
1338 if (s->set_volume) {
1339 /* If we have a function set_volume(), then we do not apply a
1340 * soft volume by default. However, set_volume() is free to
1341 * apply one to s->soft_volume */
1343 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1347 /* If we have no function set_volume(), then the soft volume
1348 * becomes the virtual volume */
1349 s->soft_volume = s->virtual_volume;
1351 /* This tells the sink that soft and/or virtual volume changed */
1353 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1355 if (virtual_volume_changed)
1356 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1359 /* Called from main thread. Only to be called by sink implementor */
1360 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1361 pa_sink_assert_ref(s);
1364 s->soft_volume = *volume;
1366 if (PA_SINK_IS_LINKED(s->state))
1367 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1369 s->thread_info.soft_volume = *volume;
1372 /* Called from main thread */
1373 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1374 pa_sink_assert_ref(s);
1376 if (s->refresh_volume || force_refresh) {
1377 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1382 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1384 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1386 s->reference_volume = s->virtual_volume;
1388 if (s->flags & PA_SINK_FLAT_VOLUME)
1389 pa_sink_propagate_flat_volume(s);
1391 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1395 return reference ? &s->reference_volume : &s->virtual_volume;
1398 /* Called from main thread */
1399 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume, pa_bool_t save) {
1400 pa_sink_assert_ref(s);
1402 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1403 if (pa_cvolume_equal(&s->virtual_volume, new_volume)) {
1404 s->save_volume = s->save_volume || save;
1408 s->reference_volume = s->virtual_volume = *new_volume;
1409 s->save_volume = save;
1411 if (s->flags & PA_SINK_FLAT_VOLUME)
1412 pa_sink_propagate_flat_volume(s);
1414 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1417 /* Called from main thread */
1418 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1419 pa_bool_t old_muted;
1421 pa_sink_assert_ref(s);
1422 pa_assert(PA_SINK_IS_LINKED(s->state));
1424 old_muted = s->muted;
1426 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1431 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1433 if (old_muted != s->muted)
1434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1437 /* Called from main thread */
1438 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1440 pa_sink_assert_ref(s);
1442 if (s->refresh_muted || force_refresh) {
1443 pa_bool_t old_muted = s->muted;
1448 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1450 if (old_muted != s->muted) {
1451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1453 /* Make sure the soft mute status stays in sync */
1454 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1461 /* Called from main thread */
1462 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted, pa_bool_t save) {
1463 pa_sink_assert_ref(s);
1465 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1467 if (s->muted == new_muted) {
1468 s->save_muted = s->save_muted || save;
1472 s->muted = new_muted;
1473 s->save_muted = save;
1475 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1478 /* Called from main thread */
1479 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1480 pa_sink_assert_ref(s);
1483 pa_proplist_update(s->proplist, mode, p);
1485 if (PA_SINK_IS_LINKED(s->state)) {
1486 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1487 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1493 /* Called from main thread */
1494 void pa_sink_set_description(pa_sink *s, const char *description) {
1496 pa_sink_assert_ref(s);
1498 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1501 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1503 if (old && description && !strcmp(old, description))
1507 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1509 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1511 if (s->monitor_source) {
1514 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1515 pa_source_set_description(s->monitor_source, n);
1519 if (PA_SINK_IS_LINKED(s->state)) {
1520 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1521 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1525 /* Called from main thread */
1526 unsigned pa_sink_linked_by(pa_sink *s) {
1529 pa_sink_assert_ref(s);
1530 pa_assert(PA_SINK_IS_LINKED(s->state));
1532 ret = pa_idxset_size(s->inputs);
1534 /* We add in the number of streams connected to us here. Please
1535 * note the asymmmetry to pa_sink_used_by()! */
1537 if (s->monitor_source)
1538 ret += pa_source_linked_by(s->monitor_source);
1543 /* Called from main thread */
1544 unsigned pa_sink_used_by(pa_sink *s) {
1547 pa_sink_assert_ref(s);
1548 pa_assert(PA_SINK_IS_LINKED(s->state));
1550 ret = pa_idxset_size(s->inputs);
1551 pa_assert(ret >= s->n_corked);
1553 /* Streams connected to our monitor source do not matter for
1554 * pa_sink_used_by()!.*/
1556 return ret - s->n_corked;
1559 /* Called from main thread */
1560 unsigned pa_sink_check_suspend(pa_sink *s) {
1565 pa_sink_assert_ref(s);
1567 if (!PA_SINK_IS_LINKED(s->state))
1572 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1573 pa_sink_input_state_t st;
1575 st = pa_sink_input_get_state(i);
1576 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1578 if (st == PA_SINK_INPUT_CORKED)
1581 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1587 if (s->monitor_source)
1588 ret += pa_source_check_suspend(s->monitor_source);
1593 /* Called from the IO thread */
1594 static void sync_input_volumes_within_thread(pa_sink *s) {
1598 pa_sink_assert_ref(s);
1600 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1601 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1604 i->thread_info.soft_volume = i->soft_volume;
1605 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1609 /* Called from IO thread, except when it is not */
1610 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1611 pa_sink *s = PA_SINK(o);
1612 pa_sink_assert_ref(s);
1614 switch ((pa_sink_message_t) code) {
1616 case PA_SINK_MESSAGE_ADD_INPUT: {
1617 pa_sink_input *i = PA_SINK_INPUT(userdata);
1619 /* If you change anything here, make sure to change the
1620 * sink input handling a few lines down at
1621 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1623 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1625 /* Since the caller sleeps in pa_sink_input_put(), we can
1626 * safely access data outside of thread_info even though
1629 if ((i->thread_info.sync_prev = i->sync_prev)) {
1630 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1631 pa_assert(i->sync_prev->sync_next == i);
1632 i->thread_info.sync_prev->thread_info.sync_next = i;
1635 if ((i->thread_info.sync_next = i->sync_next)) {
1636 pa_assert(i->sink == i->thread_info.sync_next->sink);
1637 pa_assert(i->sync_next->sync_prev == i);
1638 i->thread_info.sync_next->thread_info.sync_prev = i;
1641 pa_assert(!i->thread_info.attached);
1642 i->thread_info.attached = TRUE;
1647 pa_sink_input_set_state_within_thread(i, i->state);
1649 /* The requested latency of the sink input needs to be
1650 * fixed up and then configured on the sink */
1652 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1653 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1655 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1656 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1658 /* We don't rewind here automatically. This is left to the
1659 * sink input implementor because some sink inputs need a
1660 * slow start, i.e. need some time to buffer client
1661 * samples before beginning streaming. */
1663 /* In flat volume mode we need to update the volume as
1665 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1668 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1669 pa_sink_input *i = PA_SINK_INPUT(userdata);
1671 /* If you change anything here, make sure to change the
1672 * sink input handling a few lines down at
1673 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1678 pa_sink_input_set_state_within_thread(i, i->state);
1680 pa_assert(i->thread_info.attached);
1681 i->thread_info.attached = FALSE;
1683 /* Since the caller sleeps in pa_sink_input_unlink(),
1684 * we can safely access data outside of thread_info even
1685 * though it is mutable */
1687 pa_assert(!i->sync_prev);
1688 pa_assert(!i->sync_next);
1690 if (i->thread_info.sync_prev) {
1691 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1692 i->thread_info.sync_prev = NULL;
1695 if (i->thread_info.sync_next) {
1696 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1697 i->thread_info.sync_next = NULL;
1700 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1701 pa_sink_input_unref(i);
1703 pa_sink_invalidate_requested_latency(s);
1704 pa_sink_request_rewind(s, (size_t) -1);
1706 /* In flat volume mode we need to update the volume as
1708 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1711 case PA_SINK_MESSAGE_START_MOVE: {
1712 pa_sink_input *i = PA_SINK_INPUT(userdata);
1714 /* We don't support moving synchronized streams. */
1715 pa_assert(!i->sync_prev);
1716 pa_assert(!i->sync_next);
1717 pa_assert(!i->thread_info.sync_next);
1718 pa_assert(!i->thread_info.sync_prev);
1720 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1722 size_t sink_nbytes, total_nbytes;
1724 /* Get the latency of the sink */
1725 if (!(s->flags & PA_SINK_LATENCY) ||
1726 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1729 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1730 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1732 if (total_nbytes > 0) {
1733 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1734 i->thread_info.rewrite_flush = TRUE;
1735 pa_sink_input_process_rewind(i, sink_nbytes);
1742 pa_assert(i->thread_info.attached);
1743 i->thread_info.attached = FALSE;
1745 /* Let's remove the sink input ...*/
1746 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1747 pa_sink_input_unref(i);
1749 pa_sink_invalidate_requested_latency(s);
1751 pa_log_debug("Requesting rewind due to started move");
1752 pa_sink_request_rewind(s, (size_t) -1);
1754 /* In flat volume mode we need to update the volume as
1756 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1759 case PA_SINK_MESSAGE_FINISH_MOVE: {
1760 pa_sink_input *i = PA_SINK_INPUT(userdata);
1762 /* We don't support moving synchronized streams. */
1763 pa_assert(!i->sync_prev);
1764 pa_assert(!i->sync_next);
1765 pa_assert(!i->thread_info.sync_next);
1766 pa_assert(!i->thread_info.sync_prev);
1768 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1770 pa_assert(!i->thread_info.attached);
1771 i->thread_info.attached = TRUE;
1776 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1777 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1779 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1780 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1782 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1786 /* Get the latency of the sink */
1787 if (!(s->flags & PA_SINK_LATENCY) ||
1788 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1791 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1794 pa_sink_input_drop(i, nbytes);
1796 pa_log_debug("Requesting rewind due to finished move");
1797 pa_sink_request_rewind(s, nbytes);
1800 /* In flat volume mode we need to update the volume as
1802 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1805 case PA_SINK_MESSAGE_SET_VOLUME:
1807 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1808 s->thread_info.soft_volume = s->soft_volume;
1809 pa_sink_request_rewind(s, (size_t) -1);
1812 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1815 /* Fall through ... */
1817 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1818 sync_input_volumes_within_thread(s);
1821 case PA_SINK_MESSAGE_GET_VOLUME:
1824 case PA_SINK_MESSAGE_SET_MUTE:
1826 if (s->thread_info.soft_muted != s->muted) {
1827 s->thread_info.soft_muted = s->muted;
1828 pa_sink_request_rewind(s, (size_t) -1);
1833 case PA_SINK_MESSAGE_GET_MUTE:
1836 case PA_SINK_MESSAGE_SET_STATE: {
1838 pa_bool_t suspend_change =
1839 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1840 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1842 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1844 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1845 s->thread_info.rewind_nbytes = 0;
1846 s->thread_info.rewind_requested = FALSE;
1849 if (suspend_change) {
1853 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1854 if (i->suspend_within_thread)
1855 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1861 case PA_SINK_MESSAGE_DETACH:
1863 /* Detach all streams */
1864 pa_sink_detach_within_thread(s);
1867 case PA_SINK_MESSAGE_ATTACH:
1869 /* Reattach all streams */
1870 pa_sink_attach_within_thread(s);
1873 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1875 pa_usec_t *usec = userdata;
1876 *usec = pa_sink_get_requested_latency_within_thread(s);
1878 if (*usec == (pa_usec_t) -1)
1879 *usec = s->thread_info.max_latency;
1884 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1885 pa_usec_t *r = userdata;
1887 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1892 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1893 pa_usec_t *r = userdata;
1895 r[0] = s->thread_info.min_latency;
1896 r[1] = s->thread_info.max_latency;
1901 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1903 *((size_t*) userdata) = s->thread_info.max_rewind;
1906 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1908 *((size_t*) userdata) = s->thread_info.max_request;
1911 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1913 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1916 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1918 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1921 case PA_SINK_MESSAGE_GET_LATENCY:
1922 case PA_SINK_MESSAGE_MAX:
1929 /* Called from main thread */
1930 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1935 pa_core_assert_ref(c);
1936 pa_assert(cause != 0);
1938 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1941 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1948 /* Called from main thread */
1949 void pa_sink_detach(pa_sink *s) {
1950 pa_sink_assert_ref(s);
1951 pa_assert(PA_SINK_IS_LINKED(s->state));
1953 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1956 /* Called from main thread */
1957 void pa_sink_attach(pa_sink *s) {
1958 pa_sink_assert_ref(s);
1959 pa_assert(PA_SINK_IS_LINKED(s->state));
1961 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1964 /* Called from IO thread */
1965 void pa_sink_detach_within_thread(pa_sink *s) {
1969 pa_sink_assert_ref(s);
1970 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1972 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1976 if (s->monitor_source)
1977 pa_source_detach_within_thread(s->monitor_source);
1980 /* Called from IO thread */
1981 void pa_sink_attach_within_thread(pa_sink *s) {
1985 pa_sink_assert_ref(s);
1986 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1988 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1992 if (s->monitor_source)
1993 pa_source_attach_within_thread(s->monitor_source);
1996 /* Called from IO thread */
1997 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
1998 pa_sink_assert_ref(s);
1999 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2001 if (s->thread_info.state == PA_SINK_SUSPENDED)
2004 if (nbytes == (size_t) -1)
2005 nbytes = s->thread_info.max_rewind;
2007 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2009 if (s->thread_info.rewind_requested &&
2010 nbytes <= s->thread_info.rewind_nbytes)
2013 s->thread_info.rewind_nbytes = nbytes;
2014 s->thread_info.rewind_requested = TRUE;
2016 if (s->request_rewind)
2017 s->request_rewind(s);
2020 /* Called from IO thread */
2021 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2022 pa_usec_t result = (pa_usec_t) -1;
2025 pa_usec_t monitor_latency;
2027 pa_sink_assert_ref(s);
2029 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2030 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2032 if (s->thread_info.requested_latency_valid)
2033 return s->thread_info.requested_latency;
2035 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2037 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2038 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2039 result = i->thread_info.requested_sink_latency;
2041 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2043 if (monitor_latency != (pa_usec_t) -1 &&
2044 (result == (pa_usec_t) -1 || result > monitor_latency))
2045 result = monitor_latency;
2047 if (result != (pa_usec_t) -1)
2048 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2050 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2051 /* Only cache if properly initialized */
2052 s->thread_info.requested_latency = result;
2053 s->thread_info.requested_latency_valid = TRUE;
2059 /* Called from main thread */
2060 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2063 pa_sink_assert_ref(s);
2064 pa_assert(PA_SINK_IS_LINKED(s->state));
2066 if (s->state == PA_SINK_SUSPENDED)
2069 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2073 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2074 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2078 pa_sink_assert_ref(s);
2080 if (max_rewind == s->thread_info.max_rewind)
2083 s->thread_info.max_rewind = max_rewind;
2085 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2086 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2087 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2090 if (s->monitor_source)
2091 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2094 /* Called from main thread */
2095 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2096 pa_sink_assert_ref(s);
2098 if (PA_SINK_IS_LINKED(s->state))
2099 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2101 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2104 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2105 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2108 pa_sink_assert_ref(s);
2110 if (max_request == s->thread_info.max_request)
2113 s->thread_info.max_request = max_request;
2115 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2118 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2119 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2123 /* Called from main thread */
2124 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2125 pa_sink_assert_ref(s);
2127 if (PA_SINK_IS_LINKED(s->state))
2128 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2130 pa_sink_set_max_request_within_thread(s, max_request);
2133 /* Called from IO thread */
2134 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2138 pa_sink_assert_ref(s);
2140 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2143 s->thread_info.requested_latency_valid = FALSE;
2145 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2147 if (s->update_requested_latency)
2148 s->update_requested_latency(s);
2150 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2151 if (i->update_sink_requested_latency)
2152 i->update_sink_requested_latency(i);
2156 /* Called from main thread */
2157 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2158 pa_sink_assert_ref(s);
2160 /* min_latency == 0: no limit
2161 * min_latency anything else: specified limit
2163 * Similar for max_latency */
2165 if (min_latency < ABSOLUTE_MIN_LATENCY)
2166 min_latency = ABSOLUTE_MIN_LATENCY;
2168 if (max_latency <= 0 ||
2169 max_latency > ABSOLUTE_MAX_LATENCY)
2170 max_latency = ABSOLUTE_MAX_LATENCY;
2172 pa_assert(min_latency <= max_latency);
2174 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2175 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2176 max_latency == ABSOLUTE_MAX_LATENCY) ||
2177 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2179 if (PA_SINK_IS_LINKED(s->state)) {
2185 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2187 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2190 /* Called from main thread */
2191 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2192 pa_sink_assert_ref(s);
2193 pa_assert(min_latency);
2194 pa_assert(max_latency);
2196 if (PA_SINK_IS_LINKED(s->state)) {
2197 pa_usec_t r[2] = { 0, 0 };
2199 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2201 *min_latency = r[0];
2202 *max_latency = r[1];
2204 *min_latency = s->thread_info.min_latency;
2205 *max_latency = s->thread_info.max_latency;
2209 /* Called from IO thread */
2210 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2213 pa_sink_assert_ref(s);
2215 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2216 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2217 pa_assert(min_latency <= max_latency);
2219 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2220 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2221 max_latency == ABSOLUTE_MAX_LATENCY) ||
2222 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2224 s->thread_info.min_latency = min_latency;
2225 s->thread_info.max_latency = max_latency;
2227 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2230 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2231 if (i->update_sink_latency_range)
2232 i->update_sink_latency_range(i);
2235 pa_sink_invalidate_requested_latency(s);
2237 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2240 /* Called from main thread, before the sink is put */
2241 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2242 pa_sink_assert_ref(s);
2244 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2246 if (latency < ABSOLUTE_MIN_LATENCY)
2247 latency = ABSOLUTE_MIN_LATENCY;
2249 if (latency > ABSOLUTE_MAX_LATENCY)
2250 latency = ABSOLUTE_MAX_LATENCY;
2252 s->fixed_latency = latency;
2253 pa_source_set_fixed_latency(s->monitor_source, latency);
2256 /* Called from main context */
2257 size_t pa_sink_get_max_rewind(pa_sink *s) {
2259 pa_sink_assert_ref(s);
2261 if (!PA_SINK_IS_LINKED(s->state))
2262 return s->thread_info.max_rewind;
2264 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2269 /* Called from main context */
2270 size_t pa_sink_get_max_request(pa_sink *s) {
2272 pa_sink_assert_ref(s);
2274 if (!PA_SINK_IS_LINKED(s->state))
2275 return s->thread_info.max_request;
2277 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2282 /* Called from main context */
2283 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2284 pa_device_port *port;
2289 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2290 return -PA_ERR_NOTIMPLEMENTED;
2294 return -PA_ERR_NOENTITY;
2296 if (!(port = pa_hashmap_get(s->ports, name)))
2297 return -PA_ERR_NOENTITY;
2299 if (s->active_port == port) {
2300 s->save_port = s->save_port || save;
2304 if ((s->set_port(s, port)) < 0)
2305 return -PA_ERR_NOENTITY;
2307 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2309 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2311 s->active_port = port;
2312 s->save_port = save;
2317 /* Called from main context */
2318 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2319 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2323 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2326 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2328 if (pa_streq(ff, "microphone"))
2329 t = "audio-input-microphone";
2330 else if (pa_streq(ff, "webcam"))
2332 else if (pa_streq(ff, "computer"))
2334 else if (pa_streq(ff, "handset"))
2336 else if (pa_streq(ff, "portable"))
2337 t = "multimedia-player";
2338 else if (pa_streq(ff, "tv"))
2339 t = "video-display";
2342 * The following icons are not part of the icon naming spec,
2343 * because Rodney Dawes sucks as the maintainer of that spec.
2345 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2347 else if (pa_streq(ff, "headset"))
2348 t = "audio-headset";
2349 else if (pa_streq(ff, "headphone"))
2350 t = "audio-headphones";
2351 else if (pa_streq(ff, "speaker"))
2352 t = "audio-speakers";
2353 else if (pa_streq(ff, "hands-free"))
2354 t = "audio-handsfree";
2358 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2359 if (pa_streq(c, "modem"))
2366 t = "audio-input-microphone";
2369 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2370 if (strstr(profile, "analog"))
2372 else if (strstr(profile, "iec958"))
2374 else if (strstr(profile, "hdmi"))
2378 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2380 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2385 pa_bool_t pa_device_init_description(pa_proplist *p) {
2389 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2392 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2393 if (pa_streq(s, "internal")) {
2394 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2398 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2399 if (pa_streq(s, "modem")) {
2400 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2404 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2405 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2412 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2416 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2419 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2420 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2421 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");