2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
/* Upper bound on the number of sink inputs mixed in one pa_sink_render*() pass. */
49 #define MAX_MIX_CHANNELS 32
/* Preferred mix buffer size; one page, aligned down to a frame boundary at use sites. */
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard latency clamp, in usec (500 us minimum, 10 s maximum). */
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Fixed latency assumed for sinks without PA_SINK_DYNAMIC_LATENCY (250 ms). */
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Type-check glue: pa_sink is a pa_msgobject subclass. */
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
/* Destructor, invoked when the object's refcount drops to zero (see sink_free below). */
57 static void sink_free(pa_object *s);
/* Initialize *data for a subsequent pa_sink_new() call. Allocates the
 * proplist here; the struct owns it until pa_sink_new_data_done().
 * NOTE(review): this dump elides lines of the body (e.g. the zeroing of
 * the struct and the return statement) — confirm against upstream. */
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
63 data->proplist = pa_proplist_new();
/* Set data->name to a copy of |name|. (The free of any previous value is
 * presumably on a line elided from this dump — verify against upstream.) */
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
/* Store *spec by value (when non-NULL) and record in sample_spec_is_set
 * whether a spec was supplied; NULL clears the flag. */
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
/* Store *map by value (when non-NULL) and record in channel_map_is_set
 * whether a map was supplied; NULL clears the flag. */
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
/* Store *volume by value (when non-NULL) and record in volume_is_set
 * whether a volume was supplied; NULL clears the flag. */
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Set the initial mute state; !! normalizes any truthy value to TRUE/FALSE.
 * Unlike the other setters there is no "unset" path: muted_is_set is always
 * forced to TRUE here. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
/* Replace the requested active port name. The previous value is freed
 * first, so this setter is safe to call repeatedly; |port| may be NULL
 * (pa_xstrdup(NULL) yields NULL). */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
/* Release everything a pa_sink_new_data owns: the proplist, any ports
 * still in the hashmap (not stolen by pa_sink_new()), the name, and the
 * active-port string. Call exactly once after pa_sink_new(). */
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
/* Drain and free each remaining port, then the (now empty) hashmap itself. */
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
/* Allocate a new port description. |extra| bytes are reserved after the
 * (alignment-padded) struct so callers can embed private per-port data;
 * name and description are duplicated and owned by the port. */
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Free a port created by pa_device_port_new(). NOTE(review): only the
 * description free is visible in this dump; the name free and the free of
 * p itself are presumably on elided lines — confirm against upstream. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
/* Null out the sink's driver-supplied vtable callbacks so a sink starts
 * (or is reset) with no backend hooks installed. Additional callback
 * fields are cleared on lines elided from this dump. */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
/* Create and register a new sink from |data|, firing the NEW and FIXATE
 * hooks, validating/defaulting the sample spec, channel map, volume and
 * ports, and creating the companion ".monitor" source. Returns NULL on
 * validation or hook failure. The caller must still call pa_sink_put()
 * before the sink becomes usable. NOTE(review): many statements (returns,
 * declarations, some field initializers) are elided from this dump. */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
/* Grab the name in the sink namespace; failure policy comes from data->namereg_fail. */
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
190 pa_sink_new_data_set_name(data, name);
/* Let modules veto or adjust the new-sink data before fixation. */
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
194 pa_namereg_unregister(core, name);
198 /* FIXME, need to free s here on failure */
/* Validate driver/name strings and the sample spec; default and validate
 * the channel map, volume and mute state when the caller left them unset. */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
217 if (!data->muted_is_set)
/* Inherit card properties, then fill in description/icon/role defaults. */
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
/* Last chance for modules to adjust the data before it is frozen into the sink. */
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
229 pa_namereg_unregister(core, name);
/* Populate the sink object itself from the fixated data. */
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
237 s->state = PA_SINK_INIT;
239 s->suspend_cause = 0;
240 s->name = pa_xstrdup(name);
241 s->proplist = pa_proplist_copy(data->proplist);
242 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
243 s->module = data->module;
244 s->card = data->card;
246 s->sample_spec = data->sample_spec;
247 s->channel_map = data->channel_map;
249 s->inputs = pa_idxset_new(NULL, NULL);
252 s->reference_volume = s->virtual_volume = data->volume;
253 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
254 s->base_volume = PA_VOLUME_NORM;
255 s->n_volume_steps = PA_VOLUME_NORM+1;
256 s->muted = data->muted;
257 s->refresh_volume = s->refresh_muted = FALSE;
/* Dynamic-latency sinks get 0 (computed later); others use the 250 ms default. */
259 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
267 /* As a minor optimization we just steal the list instead of
269 s->ports = data->ports;
272 s->active_port = NULL;
273 s->save_port = FALSE;
/* Prefer the explicitly requested port; otherwise pick the highest-priority one. */
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
279 if (!s->active_port && s->ports) {
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
/* Obtain a cached silence memchunk matching this sink's sample spec. */
291 pa_silence_memchunk_get(
292 &core->silence_cache,
/* Initialize the IO-thread shadow state; it is synced from the main-thread
 * fields and updated via asyncmsgq messages from here on. */
298 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
299 s->thread_info.soft_volume = s->soft_volume;
300 s->thread_info.soft_muted = s->muted;
301 s->thread_info.state = s->state;
302 s->thread_info.rewind_nbytes = 0;
303 s->thread_info.rewind_requested = FALSE;
304 s->thread_info.max_rewind = 0;
305 s->thread_info.max_request = 0;
306 s->thread_info.requested_latency_valid = FALSE;
307 s->thread_info.requested_latency = 0;
308 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
309 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
314 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
316 pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
317 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
320 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
321 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Create the monitor source that mirrors what this sink plays back. */
325 pa_source_new_data_init(&source_data);
326 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
327 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
328 source_data.name = pa_sprintf_malloc("%s.monitor", name);
329 source_data.driver = data->driver;
330 source_data.module = data->module;
331 source_data.card = data->card;
333 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
334 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
335 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related capability flags. */
337 s->monitor_source = pa_source_new(core, &source_data,
338 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
339 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
341 pa_source_new_data_done(&source_data);
343 if (!s->monitor_source) {
349 s->monitor_source->monitor_of = s;
/* Keep the monitor's latency range and rewind limit in lockstep with ours. */
351 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
352 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
357 /* Called from main context */
/* Transition the sink to |state|: notify the backend via s->set_state(),
 * mirror the change into the IO thread via an asyncmsgq SET_STATE message
 * (rolling back the backend on failure), fire change hooks/subscriptions,
 * and — on suspend/resume transitions — notify all inputs and sync the
 * monitor source. Returns 0 or a negative error (elided returns). */
358 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
360 pa_bool_t suspend_change;
361 pa_sink_state_t original_state;
364 pa_assert_ctl_context();
366 if (s->state == state)
369 original_state = s->state;
/* True when we cross the SUSPENDED boundary in either direction. */
372 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
373 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
376 if ((ret = s->set_state(s, state)) < 0)
/* Push the new state into the IO thread; undo the backend change if that fails. */
380 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
383 s->set_state(s, original_state);
390 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
391 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
392 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
395 if (suspend_change) {
399 /* We're suspending or resuming, tell everyone about it */
401 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
/* Inputs flagged FAIL_ON_SUSPEND are killed instead of suspended. */
402 if (s->state == PA_SINK_SUSPENDED &&
403 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
404 pa_sink_input_kill(i);
406 i->suspend(i, state == PA_SINK_SUSPENDED);
408 if (s->monitor_source)
409 pa_source_sync_suspend(s->monitor_source);
415 /* Called from main context */
/* Finish sink construction: validate the fields the driver must have set
 * between _new() and _put(), derive the volume-related flags, move the
 * sink to IDLE, publish the monitor source, and announce the new sink via
 * subscription event and PUT hook. */
416 void pa_sink_put(pa_sink* s) {
417 pa_sink_assert_ref(s);
418 pa_assert_ctl_context();
420 pa_assert(s->state == PA_SINK_INIT);
422 /* The following fields must be initialized properly when calling _put() */
423 pa_assert(s->asyncmsgq);
424 pa_assert(s->rtpoll);
425 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
427 /* Generally, flags should be initialized via pa_sink_new(). As a
428 * special exception we allow volume related flags to be set
429 * between _new() and _put(). */
/* Software-volume sinks are always dB-capable; flat volume additionally
 * requires the core-wide flat_volumes setting. */
431 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
432 s->flags |= PA_SINK_DECIBEL_VOLUME;
434 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
435 s->flags |= PA_SINK_FLAT_VOLUME;
437 s->thread_info.soft_volume = s->soft_volume;
438 s->thread_info.soft_muted = s->muted;
/* Sanity-check the invariants between flags, volumes, latency mode and
 * the monitor source's mirrored configuration. */
440 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
441 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
442 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
443 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
444 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
446 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
447 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
448 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
450 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
452 pa_source_put(s->monitor_source);
454 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
455 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
458 /* Called from main context */
/* Detach the sink from the core: unregister its name, drop it from the
 * core and card idxsets, kill all remaining inputs, move to UNLINKED,
 * unlink the monitor source, and announce removal. Idempotent by design. */
459 void pa_sink_unlink(pa_sink* s) {
461 pa_sink_input *i, *j = NULL;
464 pa_assert_ctl_context();
466 /* Please note that pa_sink_unlink() does more than simply
467 * reversing pa_sink_put(). It also undoes the registrations
468 * already done in pa_sink_new()! */
470 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
471 * may be called multiple times on the same sink without bad
474 linked = PA_SINK_IS_LINKED(s->state);
477 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
479 if (s->state != PA_SINK_UNLINKED)
480 pa_namereg_unregister(s->core, s->name);
481 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
484 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every input still attached; each kill removes it from s->inputs. */
486 while ((i = pa_idxset_first(s->inputs, NULL))) {
488 pa_sink_input_kill(i);
493 sink_set_state(s, PA_SINK_UNLINKED);
495 s->state = PA_SINK_UNLINKED;
499 if (s->monitor_source)
500 pa_source_unlink(s->monitor_source);
503 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
504 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
508 /* Called from main context */
/* Refcount-zero destructor (installed as s->parent.parent.free). Ensures
 * the sink is unlinked, then releases the monitor source, input sets,
 * silence chunk, proplist and ports. Further frees (name, driver, the
 * object itself) are on lines elided from this dump. */
509 static void sink_free(pa_object *o) {
510 pa_sink *s = PA_SINK(o);
514 pa_assert_ctl_context();
515 pa_assert(pa_sink_refcnt(s) == 0);
517 if (PA_SINK_IS_LINKED(s->state))
520 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
522 if (s->monitor_source) {
523 pa_source_unref(s->monitor_source);
524 s->monitor_source = NULL;
527 pa_idxset_free(s->inputs, NULL, NULL);
/* Drop the references held by the IO-thread input map, then the map itself. */
529 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
530 pa_sink_input_unref(i);
532 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
534 if (s->silence.memblock)
535 pa_memblock_unref(s->silence.memblock);
541 pa_proplist_free(s->proplist);
546 while ((p = pa_hashmap_steal_first(s->ports)))
547 pa_device_port_free(p);
549 pa_hashmap_free(s->ports, NULL, NULL);
555 /* Called from main context */
/* Install the async message queue used to talk to the IO thread, and
 * forward the same queue to the monitor source so both stay in sync. */
556 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
557 pa_sink_assert_ref(s);
558 pa_assert_ctl_context();
562 if (s->monitor_source)
563 pa_source_set_asyncmsgq(s->monitor_source, q);
566 /* Called from main context */
/* Install the rtpoll object driving the IO thread, forwarding it to the
 * monitor source as well. */
567 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
568 pa_sink_assert_ref(s);
569 pa_assert_ctl_context();
573 if (s->monitor_source)
574 pa_source_set_rtpoll(s->monitor_source, p);
577 /* Called from main context */
/* Recompute RUNNING vs IDLE from the current number of users. Suspended
 * sinks are left alone (early return on an elided line). Returns the
 * sink_set_state() result. */
578 int pa_sink_update_status(pa_sink*s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
581 pa_assert(PA_SINK_IS_LINKED(s->state));
583 if (s->state == PA_SINK_SUSPENDED)
586 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
589 /* Called from main context */
/* Add or remove |cause| from the suspend-cause bitmask (mirrored on the
 * monitor source) and transition to SUSPENDED when any cause remains, or
 * back to RUNNING/IDLE when none does. A no-op when the resulting
 * suspended-ness already matches the current state. */
590 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
591 pa_sink_assert_ref(s);
592 pa_assert_ctl_context();
593 pa_assert(PA_SINK_IS_LINKED(s->state));
594 pa_assert(cause != 0);
597 s->suspend_cause |= cause;
598 s->monitor_source->suspend_cause |= cause;
600 s->suspend_cause &= ~cause;
601 s->monitor_source->suspend_cause &= ~cause;
/* Already in the right state for the new cause set? Then nothing to do. */
604 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
607 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
609 if (s->suspend_cause)
610 return sink_set_state(s, PA_SINK_SUSPENDED);
612 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 /* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * pa_sink_input_start_move() is referenced and queued onto |q| (pushed on
 * an elided line); the rest are unreffed and left behind. Returns the
 * queue for a later pa_sink_move_all_finish()/_fail(). */
616 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
617 pa_sink_input *i, *n;
620 pa_sink_assert_ref(s);
621 pa_assert_ctl_context();
622 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the successor before touching i: start_move may detach i from s->inputs. */
627 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
628 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
630 pa_sink_input_ref(i);
632 if (pa_sink_input_start_move(i) >= 0)
635 pa_sink_input_unref(i);
641 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): attach each
 * queued input to |s| (falling back to fail_move on error), drop the
 * queue's reference, then free the emptied queue. */
642 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
645 pa_sink_assert_ref(s);
646 pa_assert_ctl_context();
647 pa_assert(PA_SINK_IS_LINKED(s->state));
650 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
651 if (pa_sink_input_finish_move(i, s, save) < 0)
652 pa_sink_input_fail_move(i);
654 pa_sink_input_unref(i);
657 pa_queue_free(q, NULL, NULL);
660 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): signal failure to
 * every queued input, drop the queue's references, and free the queue. */
661 void pa_sink_move_all_fail(pa_queue *q) {
664 pa_assert_ctl_context();
667 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
668 pa_sink_input_fail_move(i);
669 pa_sink_input_unref(i);
672 pa_queue_free(q, NULL, NULL);
675 /* Called from IO thread context */
/* Execute a rewind of |nbytes| already-rendered bytes: clear the pending
 * request, forward the rewind to every attached input, and then to the
 * monitor source if it is linked. Skipped entirely when no rewind was
 * requested and nbytes is zero, or while suspended (elided early return). */
676 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
680 pa_sink_assert_ref(s);
681 pa_sink_assert_io_context(s);
682 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
684 /* If nobody requested this and this is actually no real rewind
685 * then we can short cut this. Please note that this means that
686 * not all rewind requests triggered upstream will always be
687 * translated in actual requests! */
688 if (!s->thread_info.rewind_requested && nbytes <= 0)
691 s->thread_info.rewind_nbytes = 0;
692 s->thread_info.rewind_requested = FALSE;
694 if (s->thread_info.state == PA_SINK_SUSPENDED)
698 pa_log_debug("Processing rewind...");
700 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
701 pa_sink_input_assert_ref(i);
702 pa_sink_input_process_rewind(i, nbytes);
706 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
707 pa_source_process_rewind(s->monitor_source, nbytes);
710 /* Called from IO thread context */
/* Collect up to |maxinfo| non-silent chunks from the sink's inputs into
 * |info| for mixing. On entry *length is the wanted byte count; it is
 * narrowed (via mixlength) to the shortest chunk actually peeked, so all
 * entries cover the same span. Each stored entry holds a reference to its
 * input in ->userdata. Returns the number of entries filled (elided). */
711 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
715 size_t mixlength = *length;
717 pa_sink_assert_ref(s);
718 pa_sink_assert_io_context(s);
721 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
722 pa_sink_input_assert_ref(i);
724 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
726 if (mixlength == 0 || info->chunk.length < mixlength)
727 mixlength = info->chunk.length;
/* Pure silence contributes nothing to the mix — drop it immediately. */
729 if (pa_memblock_is_silence(info->chunk.memblock)) {
730 pa_memblock_unref(info->chunk.memblock);
/* Remember which input this entry belongs to; ref is dropped in inputs_drop(). */
734 info->userdata = pa_sink_input_ref(i);
736 pa_assert(info->chunk.memblock);
737 pa_assert(info->chunk.length > 0);
750 /* Called from IO thread context */
/* After a render pass: advance every input by result->length, feed
 * direct-output monitor streams their (volume-adjusted) per-input data,
 * release the chunk references and input references held in |info|, and
 * finally post the mixed |result| to the monitor source. */
751 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
755 unsigned n_unreffed = 0;
757 pa_sink_assert_ref(s);
758 pa_sink_assert_io_context(s);
760 pa_assert(result->memblock);
761 pa_assert(result->length > 0);
763 /* We optimize for the case where the order of the inputs has not changed */
765 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
767 pa_mix_info* m = NULL;
769 pa_sink_input_assert_ref(i);
771 /* Let's try to find the matching entry info the pa_mix_info array */
772 for (j = 0; j < n; j ++) {
774 if (info[p].userdata == i) {
/* Consume the rendered bytes from this input's queue. */
785 pa_sink_input_drop(i, result->length);
787 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
789 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* Direct outputs get this input's own chunk with its per-input volume
 * applied (not the full sink mix); fall back path uses the silence/mix
 * chunk on the elided else-branch lines. */
794 if (m && m->chunk.memblock) {
796 pa_memblock_ref(c.memblock);
797 pa_assert(result->length <= c.length);
798 c.length = result->length;
800 pa_memchunk_make_writable(&c, 0);
801 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
804 pa_memblock_ref(c.memblock);
805 pa_assert(result->length <= c.length);
806 c.length = result->length;
809 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
810 pa_source_output_assert_ref(o);
811 pa_assert(o->direct_on_input == i);
812 pa_source_post_direct(s->monitor_source, o, &c);
815 pa_memblock_unref(c.memblock);
/* Release this entry's chunk and input references now that it is consumed. */
820 if (m->chunk.memblock)
821 pa_memblock_unref(m->chunk.memblock);
822 pa_memchunk_reset(&m->chunk);
824 pa_sink_input_unref(m->userdata);
831 /* Now drop references to entries that are included in the
832 * pa_mix_info array but don't exist anymore */
834 if (n_unreffed < n) {
835 for (; n > 0; info++, n--) {
837 pa_sink_input_unref(info->userdata);
838 if (info->chunk.memblock)
839 pa_memblock_unref(info->chunk.memblock);
843 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
844 pa_source_post(s->monitor_source, result);
847 /* Called from IO thread context */
/* Render up to |length| bytes of mixed audio into *result (a new or
 * referenced memchunk; caller unrefs). Fast paths: suspended -> silence;
 * zero inputs -> silence chunk; one input -> its chunk with soft volume
 * applied in place; otherwise pa_mix() into a fresh block. Ends by
 * dropping the rendered bytes from all inputs via inputs_drop(). */
848 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
849 pa_mix_info info[MAX_MIX_CHANNELS];
851 size_t block_size_max;
853 pa_sink_assert_ref(s);
854 pa_sink_assert_io_context(s);
855 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
856 pa_assert(pa_frame_aligned(length, &s->sample_spec));
861 pa_assert(!s->thread_info.rewind_requested);
862 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended sinks just hand out (a slice of) the cached silence chunk. */
864 if (s->thread_info.state == PA_SINK_SUSPENDED) {
865 result->memblock = pa_memblock_ref(s->silence.memblock);
866 result->index = s->silence.index;
867 result->length = PA_MIN(s->silence.length, length);
/* Default/clamp the render size: frame-aligned page size, capped by the
 * largest block the mempool can hand out. */
872 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
874 block_size_max = pa_mempool_block_size_max(s->core->mempool);
875 if (length > block_size_max)
876 length = pa_frame_align(block_size_max, &s->sample_spec);
878 pa_assert(length > 0);
880 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to mix, return silence (branch header elided). */
884 *result = s->silence;
885 pa_memblock_ref(result->memblock);
887 if (result->length > length)
888 result->length = length;
/* n == 1: pass the single input's chunk through, applying soft volume/mute. */
893 *result = info[0].chunk;
894 pa_memblock_ref(result->memblock);
896 if (result->length > length)
897 result->length = length;
899 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
901 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
/* Fully muted: cheaper to swap in silence than to scale samples to zero. */
902 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
903 pa_memblock_unref(result->memblock);
904 pa_silence_memchunk_get(&s->core->silence_cache,
910 pa_memchunk_make_writable(result, 0);
911 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: mix all collected chunks into a freshly allocated block. */
916 result->memblock = pa_memblock_new(s->core->mempool, length);
918 ptr = pa_memblock_acquire(result->memblock);
919 result->length = pa_mix(info, n,
922 &s->thread_info.soft_volume,
923 s->thread_info.soft_muted);
924 pa_memblock_release(result->memblock);
929 inputs_drop(s, info, n, result);
934 /* Called from IO thread context */
/* Render mixed audio into the caller-provided *target buffer (target->length
 * may be shortened to what was actually produced). Same special cases as
 * pa_sink_render(): suspended/zero inputs -> silence written into target;
 * one input -> copy (with soft volume) into target; many -> pa_mix()
 * directly into target's memory. Ends with inputs_drop(). */
935 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
936 pa_mix_info info[MAX_MIX_CHANNELS];
938 size_t length, block_size_max;
940 pa_sink_assert_ref(s);
941 pa_sink_assert_io_context(s);
942 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
944 pa_assert(target->memblock);
945 pa_assert(target->length > 0);
946 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
950 pa_assert(!s->thread_info.rewind_requested);
951 pa_assert(s->thread_info.rewind_nbytes == 0);
953 if (s->thread_info.state == PA_SINK_SUSPENDED) {
954 pa_silence_memchunk(target, &s->sample_spec);
/* Cap the amount rendered this pass by the mempool's maximum block size. */
958 length = target->length;
959 block_size_max = pa_mempool_block_size_max(s->core->mempool);
960 if (length > block_size_max)
961 length = pa_frame_align(block_size_max, &s->sample_spec);
963 pa_assert(length > 0);
965 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: write silence into the target (branch header elided). */
968 if (target->length > length)
969 target->length = length;
971 pa_silence_memchunk(target, &s->sample_spec);
/* n == 1: copy the single chunk into target with soft volume applied. */
975 if (target->length > length)
976 target->length = length;
978 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
980 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
981 pa_silence_memchunk(target, &s->sample_spec);
/* vchunk is a private, volume-scaled view of the input's chunk so the
 * original block is never modified in place. */
985 vchunk = info[0].chunk;
986 pa_memblock_ref(vchunk.memblock);
988 if (vchunk.length > length)
989 vchunk.length = length;
991 if (!pa_cvolume_is_norm(&volume)) {
992 pa_memchunk_make_writable(&vchunk, 0);
993 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
996 pa_memchunk_memcpy(target, &vchunk);
997 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix straight into target's memory at target->index. */
1003 ptr = pa_memblock_acquire(target->memblock);
1005 target->length = pa_mix(info, n,
1006 (uint8_t*) ptr + target->index, length,
1008 &s->thread_info.soft_volume,
1009 s->thread_info.soft_muted);
1011 pa_memblock_release(target->memblock);
1014 inputs_drop(s, info, n, target);
1019 /* Called from IO thread context */
/* Like pa_sink_render_into() but guarantees the WHOLE target is filled:
 * repeatedly renders into successive sub-chunks (the loop driving |chunk|
 * is largely elided from this dump) until target->length bytes exist. */
1020 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1024 pa_sink_assert_ref(s);
1025 pa_sink_assert_io_context(s);
1026 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1028 pa_assert(target->memblock);
1029 pa_assert(target->length > 0);
1030 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1034 pa_assert(!s->thread_info.rewind_requested);
1035 pa_assert(s->thread_info.rewind_nbytes == 0);
1044 pa_sink_render_into(s, &chunk);
1053 /* Called from IO thread context */
/* Render EXACTLY |length| bytes into *result. First pass mirrors
 * pa_sink_render() (silence / single-input / pa_mix over length1st); if
 * that produced fewer bytes than requested, the remainder is filled by
 * rendering into the tail of the (writable) result block. */
1054 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1055 pa_mix_info info[MAX_MIX_CHANNELS];
1056 size_t length1st = length;
1059 pa_sink_assert_ref(s);
1060 pa_sink_assert_io_context(s);
1061 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1062 pa_assert(length > 0);
1063 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1068 pa_assert(!s->thread_info.rewind_requested);
1069 pa_assert(s->thread_info.rewind_nbytes == 0);
1071 pa_assert(length > 0);
1073 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
/* n == 0: take the whole result from the silence cache (branch header elided). */
1076 pa_silence_memchunk_get(&s->core->silence_cache,
1081 } else if (n == 1) {
1084 *result = info[0].chunk;
1085 pa_memblock_ref(result->memblock);
1087 if (result->length > length)
1088 result->length = length;
1090 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1092 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1093 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1094 pa_memblock_unref(result->memblock);
1095 pa_silence_memchunk_get(&s->core->silence_cache,
/* make_writable with the FULL length: the block must be able to hold the
 * final |length| bytes, not just this first pass. */
1101 pa_memchunk_make_writable(result, length);
1102 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: allocate a block for the full length, mix the first length1st bytes. */
1109 result->memblock = pa_memblock_new(s->core->mempool, length);
1111 ptr = pa_memblock_acquire(result->memblock);
1113 result->length = pa_mix(info, n,
1114 (uint8_t*) ptr + result->index, length1st,
1116 &s->thread_info.soft_volume,
1117 s->thread_info.soft_muted);
1119 pa_memblock_release(result->memblock);
1122 inputs_drop(s, info, n, result);
/* Second pass: top up the tail of result until exactly |length| bytes. */
1124 if (result->length < length) {
1127 pa_memchunk_make_writable(result, length);
1129 l = length - result->length;
1130 d = result->index + result->length;
1136 pa_sink_render_into(s, &chunk);
1141 result->length = length;
1147 /* Called from main thread */
/* Query the sink's current playback latency by sending a synchronous
 * GET_LATENCY message into the IO thread. Returns early (elided lines)
 * when suspended or when the sink lacks PA_SINK_LATENCY support. */
1148 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1151 pa_sink_assert_ref(s);
1152 pa_assert_ctl_context();
1153 pa_assert(PA_SINK_IS_LINKED(s->state));
1155 /* The returned value is supposed to be in the time domain of the sound card! */
1157 if (s->state == PA_SINK_SUSPENDED)
1160 if (!(s->flags & PA_SINK_LATENCY))
1163 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1168 /* Called from IO thread */
/* IO-thread variant of pa_sink_get_latency(): calls process_msg() directly
 * (no message-queue round trip, we are already in the IO thread). Returns
 * early (elided lines) when suspended or latency-unsupported. */
1169 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1173 pa_sink_assert_ref(s);
1174 pa_sink_assert_io_context(s);
1175 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1177 /* The returned value is supposed to be in the time domain of the sound card! */
1179 if (s->thread_info.state == PA_SINK_SUSPENDED)
1182 if (!(s->flags & PA_SINK_LATENCY))
1185 o = PA_MSGOBJECT(s);
1187 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1189 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
/* Recompute a sink input's soft volume relative to a new sink volume:
 *   i->relative_volume := i->virtual_volume / new_volume   (linear domain)
 *   i->soft_volume     := i->relative_volume * i->volume_factor
 * |new_volume| must already be remapped to the input's channel map. Fires
 * the SET_VOLUME hook afterwards; thread_info sync is the caller's job. */
1195 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1198 pa_sink_input_assert_ref(i);
1199 pa_assert(new_volume->channels == i->sample_spec.channels);
1202 * This basically calculates:
1204 * i->relative_volume := i->virtual_volume / new_volume
1205 * i->soft_volume := i->relative_volume * i->volume_factor
1208 /* The new sink volume passed in here must already be remapped to
1209 * the sink input's channel map! */
1211 i->soft_volume.channels = i->sample_spec.channels;
1213 for (c = 0; c < i->sample_spec.channels; c++)
/* Division by a muted (zero) channel is undefined; mute the channel instead. */
1215 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1216 /* We leave i->relative_volume untouched */
1217 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1219 i->relative_volume[c] =
1220 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1221 pa_sw_volume_to_linear(new_volume->values[c]);
1223 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1224 i->relative_volume[c] *
1225 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1228 /* Hooks have the ability to play games with i->soft_volume */
1229 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1231 /* We don't copy the soft_volume to the thread_info data
1232 * here. That must be done by the caller */
1235 /* Called from main thread */
/* Flat-volume bookkeeping: compute into *new_volume the per-channel
 * maximum of all connected inputs' virtual volumes (remapped to the sink's
 * channel map), then recompute every input's soft volume against it. Does
 * NOT apply the volume — the caller must follow up with pa_sink_set_volume(). */
1236 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1240 pa_sink_assert_ref(s);
1241 pa_assert_ctl_context();
1242 pa_assert(new_volume);
1243 pa_assert(PA_SINK_IS_LINKED(s->state));
1244 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1246 /* This is called whenever a sink input volume changes or a sink
1247 * input is added/removed and we might need to fix up the sink
1248 * volume accordingly. Please note that we don't actually update
1249 * the sinks volume here, we only return how it needs to be
1250 * updated. The caller should then call pa_sink_set_volume().*/
1252 if (pa_idxset_isempty(s->inputs)) {
1253 /* In the special case that we have no sink input we leave the
1254 * volume unmodified. */
1255 *new_volume = s->reference_volume;
/* Start from mute and raise each channel to the loudest input. */
1259 pa_cvolume_mute(new_volume, s->channel_map.channels);
1261 /* First let's determine the new maximum volume of all inputs
1262 * connected to this sink */
1263 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1265 pa_cvolume remapped_volume;
1267 remapped_volume = i->virtual_volume;
1268 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1270 for (c = 0; c < new_volume->channels; c++)
1271 if (remapped_volume.values[c] > new_volume->values[c])
1272 new_volume->values[c] = remapped_volume.values[c];
1275 /* Then, let's update the soft volumes of all inputs connected
1277 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1278 pa_cvolume remapped_new_volume;
1280 remapped_new_volume = *new_volume;
1281 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1282 compute_new_soft_volume(i, &remapped_new_volume);
1284 /* We don't copy soft_volume to the thread_info data here
1285 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1286 * want the update to be atomically with the sink volume
1287 * update, hence we do it within the pa_sink_set_volume() call
1292 /* Called from main thread */
/* Push a sink-level volume change down to the inputs under flat volume:
 * for each input, virtual_volume := relative_volume * sink virtual_volume
 * (linear domain, sink volume remapped to the input's channel map), then
 * recompute the soft volume and notify subscribers if it changed. Ends by
 * synchronously syncing soft volumes into the IO thread. */
1293 void pa_sink_propagate_flat_volume(pa_sink *s) {
1297 pa_sink_assert_ref(s);
1298 pa_assert_ctl_context();
1299 pa_assert(PA_SINK_IS_LINKED(s->state));
1300 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1302 /* This is called whenever the sink volume changes that is not
1303 * caused by a sink input volume change. We need to fix up the
1304 * sink input volumes accordingly */
1306 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1307 pa_cvolume sink_volume, new_virtual_volume;
1310 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1312 sink_volume = s->virtual_volume;
1313 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1315 for (c = 0; c < i->sample_spec.channels; c++)
1316 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1317 i->relative_volume[c] *
1318 pa_sw_volume_to_linear(sink_volume.values[c]));
1320 new_virtual_volume.channels = i->sample_spec.channels;
1322 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1323 i->virtual_volume = new_virtual_volume;
1325 /* Hmm, the soft volume might no longer actually match
1326 * what has been chosen as new virtual volume here,
1327 * especially when the old volume was
1328 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1330 compute_new_soft_volume(i, &sink_volume);
1332 /* The virtual volume changed, let's tell people so */
1333 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1337 /* If the soft_volume of any of the sink inputs got changed, let's
1338 * make sure the thread copies are synced up. */
1339 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1342 /* Called from main thread */
/* Set the sink's virtual volume. 'propagate' pushes the change to inputs in
 * flat-volume mode, 'sendmsg' controls notifying the IO thread (see below —
 * presumably gates the SET_VOLUME message; the guard is elided here, TODO
 * confirm), 'become_reference' also updates reference_volume, 'save' marks
 * the volume for persistent storage. */
1343 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1344 pa_bool_t virtual_volume_changed;
1346 pa_sink_assert_ref(s);
1347 pa_assert_ctl_context();
1348 pa_assert(PA_SINK_IS_LINKED(s->state));
1350 pa_assert(pa_cvolume_valid(volume));
1351 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1353 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1354 s->virtual_volume = *volume;
/* Keep save_volume set if nothing changed and it was already set. */
1355 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1357 if (become_reference)
1358 s->reference_volume = s->virtual_volume;
1360 /* Propagate this volume change back to the inputs */
1361 if (virtual_volume_changed)
1362 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1363 pa_sink_propagate_flat_volume(s);
1365 if (s->set_volume) {
1366 /* If we have a function set_volume(), then we do not apply a
1367 * soft volume by default. However, set_volume() is free to
1368 * apply one to s->soft_volume */
1370 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1374 /* If we have no function set_volume(), then the soft volume
1375 * becomes the virtual volume */
1376 s->soft_volume = s->virtual_volume;
1378 /* This tells the sink that soft and/or virtual volume changed */
1380 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1382 if (virtual_volume_changed)
1383 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1386 /* Called from main thread. Only to be called by sink implementor */
/* Set the software attenuation directly; if the sink is linked, notify the
 * IO thread via SET_VOLUME, otherwise write the thread copy in place. */
1387 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1388 pa_sink_assert_ref(s);
1389 pa_assert_ctl_context();
1392 s->soft_volume = *volume;
1394 if (PA_SINK_IS_LINKED(s->state))
1395 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
/* Not linked yet: no IO thread to message, update the thread copy here. */
1397 s->thread_info.soft_volume = *volume;
1400 /* Called from main thread */
/* Return the current volume, optionally re-reading it from the hardware
 * first (GET_VOLUME message). A hardware-side change is treated as a
 * user-initiated change: it becomes the reference volume, is marked for
 * saving, propagated in flat-volume mode, and announced to subscribers. */
1401 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1402 pa_sink_assert_ref(s);
1403 pa_assert_ctl_context();
1404 pa_assert(PA_SINK_IS_LINKED(s->state));
1406 if (s->refresh_volume || force_refresh) {
1407 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1412 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1414 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1416 s->reference_volume = s->virtual_volume;
1418 /* Something got changed in the hardware. It probably
1419 * makes sense to save changed hw settings given that hw
1420 * volume changes not triggered by PA are almost certainly
1421 * done by the user. */
1422 s->save_volume = TRUE;
1424 if (s->flags & PA_SINK_FLAT_VOLUME)
1425 pa_sink_propagate_flat_volume(s);
1427 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1431 return reference ? &s->reference_volume : &s->virtual_volume;
1434 /* Called from main thread */
/* For sink implementors: report a volume change that originated in the
 * backend. No-op if the volume is unchanged; otherwise adopt it as both
 * virtual and reference volume, mark it for saving, propagate in
 * flat-volume mode and notify subscribers. */
1435 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1436 pa_sink_assert_ref(s);
1437 pa_assert_ctl_context();
1438 pa_assert(PA_SINK_IS_LINKED(s->state));
1440 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1441 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1444 s->reference_volume = s->virtual_volume = *new_volume;
1445 s->save_volume = TRUE;
1447 if (s->flags & PA_SINK_FLAT_VOLUME)
1448 pa_sink_propagate_flat_volume(s);
1450 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1453 /* Called from main thread */
/* Set the mute state ('save' marks it for persistent storage), push it to
 * the IO thread via SET_MUTE, and post a change event if it changed. */
1454 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1455 pa_bool_t old_muted;
1457 pa_sink_assert_ref(s);
1458 pa_assert_ctl_context();
1459 pa_assert(PA_SINK_IS_LINKED(s->state));
1461 old_muted = s->muted;
/* Keep save_muted set if the state did not change and it was already set. */
1463 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1468 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1470 if (old_muted != s->muted)
1471 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1474 /* Called from main thread */
/* Return the mute state, optionally re-reading it from the hardware first
 * (GET_MUTE message). A hardware-side change is marked for saving,
 * announced to subscribers, and the soft mute is re-synced via SET_MUTE. */
1475 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1477 pa_sink_assert_ref(s);
1478 pa_assert_ctl_context();
1479 pa_assert(PA_SINK_IS_LINKED(s->state));
1481 if (s->refresh_muted || force_refresh) {
1482 pa_bool_t old_muted = s->muted;
1487 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1489 if (old_muted != s->muted) {
1490 s->save_muted = TRUE;
1492 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1494 /* Make sure the soft mute status stays in sync */
1495 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1503 /* Called from main thread */
/* For sink implementors: report a mute change that originated in the
 * backend. No-op if unchanged; otherwise adopt it, mark it for saving and
 * notify subscribers. */
1504 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1505 pa_sink_assert_ref(s);
1506 pa_assert_ctl_context();
1507 pa_assert(PA_SINK_IS_LINKED(s->state));
1509 /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
1511 if (s->muted == new_muted)
1514 s->muted = new_muted;
1515 s->save_muted = TRUE;
1517 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1520 /* Called from main thread */
/* Merge property list 'p' into the sink's proplist using update mode
 * 'mode'; if the sink is linked, fire the proplist-changed hook and post a
 * change event. */
1521 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1522 pa_sink_assert_ref(s);
1523 pa_assert_ctl_context();
1526 pa_proplist_update(s->proplist, mode, p);
1528 if (PA_SINK_IS_LINKED(s->state)) {
1529 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1530 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1536 /* Called from main thread */
1537 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set (or clear, with NULL) PA_PROP_DEVICE_DESCRIPTION, keep the monitor
 * source's description in sync, and notify hooks/subscribers if linked. */
1538 void pa_sink_set_description(pa_sink *s, const char *description) {
1540 pa_sink_assert_ref(s);
1541 pa_assert_ctl_context();
/* Nothing to do if clearing a description that is not set. */
1543 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1546 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1548 if (old && description && pa_streq(old, description))
1552 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1554 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1556 if (s->monitor_source) {
1559 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1560 pa_source_set_description(s->monitor_source, n);
1564 if (PA_SINK_IS_LINKED(s->state)) {
1565 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1566 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1570 /* Called from main thread */
/* Number of streams linked to this sink, including streams connected to
 * its monitor source (contrast with pa_sink_used_by()). */
1571 unsigned pa_sink_linked_by(pa_sink *s) {
1574 pa_sink_assert_ref(s);
1575 pa_assert_ctl_context();
1576 pa_assert(PA_SINK_IS_LINKED(s->state));
1578 ret = pa_idxset_size(s->inputs);
1580 /* We add in the number of streams connected to us here. Please
1581 * note the asymmmetry to pa_sink_used_by()! */
1583 if (s->monitor_source)
1584 ret += pa_source_linked_by(s->monitor_source);
1589 /* Called from main thread */
/* Number of actively playing (non-corked) inputs; monitor-source streams
 * are deliberately excluded here. */
1590 unsigned pa_sink_used_by(pa_sink *s) {
1593 pa_sink_assert_ref(s);
1594 pa_assert_ctl_context();
1595 pa_assert(PA_SINK_IS_LINKED(s->state));
1597 ret = pa_idxset_size(s->inputs);
1598 pa_assert(ret >= s->n_corked);
1600 /* Streams connected to our monitor source do not matter for
1601 * pa_sink_used_by()!.*/
1603 return ret - s->n_corked;
1606 /* Called from main thread */
/* Count the inputs (plus monitor-source users) that inhibit auto-suspend:
 * corked inputs and inputs flagged DONT_INHIBIT_AUTO_SUSPEND are skipped. */
1607 unsigned pa_sink_check_suspend(pa_sink *s) {
1612 pa_sink_assert_ref(s);
1613 pa_assert_ctl_context();
1615 if (!PA_SINK_IS_LINKED(s->state))
1620 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1621 pa_sink_input_state_t st;
1623 st = pa_sink_input_get_state(i);
1624 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1626 if (st == PA_SINK_INPUT_CORKED)
1629 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1635 if (s->monitor_source)
1636 ret += pa_source_check_suspend(s->monitor_source);
1641 /* Called from the IO thread */
/* Copy each input's main-thread soft_volume into its thread_info copy and
 * request a rewind so the new volume takes effect on queued audio. */
1642 static void sync_input_volumes_within_thread(pa_sink *s) {
1646 pa_sink_assert_ref(s);
1647 pa_sink_assert_io_context(s);
1649 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1650 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1653 i->thread_info.soft_volume = i->soft_volume;
1654 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1658 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq: input add/remove,
 * stream moves, volume/mute sync, state changes, latency and rewind/request
 * limits. Runs in the IO thread for a started sink (main thread before). */
1659 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1660 pa_sink *s = PA_SINK(o);
1661 pa_sink_assert_ref(s);
1663 switch ((pa_sink_message_t) code) {
1665 case PA_SINK_MESSAGE_ADD_INPUT: {
1666 pa_sink_input *i = PA_SINK_INPUT(userdata);
1668 /* If you change anything here, make sure to change the
1669 * sink input handling a few lines down at
1670 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1672 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1674 /* Since the caller sleeps in pa_sink_input_put(), we can
1675 * safely access data outside of thread_info even though
1678 if ((i->thread_info.sync_prev = i->sync_prev)) {
1679 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1680 pa_assert(i->sync_prev->sync_next == i);
1681 i->thread_info.sync_prev->thread_info.sync_next = i;
1684 if ((i->thread_info.sync_next = i->sync_next)) {
1685 pa_assert(i->sink == i->thread_info.sync_next->sink);
1686 pa_assert(i->sync_next->sync_prev == i);
1687 i->thread_info.sync_next->thread_info.sync_prev = i;
1690 pa_assert(!i->thread_info.attached);
1691 i->thread_info.attached = TRUE;
1696 pa_sink_input_set_state_within_thread(i, i->state);
1698 /* The requested latency of the sink input needs to be
1699 * fixed up and then configured on the sink */
1701 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1702 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1704 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1705 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1707 /* We don't rewind here automatically. This is left to the
1708 * sink input implementor because some sink inputs need a
1709 * slow start, i.e. need some time to buffer client
1710 * samples before beginning streaming. */
1712 /* In flat volume mode we need to update the volume as
1714 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1717 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1718 pa_sink_input *i = PA_SINK_INPUT(userdata);
1720 /* If you change anything here, make sure to change the
1721 * sink input handling a few lines down at
1722 * PA_SINK_MESSAGE_START_MOVE, too. */
1727 pa_sink_input_set_state_within_thread(i, i->state);
1729 pa_assert(i->thread_info.attached);
1730 i->thread_info.attached = FALSE;
1732 /* Since the caller sleeps in pa_sink_input_unlink(),
1733 * we can safely access data outside of thread_info even
1734 * though it is mutable */
1736 pa_assert(!i->sync_prev);
1737 pa_assert(!i->sync_next);
/* Unhook this input from the thread-side synchronization chain. */
1739 if (i->thread_info.sync_prev) {
1740 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1741 i->thread_info.sync_prev = NULL;
1744 if (i->thread_info.sync_next) {
1745 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1746 i->thread_info.sync_next = NULL;
1749 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1750 pa_sink_input_unref(i);
1752 pa_sink_invalidate_requested_latency(s);
1753 pa_sink_request_rewind(s, (size_t) -1);
1755 /* In flat volume mode we need to update the volume as
1757 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1760 case PA_SINK_MESSAGE_START_MOVE: {
1761 pa_sink_input *i = PA_SINK_INPUT(userdata);
1763 /* We don't support moving synchronized streams. */
1764 pa_assert(!i->sync_prev);
1765 pa_assert(!i->sync_next);
1766 pa_assert(!i->thread_info.sync_next);
1767 pa_assert(!i->thread_info.sync_prev);
1769 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1771 size_t sink_nbytes, total_nbytes;
1773 /* Get the latency of the sink */
1774 if (!(s->flags & PA_SINK_LATENCY) ||
1775 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
/* Rewind back everything still buffered for this input (sink latency
 * plus its render queue) so nothing is lost across the move. */
1778 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1779 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1781 if (total_nbytes > 0) {
1782 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1783 i->thread_info.rewrite_flush = TRUE;
1784 pa_sink_input_process_rewind(i, sink_nbytes);
1791 pa_assert(i->thread_info.attached);
1792 i->thread_info.attached = FALSE;
1794 /* Let's remove the sink input ...*/
1795 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1796 pa_sink_input_unref(i);
1798 pa_sink_invalidate_requested_latency(s);
1800 pa_log_debug("Requesting rewind due to started move");
1801 pa_sink_request_rewind(s, (size_t) -1);
1803 /* In flat volume mode we need to update the volume as
1805 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1808 case PA_SINK_MESSAGE_FINISH_MOVE: {
1809 pa_sink_input *i = PA_SINK_INPUT(userdata);
1811 /* We don't support moving synchronized streams. */
1812 pa_assert(!i->sync_prev);
1813 pa_assert(!i->sync_next);
1814 pa_assert(!i->thread_info.sync_next);
1815 pa_assert(!i->thread_info.sync_prev);
1817 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1819 pa_assert(!i->thread_info.attached);
1820 i->thread_info.attached = TRUE;
1825 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1826 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1828 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1829 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1831 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1835 /* Get the latency of the sink */
1836 if (!(s->flags & PA_SINK_LATENCY) ||
1837 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1840 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1843 pa_sink_input_drop(i, nbytes);
1845 pa_log_debug("Requesting rewind due to finished move");
1846 pa_sink_request_rewind(s, nbytes);
1849 /* In flat volume mode we need to update the volume as
1851 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1854 case PA_SINK_MESSAGE_SET_VOLUME:
1856 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1857 s->thread_info.soft_volume = s->soft_volume;
1858 pa_sink_request_rewind(s, (size_t) -1);
1861 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1864 /* Fall through ... */
1866 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1867 sync_input_volumes_within_thread(s);
1870 case PA_SINK_MESSAGE_GET_VOLUME:
1873 case PA_SINK_MESSAGE_SET_MUTE:
1875 if (s->thread_info.soft_muted != s->muted) {
1876 s->thread_info.soft_muted = s->muted;
1877 pa_sink_request_rewind(s, (size_t) -1);
1882 case PA_SINK_MESSAGE_GET_MUTE:
1885 case PA_SINK_MESSAGE_SET_STATE: {
/* Track whether this state change crosses the suspended boundary in
 * either direction, so inputs can be told about it below. */
1887 pa_bool_t suspend_change =
1888 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1889 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1891 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1893 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1894 s->thread_info.rewind_nbytes = 0;
1895 s->thread_info.rewind_requested = FALSE;
1898 if (suspend_change) {
1902 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1903 if (i->suspend_within_thread)
1904 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1910 case PA_SINK_MESSAGE_DETACH:
1912 /* Detach all streams */
1913 pa_sink_detach_within_thread(s);
1916 case PA_SINK_MESSAGE_ATTACH:
1918 /* Reattach all streams */
1919 pa_sink_attach_within_thread(s);
1922 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1924 pa_usec_t *usec = userdata;
1925 *usec = pa_sink_get_requested_latency_within_thread(s);
1927 if (*usec == (pa_usec_t) -1)
1928 *usec = s->thread_info.max_latency;
1933 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1934 pa_usec_t *r = userdata;
1936 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1941 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1942 pa_usec_t *r = userdata;
1944 r[0] = s->thread_info.min_latency;
1945 r[1] = s->thread_info.max_latency;
1950 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1952 *((size_t*) userdata) = s->thread_info.max_rewind;
1955 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1957 *((size_t*) userdata) = s->thread_info.max_request;
1960 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1962 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1965 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1967 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1970 case PA_SINK_MESSAGE_GET_LATENCY:
1971 case PA_SINK_MESSAGE_MAX:
1978 /* Called from main thread */
/* Suspend or resume every sink in the core for the given cause. */
1979 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1984 pa_core_assert_ref(c);
1985 pa_assert_ctl_context();
1986 pa_assert(cause != 0);
1988 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
1991 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1998 /* Called from main thread */
/* Ask the IO thread to detach all streams (synchronous message). */
1999 void pa_sink_detach(pa_sink *s) {
2000 pa_sink_assert_ref(s);
2001 pa_assert_ctl_context();
2002 pa_assert(PA_SINK_IS_LINKED(s->state));
2004 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2007 /* Called from main thread */
/* Ask the IO thread to reattach all streams (synchronous message). */
2008 void pa_sink_attach(pa_sink *s) {
2009 pa_sink_assert_ref(s);
2010 pa_assert_ctl_context();
2011 pa_assert(PA_SINK_IS_LINKED(s->state));
2013 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2016 /* Called from IO thread */
/* Detach every input of this sink, and the monitor source's outputs too. */
2017 void pa_sink_detach_within_thread(pa_sink *s) {
2021 pa_sink_assert_ref(s);
2022 pa_sink_assert_io_context(s);
2023 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2025 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2029 if (s->monitor_source)
2030 pa_source_detach_within_thread(s->monitor_source);
2033 /* Called from IO thread */
/* Reattach every input of this sink, and the monitor source's outputs. */
2034 void pa_sink_attach_within_thread(pa_sink *s) {
2038 pa_sink_assert_ref(s);
2039 pa_sink_assert_io_context(s);
2040 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2042 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2046 if (s->monitor_source)
2047 pa_source_attach_within_thread(s->monitor_source);
2050 /* Called from IO thread */
/* Request a rewind of up to 'nbytes' ((size_t) -1 means "as much as
 * possible", clamped to max_rewind). Ignored while suspended; a pending
 * larger-or-equal request is not shrunk. Notifies the implementor via the
 * request_rewind() callback if one is set. */
2051 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2052 pa_sink_assert_ref(s);
2053 pa_sink_assert_io_context(s);
2054 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2056 if (s->thread_info.state == PA_SINK_SUSPENDED)
2059 if (nbytes == (size_t) -1)
2060 nbytes = s->thread_info.max_rewind;
2062 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2064 if (s->thread_info.rewind_requested &&
2065 nbytes <= s->thread_info.rewind_nbytes)
2068 s->thread_info.rewind_nbytes = nbytes;
2069 s->thread_info.rewind_requested = TRUE;
2071 if (s->request_rewind)
2072 s->request_rewind(s);
2075 /* Called from IO thread */
/* Compute the effective requested latency: the minimum over all inputs and
 * the monitor source, clamped to [min_latency, max_latency]; (pa_usec_t) -1
 * means "no request". Fixed-latency sinks just return the clamped fixed
 * value; the result is cached once the sink is linked. */
2076 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2077 pa_usec_t result = (pa_usec_t) -1;
2080 pa_usec_t monitor_latency;
2082 pa_sink_assert_ref(s);
2083 pa_sink_assert_io_context(s);
2085 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2086 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2088 if (s->thread_info.requested_latency_valid)
2089 return s->thread_info.requested_latency;
2091 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2093 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2094 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2095 result = i->thread_info.requested_sink_latency;
2097 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2099 if (monitor_latency != (pa_usec_t) -1 &&
2100 (result == (pa_usec_t) -1 || result > monitor_latency))
2101 result = monitor_latency;
2103 if (result != (pa_usec_t) -1)
2104 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2106 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2107 /* Only cache if properly initialized */
2108 s->thread_info.requested_latency = result;
2109 s->thread_info.requested_latency_valid = TRUE;
2115 /* Called from main thread */
/* Main-thread accessor: fetch the requested latency from the IO thread
 * via GET_REQUESTED_LATENCY (short-circuits while suspended). */
2116 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2119 pa_sink_assert_ref(s);
2120 pa_assert_ctl_context();
2121 pa_assert(PA_SINK_IS_LINKED(s->state));
2123 if (s->state == PA_SINK_SUSPENDED)
2126 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2130 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update max_rewind and fan the new value out to all inputs and to the
 * monitor source. No-op if unchanged. */
2131 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2135 pa_sink_assert_ref(s);
2136 pa_sink_assert_io_context(s);
2138 if (max_rewind == s->thread_info.max_rewind)
2141 s->thread_info.max_rewind = max_rewind;
2143 if (PA_SINK_IS_LINKED(s->thread_info.state))
2144 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2145 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2147 if (s->monitor_source)
2148 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2151 /* Called from main thread */
/* Set max_rewind: via SET_MAX_REWIND message once linked, directly before. */
2152 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2153 pa_sink_assert_ref(s);
2154 pa_assert_ctl_context();
2156 if (PA_SINK_IS_LINKED(s->state))
2157 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2159 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2162 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update max_request and fan the new value out to all inputs. No-op if
 * unchanged. */
2163 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2166 pa_sink_assert_ref(s);
2167 pa_sink_assert_io_context(s);
2169 if (max_request == s->thread_info.max_request)
2172 s->thread_info.max_request = max_request;
2174 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2177 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2178 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2182 /* Called from main thread */
/* Set max_request: via SET_MAX_REQUEST message once linked, directly before. */
2183 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2184 pa_sink_assert_ref(s);
2185 pa_assert_ctl_context();
2187 if (PA_SINK_IS_LINKED(s->state))
2188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2190 pa_sink_set_max_request_within_thread(s, max_request);
2193 /* Called from IO thread */
/* Drop the cached requested latency (dynamic-latency sinks only) and
 * notify the implementor and every input that cares. */
2194 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2198 pa_sink_assert_ref(s);
2199 pa_sink_assert_io_context(s);
2201 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2204 s->thread_info.requested_latency_valid = FALSE;
2206 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2208 if (s->update_requested_latency)
2209 s->update_requested_latency(s);
2211 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2212 if (i->update_sink_requested_latency)
2213 i->update_sink_requested_latency(i);
2217 /* Called from main thread */
/* Clamp and set the supported latency range [min_latency, max_latency];
 * 0 means "no limit" and is replaced by the absolute bounds. Delivered via
 * SET_LATENCY_RANGE message once linked, directly before. */
2218 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2219 pa_sink_assert_ref(s);
2220 pa_assert_ctl_context();
2222 /* min_latency == 0: no limit
2223 * min_latency anything else: specified limit
2225 * Similar for max_latency */
2227 if (min_latency < ABSOLUTE_MIN_LATENCY)
2228 min_latency = ABSOLUTE_MIN_LATENCY;
2230 if (max_latency <= 0 ||
2231 max_latency > ABSOLUTE_MAX_LATENCY)
2232 max_latency = ABSOLUTE_MAX_LATENCY;
2234 pa_assert(min_latency <= max_latency);
2236 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2237 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2238 max_latency == ABSOLUTE_MAX_LATENCY) ||
2239 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2241 if (PA_SINK_IS_LINKED(s->state)) {
2247 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2249 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2252 /* Called from main thread */
/* Read the latency range: via GET_LATENCY_RANGE message once linked,
 * straight from thread_info before. */
2253 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2254 pa_sink_assert_ref(s);
2255 pa_assert_ctl_context();
2256 pa_assert(min_latency);
2257 pa_assert(max_latency);
2259 if (PA_SINK_IS_LINKED(s->state)) {
2260 pa_usec_t r[2] = { 0, 0 };
2262 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2264 *min_latency = r[0];
2265 *max_latency = r[1];
2267 *min_latency = s->thread_info.min_latency;
2268 *max_latency = s->thread_info.max_latency;
2272 /* Called from IO thread */
/* Thread-side half of pa_sink_set_latency_range(): store the new bounds,
 * notify inputs, invalidate the cached requested latency, and mirror the
 * range onto the monitor source. */
2273 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2276 pa_sink_assert_ref(s);
2277 pa_sink_assert_io_context(s);
2279 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2280 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2281 pa_assert(min_latency <= max_latency);
2283 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2284 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2285 max_latency == ABSOLUTE_MAX_LATENCY) ||
2286 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2288 s->thread_info.min_latency = min_latency;
2289 s->thread_info.max_latency = max_latency;
2291 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2294 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2295 if (i->update_sink_latency_range)
2296 i->update_sink_latency_range(i);
2299 pa_sink_invalidate_requested_latency(s);
2301 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2304 /* Called from main thread, before the sink is put */
/* Set the fixed latency (used when PA_SINK_DYNAMIC_LATENCY is not set),
 * clamped to the absolute bounds, and mirror it onto the monitor source. */
2305 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2306 pa_sink_assert_ref(s);
2307 pa_assert_ctl_context();
2308 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2310 if (latency < ABSOLUTE_MIN_LATENCY)
2311 latency = ABSOLUTE_MIN_LATENCY;
2313 if (latency > ABSOLUTE_MAX_LATENCY)
2314 latency = ABSOLUTE_MAX_LATENCY;
2316 s->fixed_latency = latency;
2317 pa_source_set_fixed_latency(s->monitor_source, latency);
2320 /* Called from main context */
/* Read max_rewind: via GET_MAX_REWIND message once linked, straight from
 * thread_info before. */
2321 size_t pa_sink_get_max_rewind(pa_sink *s) {
2323 pa_sink_assert_ref(s);
2324 pa_assert_ctl_context();
2326 if (!PA_SINK_IS_LINKED(s->state))
2327 return s->thread_info.max_rewind;
2329 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2334 /* Called from main context */
/* Read max_request: via GET_MAX_REQUEST message once linked, straight from
 * thread_info before. */
2335 size_t pa_sink_get_max_request(pa_sink *s) {
2337 pa_sink_assert_ref(s);
2338 pa_assert_ctl_context();
2340 if (!PA_SINK_IS_LINKED(s->state))
2341 return s->thread_info.max_request;
2343 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2348 /* Called from main context */
/* Switch the sink to the named port. Returns -PA_ERR_NOTIMPLEMENTED if the
 * sink has no set_port() callback, -PA_ERR_NOENTITY if the port is unknown
 * or the driver rejects it; 'save' marks the choice for persistent storage. */
2349 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2350 pa_device_port *port;
2352 pa_sink_assert_ref(s);
2353 pa_assert_ctl_context();
2356 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2357 return -PA_ERR_NOTIMPLEMENTED;
2361 return -PA_ERR_NOENTITY;
2363 if (!(port = pa_hashmap_get(s->ports, name)))
2364 return -PA_ERR_NOENTITY;
/* Already active: just upgrade the save flag. */
2366 if (s->active_port == port) {
2367 s->save_port = s->save_port || save;
2371 if ((s->set_port(s, port)) < 0)
2372 return -PA_ERR_NOENTITY;
2374 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2376 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2378 s->active_port = port;
2379 s->save_port = save;
/* Fill in PA_PROP_DEVICE_ICON_NAME (if not already set) from the device's
 * form factor, class, profile and bus properties. 'is_sink' selects the
 * fallback icon direction (its exact use is in elided lines — TODO confirm). */
2384 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2385 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2389 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2392 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2394 if (pa_streq(ff, "microphone"))
2395 t = "audio-input-microphone";
2396 else if (pa_streq(ff, "webcam"))
2398 else if (pa_streq(ff, "computer"))
2400 else if (pa_streq(ff, "handset"))
2402 else if (pa_streq(ff, "portable"))
2403 t = "multimedia-player";
2404 else if (pa_streq(ff, "tv"))
2405 t = "video-display";
2408 * The following icons are not part of the icon naming spec,
2409 * because Rodney Dawes sucks as the maintainer of that spec.
2411 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2413 else if (pa_streq(ff, "headset"))
2414 t = "audio-headset";
2415 else if (pa_streq(ff, "headphone"))
2416 t = "audio-headphones";
2417 else if (pa_streq(ff, "speaker"))
2418 t = "audio-speakers";
2419 else if (pa_streq(ff, "hands-free"))
2420 t = "audio-handsfree";
2424 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2425 if (pa_streq(c, "modem"))
2432 t = "audio-input-microphone";
/* Append a profile-derived suffix (held in 's', set in elided lines). */
2435 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2436 if (strstr(profile, "analog"))
2438 else if (strstr(profile, "iec958"))
2440 else if (strstr(profile, "hdmi"))
2444 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2446 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fill in PA_PROP_DEVICE_DESCRIPTION (if not already set) from form factor,
 * class or product name, optionally combined with the profile description. */
2451 pa_bool_t pa_device_init_description(pa_proplist *p) {
2452 const char *s, *d = NULL, *k;
2455 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2458 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2459 if (pa_streq(s, "internal"))
2460 d = _("Internal Audio");
2463 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2464 if (pa_streq(s, "modem"))
2468 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2473 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2476 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2478 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2483 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2487 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2490 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2491 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2492 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");