2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
/* Maximum number of streams mixed in one render pass (see fill_mix_info()). */
49 #define MAX_MIX_CHANNELS 32
/* Base render buffer size; frame-aligned before use in pa_sink_render(). */
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard clamps for sink latency, in microseconds. */
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency assumed for sinks without PA_SINK_DYNAMIC_LATENCY (see pa_sink_new()). */
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
/* Destructor, installed as parent.parent.free in pa_sink_new(). */
57 static void sink_free(pa_object *s);
/* Initialize a pa_sink_new_data struct before filling it in; allocates a
 * fresh, empty proplist. NOTE(review): interior lines of this function are
 * elided in this excerpt (orig. lines 60-67). */
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
63 data->proplist = pa_proplist_new();
/* Store a private copy of 'name' in the new-data struct.
 * NOTE(review): elided lines presumably free any previous value — confirm. */
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
72 data->name = pa_xstrdup(name);
/* Copy *spec into the new-data struct (if non-NULL) and record whether an
 * explicit sample spec was provided. */
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
/* Copy *map into the new-data struct (if non-NULL) and record whether an
 * explicit channel map was provided. */
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
/* Copy *volume into the new-data struct (if non-NULL) and record whether an
 * explicit initial volume was provided. */
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
/* Record an explicit initial mute setting; 'mute' is normalized to 0/1. */
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
/* Replace the requested active port name; frees any previously set value. */
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
/* Release everything still owned by a pa_sink_new_data: proplist, ports
 * hashmap (and the ports in it), name and active-port strings. Counterpart
 * of pa_sink_new_data_init(). */
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
113 pa_proplist_free(data->proplist);
/* Drain and free the ports hashmap, if one is still attached. */
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
121 pa_hashmap_free(data->ports, NULL, NULL);
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
/* Allocate a device port plus 'extra' trailing bytes of implementor-private
 * data; the extra space begins at the PA_ALIGN boundary after the struct.
 * Copies 'name' and 'description'. NOTE(review): remaining field
 * initialization (orig. lines 136-141) is elided from this excerpt. */
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
/* Free a port allocated with pa_device_port_new(), including its owned
 * strings. NOTE(review): the free of p->name and of p itself lies in elided
 * lines. */
142 void pa_device_port_free(pa_device_port *p) {
146 pa_xfree(p->description);
150 /* Called from main context */
/* Clear all implementor-provided callbacks so a fresh sink starts without
 * stale function pointers. NOTE(review): additional callbacks are cleared in
 * lines elided from this excerpt. */
151 static void reset_callbacks(pa_sink *s) {
155 s->get_volume = NULL;
156 s->set_volume = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
164 /* Called from main context */
/* Create and register a new sink from 'data': registers the name, fires the
 * NEW and FIXATE hooks, validates/fixes up sample spec, channel map, volume
 * and mute, initializes all pa_sink fields (including thread_info defaults),
 * and creates the companion ".monitor" source. Returns NULL on validation or
 * hook failure. NOTE(review): many lines are elided from this excerpt, so
 * some cleanup/failure paths are not visible here. */
165 pa_sink* pa_sink_new(
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
179 pa_assert(data->name);
180 pa_assert_ctl_context();
182 s = pa_msgobject_new(pa_sink);
/* Reserve the sink name; fails if the (non-fail-safe) name is taken. */
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
190 pa_sink_new_data_set_name(data, name);
/* Modules get a chance to adjust or veto the new sink. */
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
194 pa_namereg_unregister(core, name);
198 /* FIXME, need to free s here on failure */
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
/* Fall back to a default channel map when the caller didn't set one. */
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
217 if (!data->muted_is_set)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
/* Last chance for modules to fix up the (now validated) data. */
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
229 pa_namereg_unregister(core, name);
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
237 s->state = PA_SINK_INIT;
239 s->suspend_cause = 0;
240 s->name = pa_xstrdup(name);
241 s->proplist = pa_proplist_copy(data->proplist);
242 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
243 s->module = data->module;
244 s->card = data->card;
246 s->sample_spec = data->sample_spec;
247 s->channel_map = data->channel_map;
249 s->inputs = pa_idxset_new(NULL, NULL);
252 s->reference_volume = s->real_volume = data->volume;
253 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
254 s->base_volume = PA_VOLUME_NORM;
255 s->n_volume_steps = PA_VOLUME_NORM+1;
256 s->muted = data->muted;
257 s->refresh_volume = s->refresh_muted = FALSE;
264 /* As a minor optimization we just steal the list instead of
266 s->ports = data->ports;
269 s->active_port = NULL;
270 s->save_port = FALSE;
/* Prefer the explicitly requested port, if it actually exists. */
272 if (data->active_port && s->ports)
273 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
274 s->save_port = data->save_port;
/* Otherwise pick the highest-priority port as default. */
276 if (!s->active_port && s->ports) {
280 PA_HASHMAP_FOREACH(p, s->ports, state)
281 if (!s->active_port || p->priority > s->active_port->priority)
285 s->save_volume = data->save_volume;
286 s->save_muted = data->save_muted;
288 pa_silence_memchunk_get(
289 &core->silence_cache,
/* IO-thread-side state; rtpoll/asyncmsgq are supplied before _put(). */
295 s->thread_info.rtpoll = NULL;
296 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
297 s->thread_info.soft_volume = s->soft_volume;
298 s->thread_info.soft_muted = s->muted;
299 s->thread_info.state = s->state;
300 s->thread_info.rewind_nbytes = 0;
301 s->thread_info.rewind_requested = FALSE;
302 s->thread_info.max_rewind = 0;
303 s->thread_info.max_request = 0;
304 s->thread_info.requested_latency_valid = FALSE;
305 s->thread_info.requested_latency = 0;
306 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
307 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
308 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
310 /* FIXME: This should probably be moved to pa_sink_put() */
311 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
314 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
316 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
317 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
320 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
321 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
/* Build the companion monitor source that taps what this sink plays. */
325 pa_source_new_data_init(&source_data);
326 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
327 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
328 source_data.name = pa_sprintf_malloc("%s.monitor", name);
329 source_data.driver = data->driver;
330 source_data.module = data->module;
331 source_data.card = data->card;
333 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
334 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
335 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
/* The monitor inherits only the latency-related flags, translated to the
 * corresponding PA_SOURCE_* bits. */
337 s->monitor_source = pa_source_new(core, &source_data,
338 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
339 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
341 pa_source_new_data_done(&source_data);
343 if (!s->monitor_source) {
349 s->monitor_source->monitor_of = s;
351 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
352 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
353 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
358 /* Called from main context */
/* Called from main context. Transition the sink to 'state': asks the
 * implementor via s->set_state(), pushes the new state into the IO thread
 * with a synchronous SET_STATE message (rolling back on failure), then fires
 * hooks/subscription events and propagates suspend/resume to inputs and the
 * monitor source. Returns 0 on success, negative on failure. NOTE(review):
 * several lines are elided from this excerpt, so some branches shown here
 * are incomplete. */
359 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
361 pa_bool_t suspend_change;
362 pa_sink_state_t original_state;
365 pa_assert_ctl_context();
367 if (s->state == state)
370 original_state = s->state;
/* True iff we cross into or out of SUSPENDED. */
373 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
374 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
377 if ((ret = s->set_state(s, state)) < 0)
381 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
/* Roll the implementor back to the previous state. */
384 s->set_state(s, original_state);
391 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
392 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
393 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
396 if (suspend_change) {
400 /* We're suspending or resuming, tell everyone about it */
402 PA_IDXSET_FOREACH(i, s->inputs, idx)
403 if (s->state == PA_SINK_SUSPENDED &&
404 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
405 pa_sink_input_kill(i);
407 i->suspend(i, state == PA_SINK_SUSPENDED);
409 if (s->monitor_source)
410 pa_source_sync_suspend(s->monitor_source);
416 /* Called from main context */
/* Called from main context. Finalize a sink created with pa_sink_new() and
 * make it live: checks preconditions, derives volume-related flags, syncs
 * thread_info volume/mute, moves the sink to IDLE, puts the monitor source
 * and announces the new sink to hooks and subscribers. */
417 void pa_sink_put(pa_sink* s) {
418 pa_sink_assert_ref(s);
419 pa_assert_ctl_context();
421 pa_assert(s->state == PA_SINK_INIT);
423 /* The following fields must be initialized properly when calling _put() */
424 pa_assert(s->asyncmsgq);
425 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
427 /* Generally, flags should be initialized via pa_sink_new(). As a
428 * special exception we allow volume related flags to be set
429 * between _new() and _put(). */
/* Sinks without hardware volume control do volume in software, in dB. */
431 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
432 s->flags |= PA_SINK_DECIBEL_VOLUME;
434 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
435 s->flags |= PA_SINK_FLAT_VOLUME;
437 /* We assume that if the sink implementor changed the default
438 * volume he did so in real_volume, because that is the usual
439 * place where he is supposed to place his changes. */
440 s->reference_volume = s->real_volume;
442 s->thread_info.soft_volume = s->soft_volume;
443 s->thread_info.soft_muted = s->muted;
/* Sanity: flags, fixed latency and monitor-source flags must be coherent. */
445 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
446 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
447 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
448 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
449 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
451 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
452 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
453 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
455 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
457 pa_source_put(s->monitor_source);
459 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
460 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
463 /* Called from main context */
/* Called from main context. Detach the sink from the core: unregister the
 * name, drop it from the core/card idxsets, kill remaining inputs, move it
 * to UNLINKED, unlink the monitor source and fire removal events. */
464 void pa_sink_unlink(pa_sink* s) {
466 pa_sink_input *i, *j = NULL;
469 pa_assert_ctl_context();
471 /* Please note that pa_sink_unlink() does more than simply
472 * reversing pa_sink_put(). It also undoes the registrations
473 * already done in pa_sink_new()! */
475 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
476 * may be called multiple times on the same sink without bad
479 linked = PA_SINK_IS_LINKED(s->state);
482 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
484 if (s->state != PA_SINK_UNLINKED)
485 pa_namereg_unregister(s->core, s->name);
486 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
489 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
/* Kill every remaining input; each kill removes it from s->inputs. */
491 while ((i = pa_idxset_first(s->inputs, NULL))) {
493 pa_sink_input_kill(i);
498 sink_set_state(s, PA_SINK_UNLINKED);
500 s->state = PA_SINK_UNLINKED;
504 if (s->monitor_source)
505 pa_source_unlink(s->monitor_source);
508 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
509 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
513 /* Called from main context */
/* Destructor, invoked via the pa_object machinery once the refcount hits
 * zero. Unlinks if still linked, then releases the monitor source, input
 * containers, silence memblock, proplist and ports. */
514 static void sink_free(pa_object *o) {
515 pa_sink *s = PA_SINK(o);
519 pa_assert_ctl_context();
520 pa_assert(pa_sink_refcnt(s) == 0);
522 if (PA_SINK_IS_LINKED(s->state))
525 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
527 if (s->monitor_source) {
528 pa_source_unref(s->monitor_source);
529 s->monitor_source = NULL;
532 pa_idxset_free(s->inputs, NULL, NULL);
/* Drop the IO-thread-side references on inputs still in thread_info. */
534 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
535 pa_sink_input_unref(i);
537 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
539 if (s->silence.memblock)
540 pa_memblock_unref(s->silence.memblock);
546 pa_proplist_free(s->proplist);
551 while ((p = pa_hashmap_steal_first(s->ports)))
552 pa_device_port_free(p);
554 pa_hashmap_free(s->ports, NULL, NULL);
560 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread, and mirror
 * it onto the monitor source. NOTE(review): the assignment to s->asyncmsgq
 * itself lies in lines elided from this excerpt. */
561 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
562 pa_sink_assert_ref(s);
563 pa_assert_ctl_context();
567 if (s->monitor_source)
568 pa_source_set_asyncmsgq(s->monitor_source, q);
571 /* Called from main context, and not while the IO thread is active, please */
572 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
573 pa_sink_assert_ref(s);
574 pa_assert_ctl_context();
579 /* For now, allow only a minimal set of flags to be changed. */
580 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
582 s->flags = (s->flags & ~mask) | (value & mask);
584 pa_source_update_flags(s->monitor_source,
585 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
586 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
587 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
588 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
591 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on, and mirror it onto the
 * monitor source. Called from IO context, or before _put() from main. */
592 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
593 pa_sink_assert_ref(s);
594 pa_sink_assert_io_context(s);
596 s->thread_info.rtpoll = p;
598 if (s->monitor_source)
599 pa_source_set_rtpoll(s->monitor_source, p);
602 /* Called from main context */
/* Re-derive RUNNING vs IDLE from the current number of users; a suspended
 * sink is left suspended. Returns what sink_set_state() returns. */
603 int pa_sink_update_status(pa_sink*s) {
604 pa_sink_assert_ref(s);
605 pa_assert_ctl_context();
606 pa_assert(PA_SINK_IS_LINKED(s->state));
608 if (s->state == PA_SINK_SUSPENDED)
611 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
614 /* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor's) suspend-cause
 * bitmask, then suspend or resume accordingly. The sink stays suspended as
 * long as any cause bit remains set. */
615 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
616 pa_sink_assert_ref(s);
617 pa_assert_ctl_context();
618 pa_assert(PA_SINK_IS_LINKED(s->state));
619 pa_assert(cause != 0);
622 s->suspend_cause |= cause;
623 s->monitor_source->suspend_cause |= cause;
625 s->suspend_cause &= ~cause;
626 s->monitor_source->suspend_cause &= ~cause;
/* No state change needed if the suspended-ness already matches the causes. */
629 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
632 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
634 if (s->suspend_cause)
635 return sink_set_state(s, PA_SINK_SUSPENDED);
637 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
640 /* Called from main context */
/* Begin moving all inputs away from this sink: each input that accepts
 * pa_sink_input_start_move() is queued (with a reference held) for a later
 * pa_sink_move_all_finish()/_fail(). Returns the queue. */
641 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
642 pa_sink_input *i, *n;
645 pa_sink_assert_ref(s);
646 pa_assert_ctl_context();
647 pa_assert(PA_SINK_IS_LINKED(s->state));
/* Fetch the next input before touching the current one, since starting a
 * move removes it from s->inputs. */
652 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
653 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
655 pa_sink_input_ref(i);
657 if (pa_sink_input_start_move(i) >= 0)
660 pa_sink_input_unref(i);
666 /* Called from main context */
/* Complete a move started with pa_sink_move_all_start(): re-attach each
 * queued input to sink 's' (falling back to fail_move on error), drop the
 * queue's references and free the queue. */
667 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
670 pa_sink_assert_ref(s);
671 pa_assert_ctl_context();
672 pa_assert(PA_SINK_IS_LINKED(s->state));
675 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
676 if (pa_sink_input_finish_move(i, s, save) < 0)
677 pa_sink_input_fail_move(i);
679 pa_sink_input_unref(i);
682 pa_queue_free(q, NULL, NULL);
685 /* Called from main context */
/* Abort a move started with pa_sink_move_all_start(): notify each queued
 * input that the move failed, drop the references and free the queue. */
686 void pa_sink_move_all_fail(pa_queue *q) {
689 pa_assert_ctl_context();
692 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
693 pa_sink_input_fail_move(i);
694 pa_sink_input_unref(i);
697 pa_queue_free(q, NULL, NULL);
700 /* Called from IO thread context */
/* Called from IO thread context. Execute a rewind of 'nbytes' bytes:
 * propagate it to every attached input and then to the monitor source.
 * Cheap no-op when no rewind was requested and nbytes is 0. */
701 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_assert_ref(s);
706 pa_sink_assert_io_context(s);
707 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
709 /* If nobody requested this and this is actually no real rewind
710 * then we can short cut this. Please note that this means that
711 * not all rewind requests triggered upstream will always be
712 * translated in actual requests! */
713 if (!s->thread_info.rewind_requested && nbytes <= 0)
716 s->thread_info.rewind_nbytes = 0;
717 s->thread_info.rewind_requested = FALSE;
719 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 pa_log_debug("Processing rewind...");
725 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
726 pa_sink_input_assert_ref(i);
727 pa_sink_input_process_rewind(i, nbytes);
731 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
732 pa_source_process_rewind(s->monitor_source, nbytes);
735 /* Called from IO thread context */
/* Called from IO thread context. Peek up to 'maxinfo' input streams into the
 * pa_mix_info array for mixing; silent chunks are skipped (their blocks
 * unreffed). Each used entry holds a reference on its input in ->userdata.
 * *length is the requested byte count; NOTE(review): the elided tail
 * presumably updates *length to the common mixable length and returns the
 * number of entries filled — confirm against full source. */
736 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 size_t mixlength = *length;
742 pa_sink_assert_ref(s);
743 pa_sink_assert_io_context(s);
746 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
747 pa_sink_input_assert_ref(i);
749 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
/* Track the shortest chunk; mixing can only cover that much. */
751 if (mixlength == 0 || info->chunk.length < mixlength)
752 mixlength = info->chunk.length;
754 if (pa_memblock_is_silence(info->chunk.memblock)) {
755 pa_memblock_unref(info->chunk.memblock);
759 info->userdata = pa_sink_input_ref(i);
761 pa_assert(info->chunk.memblock);
762 pa_assert(info->chunk.length > 0);
775 /* Called from IO thread context */
/* Called from IO thread context. After a render pass: advance every input by
 * result->length bytes, feed per-input data to any direct outputs on the
 * monitor source, then drop the references and memblocks held by the
 * pa_mix_info entries, and finally post the mixed result to the monitor
 * source. */
776 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 unsigned n_unreffed = 0;
782 pa_sink_assert_ref(s);
783 pa_sink_assert_io_context(s);
785 pa_assert(result->memblock);
786 pa_assert(result->length > 0);
788 /* We optimize for the case where the order of the inputs has not changed */
790 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
792 pa_mix_info* m = NULL;
794 pa_sink_input_assert_ref(i);
796 /* Let's try to find the matching entry in the pa_mix_info array */
797 for (j = 0; j < n; j ++) {
799 if (info[p].userdata == i) {
/* Advance the input by the amount we just consumed. */
810 pa_sink_input_drop(i, result->length);
812 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
814 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
/* Direct outputs get this input's own (volume-adjusted) chunk... */
819 if (m && m->chunk.memblock) {
821 pa_memblock_ref(c.memblock);
822 pa_assert(result->length <= c.length);
823 c.length = result->length;
825 pa_memchunk_make_writable(&c, 0);
826 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
/* ...or silence when the input had nothing in the mix. */
829 pa_memblock_ref(c.memblock);
830 pa_assert(result->length <= c.length);
831 c.length = result->length;
834 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
835 pa_source_output_assert_ref(o);
836 pa_assert(o->direct_on_input == i);
837 pa_source_post_direct(s->monitor_source, o, &c);
840 pa_memblock_unref(c.memblock);
845 if (m->chunk.memblock)
846 pa_memblock_unref(m->chunk.memblock);
847 pa_memchunk_reset(&m->chunk);
849 pa_sink_input_unref(m->userdata);
856 /* Now drop references to entries that are included in the
857 * pa_mix_info array but don't exist anymore */
859 if (n_unreffed < n) {
860 for (; n > 0; info++, n--) {
862 pa_sink_input_unref(info->userdata);
863 if (info->chunk.memblock)
864 pa_memblock_unref(info->chunk.memblock);
868 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
869 pa_source_post(s->monitor_source, result);
872 /* Called from IO thread context */
/* Called from IO thread context. Render up to 'length' bytes of mixed audio
 * into *result (a new or referenced memchunk). Suspended sinks yield
 * silence; a single input is passed through (volume applied as needed);
 * multiple inputs are mixed with pa_mix(). Ends by calling inputs_drop(). */
873 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
874 pa_mix_info info[MAX_MIX_CHANNELS];
876 size_t block_size_max;
878 pa_sink_assert_ref(s);
879 pa_sink_assert_io_context(s);
880 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
881 pa_assert(pa_frame_aligned(length, &s->sample_spec));
886 pa_assert(!s->thread_info.rewind_requested);
887 pa_assert(s->thread_info.rewind_nbytes == 0);
/* Suspended: hand back (a slice of) the cached silence block. */
889 if (s->thread_info.state == PA_SINK_SUSPENDED) {
890 result->memblock = pa_memblock_ref(s->silence.memblock);
891 result->index = s->silence.index;
892 result->length = PA_MIN(s->silence.length, length);
/* Pick a frame-aligned default length, capped by the mempool block size. */
897 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
899 block_size_max = pa_mempool_block_size_max(s->core->mempool);
900 if (length > block_size_max)
901 length = pa_frame_align(block_size_max, &s->sample_spec);
903 pa_assert(length > 0);
905 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: nothing to play, return silence. */
909 *result = s->silence;
910 pa_memblock_ref(result->memblock);
912 if (result->length > length)
913 result->length = length;
/* n == 1: pass-through; only apply volume/mute in software if needed. */
918 *result = info[0].chunk;
919 pa_memblock_ref(result->memblock);
921 if (result->length > length)
922 result->length = length;
924 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
926 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
927 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
928 pa_memblock_unref(result->memblock);
929 pa_silence_memchunk_get(&s->core->silence_cache,
935 pa_memchunk_make_writable(result, 0);
936 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1:真 mix into a freshly allocated block. NOTE(review): comment label
 * aside, this is the multi-stream pa_mix() path. */
941 result->memblock = pa_memblock_new(s->core->mempool, length);
943 ptr = pa_memblock_acquire(result->memblock);
944 result->length = pa_mix(info, n,
947 &s->thread_info.soft_volume,
948 s->thread_info.soft_muted);
949 pa_memblock_release(result->memblock);
954 inputs_drop(s, info, n, result);
959 /* Called from IO thread context */
/* Called from IO thread context. Like pa_sink_render(), but mixes into the
 * caller-provided *target chunk (possibly shortening target->length).
 * Suspended sinks and the no-input case write silence. */
960 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
961 pa_mix_info info[MAX_MIX_CHANNELS];
963 size_t length, block_size_max;
965 pa_sink_assert_ref(s);
966 pa_sink_assert_io_context(s);
967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
969 pa_assert(target->memblock);
970 pa_assert(target->length > 0);
971 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
975 pa_assert(!s->thread_info.rewind_requested);
976 pa_assert(s->thread_info.rewind_nbytes == 0);
978 if (s->thread_info.state == PA_SINK_SUSPENDED) {
979 pa_silence_memchunk(target, &s->sample_spec);
983 length = target->length;
984 block_size_max = pa_mempool_block_size_max(s->core->mempool);
985 if (length > block_size_max)
986 length = pa_frame_align(block_size_max, &s->sample_spec);
988 pa_assert(length > 0);
990 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
/* n == 0: silence the target. */
993 if (target->length > length)
994 target->length = length;
996 pa_silence_memchunk(target, &s->sample_spec);
/* n == 1: copy the single stream through, applying volume if needed. */
1000 if (target->length > length)
1001 target->length = length;
1003 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1005 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1006 pa_silence_memchunk(target, &s->sample_spec);
1010 vchunk = info[0].chunk;
1011 pa_memblock_ref(vchunk.memblock);
1013 if (vchunk.length > length)
1014 vchunk.length = length;
1016 if (!pa_cvolume_is_norm(&volume)) {
1017 pa_memchunk_make_writable(&vchunk, 0);
1018 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1021 pa_memchunk_memcpy(target, &vchunk);
1022 pa_memblock_unref(vchunk.memblock);
/* n > 1: mix all streams directly into the target buffer. */
1028 ptr = pa_memblock_acquire(target->memblock);
1030 target->length = pa_mix(info, n,
1031 (uint8_t*) ptr + target->index, length,
1033 &s->thread_info.soft_volume,
1034 s->thread_info.soft_muted);
1036 pa_memblock_release(target->memblock);
1039 inputs_drop(s, info, n, target);
1044 /* Called from IO thread context */
/* Called from IO thread context. Fill *target completely, repeatedly calling
 * pa_sink_render_into() on successive sub-chunks until target->length bytes
 * are produced. NOTE(review): the loop bookkeeping lies in elided lines. */
1045 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1049 pa_sink_assert_ref(s);
1050 pa_sink_assert_io_context(s);
1051 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1053 pa_assert(target->memblock);
1054 pa_assert(target->length > 0);
1055 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1059 pa_assert(!s->thread_info.rewind_requested);
1060 pa_assert(s->thread_info.rewind_nbytes == 0);
1069 pa_sink_render_into(s, &chunk);
1078 /* Called from IO thread context */
/* Called from IO thread context. Render exactly 'length' bytes into *result:
 * first a normal render pass for up to 'length' bytes, then — if that came
 * up short — top up the remainder with pa_sink_render_into() so the caller
 * always gets a full buffer. */
1079 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1080 pa_mix_info info[MAX_MIX_CHANNELS];
1081 size_t length1st = length;
1084 pa_sink_assert_ref(s);
1085 pa_sink_assert_io_context(s);
1086 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1087 pa_assert(length > 0);
1088 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1093 pa_assert(!s->thread_info.rewind_requested);
1094 pa_assert(s->thread_info.rewind_nbytes == 0);
1096 pa_assert(length > 0);
1098 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
/* n == 0: start from cached silence. */
1101 pa_silence_memchunk_get(&s->core->silence_cache,
1106 } else if (n == 1) {
/* n == 1: pass-through with software volume/mute where needed. */
1109 *result = info[0].chunk;
1110 pa_memblock_ref(result->memblock);
1112 if (result->length > length)
1113 result->length = length;
1115 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1117 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1118 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1119 pa_memblock_unref(result->memblock);
1120 pa_silence_memchunk_get(&s->core->silence_cache,
1126 pa_memchunk_make_writable(result, length);
1127 pa_volume_memchunk(result, &s->sample_spec, &volume);
/* n > 1: mix the first chunk into a fresh block of the full length. */
1134 result->memblock = pa_memblock_new(s->core->mempool, length);
1136 ptr = pa_memblock_acquire(result->memblock);
1138 result->length = pa_mix(info, n,
1139 (uint8_t*) ptr + result->index, length1st,
1141 &s->thread_info.soft_volume,
1142 s->thread_info.soft_muted);
1144 pa_memblock_release(result->memblock);
1147 inputs_drop(s, info, n, result);
/* First pass fell short: render the remaining tail in place. */
1149 if (result->length < length) {
1152 pa_memchunk_make_writable(result, length);
1154 l = length - result->length;
1155 d = result->index + result->length;
1161 pa_sink_render_into(s, &chunk);
1166 result->length = length;
1172 /* Called from main thread */
/* Called from main thread. Query the sink's current playback latency via a
 * synchronous GET_LATENCY message to the IO thread. Suspended sinks and
 * sinks without PA_SINK_LATENCY take early-return paths (in elided lines,
 * presumably returning 0). */
1173 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1176 pa_sink_assert_ref(s);
1177 pa_assert_ctl_context();
1178 pa_assert(PA_SINK_IS_LINKED(s->state));
1180 /* The returned value is supposed to be in the time domain of the sound card! */
1182 if (s->state == PA_SINK_SUSPENDED)
1185 if (!(s->flags & PA_SINK_LATENCY))
1188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1193 /* Called from IO thread */
/* Called from IO thread. Same query as pa_sink_get_latency(), but invokes
 * the message handler directly instead of round-tripping through the
 * asyncmsgq. */
1194 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1198 pa_sink_assert_ref(s);
1199 pa_sink_assert_io_context(s);
1200 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1202 /* The returned value is supposed to be in the time domain of the sound card! */
1204 if (s->thread_info.state == PA_SINK_SUSPENDED)
1207 if (!(s->flags & PA_SINK_LATENCY))
1210 o = PA_MSGOBJECT(s);
1212 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1214 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1220 /* Called from main context */
/* Called from main context. For flat-volume sinks: recompute each input's
 * reference_ratio = i->volume / s->reference_volume (per channel, remapped
 * into the input's channel map). */
1221 static void compute_reference_ratios(pa_sink *s) {
1225 pa_sink_assert_ref(s);
1226 pa_assert_ctl_context();
1227 pa_assert(PA_SINK_IS_LINKED(s->state));
1228 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1230 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1232 pa_cvolume remapped;
1235 * Calculates the reference volume from the sink's reference
1236 * volume. This basically calculates:
1238 * i->reference_ratio = i->volume / s->reference_volume
1241 remapped = s->reference_volume;
1242 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1244 i->reference_ratio.channels = i->sample_spec.channels;
1246 for (c = 0; c < i->sample_spec.channels; c++) {
1248 /* We don't update when the sink volume is 0 anyway */
1249 if (remapped.values[c] <= PA_VOLUME_MUTED)
1252 /* Don't update the reference ratio unless necessary */
1253 if (pa_sw_volume_multiply(
1254 i->reference_ratio.values[c],
1255 remapped.values[c]) == i->volume.values[c])
1258 i->reference_ratio.values[c] = pa_sw_volume_divide(
1259 i->volume.values[c],
1260 remapped.values[c]);
1265 /* Called from main context */
/* Called from main context. For flat-volume sinks: recompute each input's
 * real_ratio (= i->volume / s->real_volume) and the soft_volume actually
 * applied in software (= real_ratio * volume_factor), per channel. */
1266 static void compute_real_ratios(pa_sink *s) {
1270 pa_sink_assert_ref(s);
1271 pa_assert_ctl_context();
1272 pa_assert(PA_SINK_IS_LINKED(s->state));
1273 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1275 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1277 pa_cvolume remapped;
1280 * This basically calculates:
1282 * i->real_ratio := i->volume / s->real_volume
1283 * i->soft_volume := i->real_ratio * i->volume_factor
1286 remapped = s->real_volume;
1287 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1289 i->real_ratio.channels = i->sample_spec.channels;
1290 i->soft_volume.channels = i->sample_spec.channels;
1292 for (c = 0; c < i->sample_spec.channels; c++) {
1294 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1295 /* We leave i->real_ratio untouched */
1296 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1300 /* Don't lose accuracy unless necessary */
1301 if (pa_sw_volume_multiply(
1302 i->real_ratio.values[c],
1303 remapped.values[c]) != i->volume.values[c])
1305 i->real_ratio.values[c] = pa_sw_volume_divide(
1306 i->volume.values[c],
1307 remapped.values[c]);
1309 i->soft_volume.values[c] = pa_sw_volume_multiply(
1310 i->real_ratio.values[c],
1311 i->volume_factor.values[c]);
1314 /* We don't copy the soft_volume to the thread_info data
1315 * here. That must be done by the caller */
1319 /* Called from main thread */
/* Called from main thread. For flat-volume sinks: set s->real_volume to the
 * per-channel maximum (merge) of all input volumes remapped into the sink's
 * channel map, then refresh the inputs' real ratios / soft volumes. With no
 * inputs, real_volume just tracks reference_volume. */
1320 static void compute_real_volume(pa_sink *s) {
1324 pa_sink_assert_ref(s);
1325 pa_assert_ctl_context();
1326 pa_assert(PA_SINK_IS_LINKED(s->state));
1327 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1329 /* This determines the maximum volume of all streams and sets
1330 * s->real_volume accordingly. */
1332 if (pa_idxset_isempty(s->inputs)) {
1333 /* In the special case that we have no sink input we leave the
1334 * volume unmodified. */
1335 s->real_volume = s->reference_volume;
1339 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1341 /* First let's determine the new maximum volume of all inputs
1342 * connected to this sink */
1343 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1344 pa_cvolume remapped;
1346 remapped = i->volume;
1347 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1348 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1351 /* Then, let's update the real ratios/soft volumes of all inputs
1352 * connected to this sink */
1353 compute_real_ratios(s);
1356 /* Called from main thread */
/* Push a sink-level reference volume change down to every attached
 * sink input, keeping each input's reference ratio fixed, and notify
 * listeners of any stream volume that actually changed. */
1357 static void propagate_reference_volume(pa_sink *s) {
1361 pa_sink_assert_ref(s);
1362 pa_assert_ctl_context();
1363 pa_assert(PA_SINK_IS_LINKED(s->state));
1364 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1366 /* This is called whenever the sink volume changes that is not
1367 * caused by a sink input volume change. We need to fix up the
1368 * sink input volumes accordingly */
1370 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1371 pa_cvolume old_volume, remapped;
1373 old_volume = i->volume;
1375 /* This basically calculates:
1377 * i->volume := s->reference_volume * i->reference_ratio */
/* Remap the sink volume into the input's channel map first. */
1379 remapped = s->reference_volume;
1380 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1381 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1383 /* The volume changed, let's tell people so */
1384 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1386 if (i->volume_changed)
1387 i->volume_changed(i);
1389 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1394 /* Called from main thread */
/* Set (volume != NULL) or re-synchronize (volume == NULL, flat volume
 * only) the sink's reference volume, recompute the real/soft volumes,
 * hand the result to the sink implementor via set_volume() or the
 * soft-volume path, and notify subscribers if the reference volume
 * actually changed.
 * NOTE(review): excerpt omits some lines (e.g. the NULL-volume branch
 * header and the save-parameter declaration); see the full file. */
1395 void pa_sink_set_volume(
1397 const pa_cvolume *volume,
1401 pa_cvolume old_reference_volume;
1402 pa_bool_t reference_changed;
1404 pa_sink_assert_ref(s);
1405 pa_assert_ctl_context();
1406 pa_assert(PA_SINK_IS_LINKED(s->state));
1407 pa_assert(!volume || pa_cvolume_valid(volume));
1408 pa_assert(!volume || pa_cvolume_compatible(volume, &s->sample_spec));
/* A NULL volume is only meaningful in flat volume mode. */
1409 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1411 /* If volume is NULL we synchronize the sink's real and reference
1412 * volumes with the stream volumes. If it is not NULL we update
1413 * the reference_volume with it. */
1415 old_reference_volume = s->reference_volume;
1419 s->reference_volume = *volume;
1421 if (s->flags & PA_SINK_FLAT_VOLUME) {
1422 /* OK, propagate this volume change back to the inputs */
1423 propagate_reference_volume(s);
1425 /* And now recalculate the real volume */
1426 compute_real_volume(s);
/* Non-flat case: real volume simply follows the reference volume. */
1428 s->real_volume = s->reference_volume;
1431 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1433 /* Ok, let's determine the new real volume */
1434 compute_real_volume(s);
1436 /* Let's 'push' the reference volume if necessary */
1437 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1439 /* We need to fix the reference ratios of all streams now that
1440 * we changed the reference volume */
1441 compute_reference_ratios(s);
1444 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
/* Keep the saved flag sticky: stay saved if nothing changed, or save on request. */
1445 s->save_volume = (!reference_changed && s->save_volume) || save;
1447 if (s->set_volume) {
1448 /* If we have a function set_volume(), then we do not apply a
1449 * soft volume by default. However, set_volume() is free to
1450 * apply one to s->soft_volume */
1452 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1456 /* If we have no function set_volume(), then the soft volume
1457 * becomes the virtual volume */
1458 s->soft_volume = s->real_volume;
1460 /* This tells the sink that soft and/or virtual volume changed */
/* Synchronous hand-off to the IO thread so it picks up the new volumes. */
1462 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1464 if (reference_changed)
1465 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1468 /* Called from main thread. Only to be called by sink implementor */
/* Override the sink's software volume. A NULL volume resets it to
 * neutral. For a linked sink the new value is pushed to the IO thread
 * via SET_VOLUME; otherwise thread_info is updated directly (safe only
 * before the IO thread runs). */
1469 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1470 pa_sink_assert_ref(s);
1471 pa_assert_ctl_context();
1474 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1476 s->soft_volume = *volume;
1478 if (PA_SINK_IS_LINKED(s->state))
1479 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1481 s->thread_info.soft_volume = s->soft_volume;
/* React to an externally caused hardware volume change: adopt the new
 * real volume as the reference volume and, in flat volume mode,
 * recompute each stream's volume from its (now equal) reference ratio.
 * No-op if the real volume did not actually change. */
1484 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1487 pa_cvolume old_reference_volume;
1489 pa_sink_assert_ref(s);
1490 pa_assert_ctl_context();
1491 pa_assert(PA_SINK_IS_LINKED(s->state));
1493 /* This is called when the hardware's real volume changes due to
1494 * some external event. We copy the real volume into our
1495 * reference volume and then rebuild the stream volumes based on
1496 * i->real_ratio which should stay fixed. */
1498 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1501 old_reference_volume = s->reference_volume;
1503 /* 1. Make the real volume the reference volume */
1504 s->reference_volume = s->real_volume;
1506 if (s->flags & PA_SINK_FLAT_VOLUME) {
1508 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1509 pa_cvolume old_volume, remapped;
1511 old_volume = i->volume;
1513 /* 2. Since the sink's reference and real volumes are equal
1514 * now our ratios should be too. */
1515 i->reference_ratio = i->real_ratio;
1517 /* 3. Recalculate the new stream reference volume based on the
1518 * reference ratio and the sink's reference volume.
1520 * This basically calculates:
1522 * i->volume = s->reference_volume * i->reference_ratio
1524 * This is identical to propagate_reference_volume() */
1525 remapped = s->reference_volume;
1526 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1527 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1529 /* Notify if something changed */
1530 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1532 if (i->volume_changed)
1533 i->volume_changed(i);
1535 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1540 /* Something got changed in the hardware. It probably makes sense
1541 * to save changed hw settings given that hw volume changes not
1542 * triggered by PA are almost certainly done by the user. */
1543 s->save_volume = TRUE;
1545 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1546 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1549 /* Called from main thread */
/* Return the sink's reference volume, optionally refreshing the real
 * volume from the hardware first (GET_VOLUME round-trip to the IO
 * thread) and propagating any externally caused change. */
1550 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1551 pa_sink_assert_ref(s);
1552 pa_assert_ctl_context();
1553 pa_assert(PA_SINK_IS_LINKED(s->state));
1555 if (s->refresh_volume || force_refresh) {
1556 struct pa_cvolume old_real_volume;
1558 old_real_volume = s->real_volume;
1563 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1565 propagate_real_volume(s, &old_real_volume);
1568 return &s->reference_volume;
1571 /* Called from main thread */
/* Entry point for sink implementors: record a hardware-reported real
 * volume and propagate the change to streams and subscribers. */
1572 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1573 pa_cvolume old_real_volume;
1575 pa_sink_assert_ref(s);
1576 pa_assert_ctl_context();
1577 pa_assert(PA_SINK_IS_LINKED(s->state));
1579 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1581 old_real_volume = s->real_volume;
1582 s->real_volume = *new_real_volume;
1584 propagate_real_volume(s, &old_real_volume);
1587 /* Called from main thread */
/* Set the sink's mute flag, push it to the IO thread (SET_MUTE) and
 * notify subscribers on an actual change. `save` marks the setting
 * for persistence.
 * NOTE(review): excerpt omits lines between 1597 and 1602 (presumably
 * the `s->muted = mute` assignment and set_mute() hook) — verify
 * against the full file. */
1588 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1589 pa_bool_t old_muted;
1591 pa_sink_assert_ref(s);
1592 pa_assert_ctl_context();
1593 pa_assert(PA_SINK_IS_LINKED(s->state));
1595 old_muted = s->muted;
1597 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1602 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1604 if (old_muted != s->muted)
1605 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1608 /* Called from main thread */
/* Return the sink's mute state, optionally refreshing it from the
 * hardware first. An externally changed mute is treated as a user
 * action (save_muted = TRUE) and re-synchronized to the soft mute. */
1609 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1611 pa_sink_assert_ref(s);
1612 pa_assert_ctl_context();
1613 pa_assert(PA_SINK_IS_LINKED(s->state));
1615 if (s->refresh_muted || force_refresh) {
1616 pa_bool_t old_muted = s->muted;
1621 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1623 if (old_muted != s->muted) {
1624 s->save_muted = TRUE;
1626 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1628 /* Make sure the soft mute status stays in sync */
1629 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1636 /* Called from main thread */
/* Entry point for sink implementors: record a hardware-reported mute
 * change, mark it for saving and notify subscribers. No-op if the
 * state is unchanged. */
1637 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1638 pa_sink_assert_ref(s);
1639 pa_assert_ctl_context();
1640 pa_assert(PA_SINK_IS_LINKED(s->state));
1642 /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
1644 if (s->muted == new_muted)
1647 s->muted = new_muted;
1648 s->save_muted = TRUE;
1650 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1653 /* Called from main thread */
/* Merge property list p into the sink's proplist using the given
 * update mode, then (if linked) fire the proplist-changed hook and a
 * change event. */
1654 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1655 pa_sink_assert_ref(s);
1656 pa_assert_ctl_context();
1659 pa_proplist_update(s->proplist, mode, p);
1661 if (PA_SINK_IS_LINKED(s->state)) {
1662 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1663 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1669 /* Called from main thread */
1670 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
/* Set or clear (description == NULL) the sink's human-readable
 * description, keep the monitor source's description in sync, and
 * notify listeners if linked. Unchanged values are a no-op. */
1671 void pa_sink_set_description(pa_sink *s, const char *description) {
1673 pa_sink_assert_ref(s);
1674 pa_assert_ctl_context();
1676 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1679 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1681 if (old && description && pa_streq(old, description))
1685 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1687 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1689 if (s->monitor_source) {
/* Fall back to the sink's name when no description is set. */
1692 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1693 pa_source_set_description(s->monitor_source, n);
1697 if (PA_SINK_IS_LINKED(s->state)) {
1698 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1699 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1703 /* Called from main thread */
/* Count everything linked to this sink: all sink inputs plus, unlike
 * pa_sink_used_by(), the streams attached to the monitor source. */
1704 unsigned pa_sink_linked_by(pa_sink *s) {
1707 pa_sink_assert_ref(s);
1708 pa_assert_ctl_context();
1709 pa_assert(PA_SINK_IS_LINKED(s->state));
1711 ret = pa_idxset_size(s->inputs);
1713 /* We add in the number of streams connected to us here. Please
1714 * note the asymmetry to pa_sink_used_by()! */
1716 if (s->monitor_source)
1717 ret += pa_source_linked_by(s->monitor_source);
1722 /* Called from main thread */
/* Count actively playing (non-corked) sink inputs. Monitor-source
 * streams are deliberately excluded — see pa_sink_linked_by(). */
1723 unsigned pa_sink_used_by(pa_sink *s) {
1726 pa_sink_assert_ref(s);
1727 pa_assert_ctl_context();
1728 pa_assert(PA_SINK_IS_LINKED(s->state));
1730 ret = pa_idxset_size(s->inputs);
1731 pa_assert(ret >= s->n_corked);
1733 /* Streams connected to our monitor source do not matter for
1734 * pa_sink_used_by()!.*/
1736 return ret - s->n_corked;
1739 /* Called from main thread */
/* Count the streams that keep this sink from being auto-suspended:
 * corked inputs and inputs flagged DONT_INHIBIT_AUTO_SUSPEND are
 * skipped; monitor-source inhibitors are added in.
 * NOTE(review): excerpt omits lines around the skipped branches
 * (presumably `continue;`) and the return — see the full file. */
1740 unsigned pa_sink_check_suspend(pa_sink *s) {
1745 pa_sink_assert_ref(s);
1746 pa_assert_ctl_context();
1748 if (!PA_SINK_IS_LINKED(s->state))
1753 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1754 pa_sink_input_state_t st;
1756 st = pa_sink_input_get_state(i);
1757 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1759 if (st == PA_SINK_INPUT_CORKED)
1762 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1768 if (s->monitor_source)
1769 ret += pa_source_check_suspend(s->monitor_source);
1774 /* Called from the IO thread */
/* Copy each input's main-thread soft volume into its thread_info copy
 * and request a rewind so the new volume takes effect on already
 * rendered data. Unchanged volumes are skipped. */
1775 static void sync_input_volumes_within_thread(pa_sink *s) {
1779 pa_sink_assert_ref(s);
1780 pa_sink_assert_io_context(s);
1782 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1783 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1786 i->thread_info.soft_volume = i->soft_volume;
1787 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1791 /* Called from IO thread, except when it is not */
/* Central message handler for the sink's asyncmsgq. Handles stream
 * attach/detach/move, volume/mute synchronization, state changes and
 * latency/rewind parameter queries from within the IO thread.
 * NOTE(review): this excerpt omits many original lines (returns,
 * closing braces, some declarations); the structure below must be read
 * against the full file. */
1792 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1793 pa_sink *s = PA_SINK(o);
1794 pa_sink_assert_ref(s);
1796 switch ((pa_sink_message_t) code) {
1798 case PA_SINK_MESSAGE_ADD_INPUT: {
1799 pa_sink_input *i = PA_SINK_INPUT(userdata);
1801 /* If you change anything here, make sure to change the
1802 * sink input handling a few lines down at
1803 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
/* The hashmap takes a reference on the input; dropped on removal. */
1805 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1807 /* Since the caller sleeps in pa_sink_input_put(), we can
1808 * safely access data outside of thread_info even though
/* Wire up the synchronized-stream links in thread_info. */
1811 if ((i->thread_info.sync_prev = i->sync_prev)) {
1812 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1813 pa_assert(i->sync_prev->sync_next == i);
1814 i->thread_info.sync_prev->thread_info.sync_next = i;
1817 if ((i->thread_info.sync_next = i->sync_next)) {
1818 pa_assert(i->sink == i->thread_info.sync_next->sink);
1819 pa_assert(i->sync_next->sync_prev == i);
1820 i->thread_info.sync_next->thread_info.sync_prev = i;
1823 pa_assert(!i->thread_info.attached);
1824 i->thread_info.attached = TRUE;
1829 pa_sink_input_set_state_within_thread(i, i->state);
1831 /* The requested latency of the sink input needs to be
1832 * fixed up and then configured on the sink */
1834 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1835 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1837 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1838 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1840 /* We don't rewind here automatically. This is left to the
1841 * sink input implementor because some sink inputs need a
1842 * slow start, i.e. need some time to buffer client
1843 * samples before beginning streaming. */
1845 /* In flat volume mode we need to update the volume as
/* Delegate to the SET_VOLUME case to refresh soft volumes. */
1847 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1850 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1851 pa_sink_input *i = PA_SINK_INPUT(userdata);
1853 /* If you change anything here, make sure to change the
1854 * sink input handling a few lines down at
1855 * PA_SINK_MESSAGE_PREPARE_MOVE, too. */
1860 pa_sink_input_set_state_within_thread(i, i->state);
1862 pa_assert(i->thread_info.attached);
1863 i->thread_info.attached = FALSE;
1865 /* Since the caller sleeps in pa_sink_input_unlink(),
1866 * we can safely access data outside of thread_info even
1867 * though it is mutable */
1869 pa_assert(!i->sync_prev);
1870 pa_assert(!i->sync_next);
/* Unlink this input from the synchronized-stream chain. */
1872 if (i->thread_info.sync_prev) {
1873 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1874 i->thread_info.sync_prev = NULL;
1877 if (i->thread_info.sync_next) {
1878 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1879 i->thread_info.sync_next = NULL;
/* Drop the reference the hashmap held on the input. */
1882 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1883 pa_sink_input_unref(i);
1885 pa_sink_invalidate_requested_latency(s, TRUE);
1886 pa_sink_request_rewind(s, (size_t) -1);
1888 /* In flat volume mode we need to update the volume as
1890 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1893 case PA_SINK_MESSAGE_START_MOVE: {
1894 pa_sink_input *i = PA_SINK_INPUT(userdata);
1896 /* We don't support moving synchronized streams. */
1897 pa_assert(!i->sync_prev);
1898 pa_assert(!i->sync_next);
1899 pa_assert(!i->thread_info.sync_next);
1900 pa_assert(!i->thread_info.sync_prev);
1902 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1904 size_t sink_nbytes, total_nbytes;
1906 /* Get the latency of the sink */
1907 usec = pa_sink_get_latency_within_thread(s);
1908 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
/* Reclaim both data buffered in the sink and data already resampled. */
1909 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1911 if (total_nbytes > 0) {
1912 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1913 i->thread_info.rewrite_flush = TRUE;
1914 pa_sink_input_process_rewind(i, sink_nbytes);
1921 pa_assert(i->thread_info.attached);
1922 i->thread_info.attached = FALSE;
1924 /* Let's remove the sink input ...*/
1925 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1926 pa_sink_input_unref(i);
1928 pa_sink_invalidate_requested_latency(s, TRUE);
1930 pa_log_debug("Requesting rewind due to started move");
1931 pa_sink_request_rewind(s, (size_t) -1);
1933 /* In flat volume mode we need to update the volume as
1935 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1938 case PA_SINK_MESSAGE_FINISH_MOVE: {
1939 pa_sink_input *i = PA_SINK_INPUT(userdata);
1941 /* We don't support moving synchronized streams. */
1942 pa_assert(!i->sync_prev);
1943 pa_assert(!i->sync_next);
1944 pa_assert(!i->thread_info.sync_next);
1945 pa_assert(!i->thread_info.sync_prev);
1947 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1949 pa_assert(!i->thread_info.attached);
1950 i->thread_info.attached = TRUE;
1955 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1956 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1958 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1959 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1961 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1965 /* Get the latency of the sink */
1966 usec = pa_sink_get_latency_within_thread(s);
1967 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
/* Skip what would have been played by now and rewrite the rest. */
1970 pa_sink_input_drop(i, nbytes);
1972 pa_log_debug("Requesting rewind due to finished move");
1973 pa_sink_request_rewind(s, nbytes);
1976 /* In flat volume mode we need to update the volume as
1978 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1981 case PA_SINK_MESSAGE_SET_VOLUME:
/* A changed soft volume invalidates rendered data -> full rewind. */
1983 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1984 s->thread_info.soft_volume = s->soft_volume;
1985 pa_sink_request_rewind(s, (size_t) -1);
1988 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1991 /* Fall through ... */
1993 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1994 sync_input_volumes_within_thread(s);
1997 case PA_SINK_MESSAGE_GET_VOLUME:
2000 case PA_SINK_MESSAGE_SET_MUTE:
2002 if (s->thread_info.soft_muted != s->muted) {
2003 s->thread_info.soft_muted = s->muted;
2004 pa_sink_request_rewind(s, (size_t) -1);
2009 case PA_SINK_MESSAGE_GET_MUTE:
2012 case PA_SINK_MESSAGE_SET_STATE: {
/* True when we cross into or out of the SUSPENDED state. */
2014 pa_bool_t suspend_change =
2015 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2016 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2018 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2020 if (s->thread_info.state == PA_SINK_SUSPENDED) {
/* A pending rewind is meaningless while suspended; drop it. */
2021 s->thread_info.rewind_nbytes = 0;
2022 s->thread_info.rewind_requested = FALSE;
2025 if (suspend_change) {
2029 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2030 if (i->suspend_within_thread)
2031 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2037 case PA_SINK_MESSAGE_DETACH:
2039 /* Detach all streams */
2040 pa_sink_detach_within_thread(s);
2043 case PA_SINK_MESSAGE_ATTACH:
2045 /* Reattach all streams */
2046 pa_sink_attach_within_thread(s);
2049 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2051 pa_usec_t *usec = userdata;
2052 *usec = pa_sink_get_requested_latency_within_thread(s);
2054 /* Yes, that's right, the IO thread will see -1 when no
2055 * explicit requested latency is configured, the main
2056 * thread will see max_latency */
2057 if (*usec == (pa_usec_t) -1)
2058 *usec = s->thread_info.max_latency;
2063 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2064 pa_usec_t *r = userdata;
2066 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2071 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2072 pa_usec_t *r = userdata;
2074 r[0] = s->thread_info.min_latency;
2075 r[1] = s->thread_info.max_latency;
2080 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2082 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2085 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2087 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2090 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2092 *((size_t*) userdata) = s->thread_info.max_rewind;
2095 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2097 *((size_t*) userdata) = s->thread_info.max_request;
2100 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2102 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2105 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2107 pa_sink_set_max_request_within_thread(s, (size_t) offset);
/* GET_LATENCY and MAX are expected to be handled by subclasses. */
2110 case PA_SINK_MESSAGE_GET_LATENCY:
2111 case PA_SINK_MESSAGE_MAX:
2119 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2124 pa_core_assert_ref(c);
2125 pa_assert_ctl_context();
2126 pa_assert(cause != 0);
2128 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2131 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2138 /* Called from main thread */
/* Synchronously ask the IO thread to detach all streams (DETACH). */
2139 void pa_sink_detach(pa_sink *s) {
2140 pa_sink_assert_ref(s);
2141 pa_assert_ctl_context();
2142 pa_assert(PA_SINK_IS_LINKED(s->state));
2144 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2147 /* Called from main thread */
/* Synchronously ask the IO thread to reattach all streams (ATTACH). */
2148 void pa_sink_attach(pa_sink *s) {
2149 pa_sink_assert_ref(s);
2150 pa_assert_ctl_context();
2151 pa_assert(PA_SINK_IS_LINKED(s->state));
2153 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2156 /* Called from IO thread */
/* Detach every attached sink input and then the monitor source.
 * NOTE(review): the per-input detach call (presumably i->detach(i))
 * falls in lines omitted from this excerpt. */
2157 void pa_sink_detach_within_thread(pa_sink *s) {
2161 pa_sink_assert_ref(s);
2162 pa_sink_assert_io_context(s);
2163 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2165 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2169 if (s->monitor_source)
2170 pa_source_detach_within_thread(s->monitor_source);
2173 /* Called from IO thread */
/* Reattach every sink input and then the monitor source.
 * NOTE(review): the per-input attach call (presumably i->attach(i))
 * falls in lines omitted from this excerpt. */
2174 void pa_sink_attach_within_thread(pa_sink *s) {
2178 pa_sink_assert_ref(s);
2179 pa_sink_assert_io_context(s);
2180 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2182 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2186 if (s->monitor_source)
2187 pa_source_attach_within_thread(s->monitor_source);
2190 /* Called from IO thread */
/* Record a rewind request of nbytes ((size_t)-1 = maximum), clamped to
 * max_rewind, and invoke the implementor's request_rewind() hook.
 * Ignored while suspended; a smaller request than one already pending
 * is ignored too. */
2191 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2192 pa_sink_assert_ref(s);
2193 pa_sink_assert_io_context(s);
2194 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2196 if (s->thread_info.state == PA_SINK_SUSPENDED)
2199 if (nbytes == (size_t) -1)
2200 nbytes = s->thread_info.max_rewind;
2202 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2204 if (s->thread_info.rewind_requested &&
2205 nbytes <= s->thread_info.rewind_nbytes)
2208 s->thread_info.rewind_nbytes = nbytes;
2209 s->thread_info.rewind_requested = TRUE;
2211 if (s->request_rewind)
2212 s->request_rewind(s);
2215 /* Called from IO thread */
/* Compute the effective requested latency: the minimum over all input
 * requests and the monitor source, clamped into [min,max]; returns
 * (pa_usec_t)-1 when nothing requested anything. Fixed-latency sinks
 * short-circuit to the clamped fixed latency. The result is cached
 * once the sink is linked.
 * NOTE(review): the final return statement falls in omitted lines. */
2216 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2217 pa_usec_t result = (pa_usec_t) -1;
2220 pa_usec_t monitor_latency;
2222 pa_sink_assert_ref(s);
2223 pa_sink_assert_io_context(s);
2225 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2226 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
/* Use the cached value if still valid. */
2228 if (s->thread_info.requested_latency_valid)
2229 return s->thread_info.requested_latency;
2231 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2232 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2233 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2234 result = i->thread_info.requested_sink_latency;
2236 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2238 if (monitor_latency != (pa_usec_t) -1 &&
2239 (result == (pa_usec_t) -1 || result > monitor_latency))
2240 result = monitor_latency;
2242 if (result != (pa_usec_t) -1)
2243 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2245 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2246 /* Only cache if properly initialized */
2247 s->thread_info.requested_latency = result;
2248 s->thread_info.requested_latency_valid = TRUE;
2254 /* Called from main thread */
/* Main-thread view of the requested latency, fetched from the IO
 * thread via GET_REQUESTED_LATENCY. A suspended sink short-circuits
 * (return value in lines omitted from this excerpt). */
2255 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2258 pa_sink_assert_ref(s);
2259 pa_assert_ctl_context();
2260 pa_assert(PA_SINK_IS_LINKED(s->state));
2262 if (s->state == PA_SINK_SUSPENDED)
2265 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2269 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update thread_info.max_rewind and fan the new value out to all
 * attached inputs (if linked) and to the monitor source. No-op when
 * unchanged. */
2270 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2274 pa_sink_assert_ref(s);
2275 pa_sink_assert_io_context(s);
2277 if (max_rewind == s->thread_info.max_rewind)
2280 s->thread_info.max_rewind = max_rewind;
2282 if (PA_SINK_IS_LINKED(s->thread_info.state))
2283 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2284 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2286 if (s->monitor_source)
2287 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2290 /* Called from main thread */
/* Set max_rewind: route via the IO thread when linked, otherwise call
 * the within-thread variant directly (IO thread not running yet). */
2291 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2292 pa_sink_assert_ref(s);
2293 pa_assert_ctl_context();
2295 if (PA_SINK_IS_LINKED(s->state))
2296 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2298 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2301 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Update thread_info.max_request and propagate it to all attached
 * inputs when linked. No-op when unchanged. */
2302 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2305 pa_sink_assert_ref(s);
2306 pa_sink_assert_io_context(s);
2308 if (max_request == s->thread_info.max_request)
2311 s->thread_info.max_request = max_request;
2313 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2316 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2317 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2321 /* Called from main thread */
/* Set max_request: route via the IO thread when linked, otherwise call
 * the within-thread variant directly (IO thread not running yet). */
2322 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2323 pa_sink_assert_ref(s);
2324 pa_assert_ctl_context();
2326 if (PA_SINK_IS_LINKED(s->state))
2327 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2329 pa_sink_set_max_request_within_thread(s, max_request);
2332 /* Called from IO thread */
/* Invalidate the cached requested latency and notify the implementor
 * and all inputs so they can recompute. For dynamic-latency sinks the
 * cache flag is cleared; the `dynamic` parameter's gating of the
 * notification path falls in lines omitted from this excerpt. */
2333 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2337 pa_sink_assert_ref(s);
2338 pa_sink_assert_io_context(s);
2340 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2341 s->thread_info.requested_latency_valid = FALSE;
2345 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2347 if (s->update_requested_latency)
2348 s->update_requested_latency(s);
2350 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2351 if (i->update_sink_requested_latency)
2352 i->update_sink_requested_latency(i);
2356 /* Called from main thread */
/* Set the dynamic latency range. Zero means "no limit" and is clamped
 * to the absolute bounds. A non-default range requires the
 * PA_SINK_DYNAMIC_LATENCY flag. Routed via the IO thread when linked,
 * applied directly otherwise. */
2357 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2358 pa_sink_assert_ref(s);
2359 pa_assert_ctl_context();
2361 /* min_latency == 0: no limit
2362 * min_latency anything else: specified limit
2364 * Similar for max_latency */
2366 if (min_latency < ABSOLUTE_MIN_LATENCY)
2367 min_latency = ABSOLUTE_MIN_LATENCY;
2369 if (max_latency <= 0 ||
2370 max_latency > ABSOLUTE_MAX_LATENCY)
2371 max_latency = ABSOLUTE_MAX_LATENCY;
2373 pa_assert(min_latency <= max_latency);
2375 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2376 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2377 max_latency == ABSOLUTE_MAX_LATENCY) ||
2378 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2380 if (PA_SINK_IS_LINKED(s->state)) {
/* NOTE(review): the declaration of r[] falls in omitted lines. */
2386 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2388 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2391 /* Called from main thread */
/* Read the latency range: via GET_LATENCY_RANGE when linked (the IO
 * thread owns the values), straight from thread_info otherwise. */
2392 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2393 pa_sink_assert_ref(s);
2394 pa_assert_ctl_context();
2395 pa_assert(min_latency);
2396 pa_assert(max_latency);
2398 if (PA_SINK_IS_LINKED(s->state)) {
2399 pa_usec_t r[2] = { 0, 0 };
2401 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2403 *min_latency = r[0];
2404 *max_latency = r[1];
2406 *min_latency = s->thread_info.min_latency;
2407 *max_latency = s->thread_info.max_latency;
2411 /* Called from IO thread */
/* Apply a new latency range in the IO thread: store it, notify each
 * input's update_sink_latency_range() hook, invalidate the cached
 * requested latency and mirror the range onto the monitor source.
 * No-op when unchanged. */
2412 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2413 pa_sink_assert_ref(s);
2414 pa_sink_assert_io_context(s);
2416 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2417 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2418 pa_assert(min_latency <= max_latency);
2420 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2421 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2422 max_latency == ABSOLUTE_MAX_LATENCY) ||
2423 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2425 if (s->thread_info.min_latency == min_latency &&
2426 s->thread_info.max_latency == max_latency)
2429 s->thread_info.min_latency = min_latency;
2430 s->thread_info.max_latency = max_latency;
2432 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2436 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2437 if (i->update_sink_latency_range)
2438 i->update_sink_latency_range(i);
2441 pa_sink_invalidate_requested_latency(s, FALSE);
2443 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2446 /* Called from main thread */
/* Configure the fixed latency of a non-dynamic-latency sink, clamped
 * to the absolute bounds. Dynamic-latency sinks must pass 0 (early
 * return falls in omitted lines). Routed via SET_FIXED_LATENCY when
 * linked; the monitor source is kept in sync. */
2447 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2448 pa_sink_assert_ref(s);
2449 pa_assert_ctl_context();
2451 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2452 pa_assert(latency == 0);
2456 if (latency < ABSOLUTE_MIN_LATENCY)
2457 latency = ABSOLUTE_MIN_LATENCY;
2459 if (latency > ABSOLUTE_MAX_LATENCY)
2460 latency = ABSOLUTE_MAX_LATENCY;
2462 if (PA_SINK_IS_LINKED(s->state))
2463 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2465 s->thread_info.fixed_latency = latency;
2467 pa_source_set_fixed_latency(s->monitor_source, latency);
2470 /* Called from main thread */
/* Read the fixed latency: 0 for dynamic-latency sinks, otherwise via
 * GET_FIXED_LATENCY when linked or directly from thread_info. The
 * local declaration and final return fall in omitted lines. */
2471 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2474 pa_sink_assert_ref(s);
2475 pa_assert_ctl_context();
2477 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2480 if (PA_SINK_IS_LINKED(s->state))
2481 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2483 latency = s->thread_info.fixed_latency;
2488 /* Called from IO thread */
/* Apply a new fixed latency in the IO thread: store it, notify each
 * input's update_sink_fixed_latency() hook, invalidate the cached
 * requested latency and mirror onto the monitor source. Dynamic
 * sinks must pass 0; unchanged values are a no-op. */
2489 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2490 pa_sink_assert_ref(s);
2491 pa_sink_assert_io_context(s);
2493 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2494 pa_assert(latency == 0);
2498 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2499 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2501 if (s->thread_info.fixed_latency == latency)
2504 s->thread_info.fixed_latency = latency;
2506 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2510 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2511 if (i->update_sink_fixed_latency)
2512 i->update_sink_fixed_latency(i);
2515 pa_sink_invalidate_requested_latency(s, FALSE);
2517 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2520 /* Called from main context */
/* Read max_rewind: directly from thread_info when not yet linked,
 * otherwise via a GET_MAX_REWIND round-trip to the IO thread. */
2521 size_t pa_sink_get_max_rewind(pa_sink *s) {
2523 pa_sink_assert_ref(s);
2524 pa_assert_ctl_context();
2526 if (!PA_SINK_IS_LINKED(s->state))
2527 return s->thread_info.max_rewind;
2529 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2534 /* Called from main context */
/* Return the maximum number of bytes the sink may request at once.
 * Mirrors pa_sink_get_max_rewind(): direct thread_info read when
 * unlinked, synchronous IO-thread query otherwise. The local 'r' and the
 * final 'return r;' are elided in this excerpt. */
2535 size_t pa_sink_get_max_request(pa_sink *s) {
2537 pa_sink_assert_ref(s);
2538 pa_assert_ctl_context();
2540 if (!PA_SINK_IS_LINKED(s->state))
2541 return s->thread_info.max_request;
2543 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2548 /* Called from main context */
/* Switch the sink's active port to the port named 'name'.
 *
 * Returns 0 on success or a negative PA_ERR_* code:
 *   -PA_ERR_NOTIMPLEMENTED  sink has no set_port() callback (the guarding
 *                           'if' is elided in this excerpt — presumably
 *                           'if (!s->set_port)'; confirm in full file)
 *   -PA_ERR_NOENTITY        no such port, or the driver rejected it
 *
 * 'save' indicates whether the choice should be remembered persistently;
 * re-selecting the already-active port only merges the save flag. On
 * success subscribers are notified with a SINK|CHANGE event. Several
 * returns and braces are elided in this excerpt. */
2549 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2550 pa_device_port *port;
2552 pa_sink_assert_ref(s);
2553 pa_assert_ctl_context();
/* Sink implementation does not support port switching. */
2556 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2557 return -PA_ERR_NOTIMPLEMENTED;
2561 return -PA_ERR_NOENTITY;
/* Look the port up by name in the sink's port table. */
2563 if (!(port = pa_hashmap_get(s->ports, name)))
2564 return -PA_ERR_NOENTITY;
/* Already active: only upgrade the save flag, never downgrade it. */
2566 if (s->active_port == port) {
2567 s->save_port = s->save_port || save;
/* Ask the driver to perform the actual switch. */
2571 if ((s->set_port(s, port)) < 0)
2572 return -PA_ERR_NOENTITY;
/* NOTE(review): subscribers are notified before active_port is updated
 * below — callers reacting synchronously would still see the old port;
 * confirm this ordering is intentional in the full file. */
2574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2576 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2578 s->active_port = port;
2579 s->save_port = save;
/* Fill in PA_PROP_DEVICE_ICON_NAME on 'p' if not already set, deriving a
 * base icon name 't' from the device form factor, device class, or the
 * 'is_sink' direction, plus a suffix 's' from the profile name and an
 * optional bus suffix. Returns early when the property already exists
 * (return elided in this excerpt). Many branches (webcam/computer/handset
 * assignments, modem/monitor handling, the analog/iec958/hdmi suffix
 * assignments, the fallback when no form factor matched, and the final
 * return) are elided here — consult the full file before editing. */
2584 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2585 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
/* Respect an icon the driver or user already chose. */
2589 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
/* Map the form factor onto a freedesktop icon name. */
2592 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2594 if (pa_streq(ff, "microphone"))
2595 t = "audio-input-microphone";
2596 else if (pa_streq(ff, "webcam"))
2598 else if (pa_streq(ff, "computer"))
2600 else if (pa_streq(ff, "handset"))
2602 else if (pa_streq(ff, "portable"))
2603 t = "multimedia-player";
2604 else if (pa_streq(ff, "tv"))
2605 t = "video-display";
2608 * The following icons are not part of the icon naming spec,
2609 * as proposals to add them upstream were not accepted; see:
2611 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2613 else if (pa_streq(ff, "headset"))
2614 t = "audio-headset";
2615 else if (pa_streq(ff, "headphone"))
2616 t = "audio-headphones";
2617 else if (pa_streq(ff, "speaker"))
2618 t = "audio-speakers";
2619 else if (pa_streq(ff, "hands-free"))
2620 t = "audio-handsfree";
/* No form-factor match: fall back on the device class. */
2624 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2625 if (pa_streq(c, "modem"))
2632 t = "audio-input-microphone";
/* Pick a connector suffix from the card profile name ('s' assignments
 * for analog/iec958/hdmi are elided in this excerpt). */
2635 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2636 if (strstr(profile, "analog"))
2638 else if (strstr(profile, "iec958"))
2640 else if (strstr(profile, "hdmi"))
/* Compose "<base><suffix>[-<bus>]" as the final icon name. */
2644 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2646 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
/* Fill in a human-readable, translated PA_PROP_DEVICE_DESCRIPTION on 'p'
 * if not already present. The base description 'd' comes from the form
 * factor, the device class, or the product name; when a profile
 * description 'k' exists it is appended. Returns early when the property
 * already exists; several branches (the "no description found" bail-out,
 * the modem case, returns and braces) are elided in this excerpt. */
2651 pa_bool_t pa_device_init_description(pa_proplist *p) {
2652 const char *s, *d = NULL, *k;
/* Respect a description the driver or user already provided. */
2655 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2659 if (pa_streq(s, "internal"))
2660 d = _("Internal Audio");
2663 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2664 if (pa_streq(s, "modem"))
/* Fall back on the hardware product name. */
2668 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
/* Append the profile description when one is available. */
2673 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2676 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2678 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2683 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2687 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2690 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2691 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2692 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");