src/modules/echo-cancel/module-echo-cancel.c (platform/upstream/pulseaudio.git)
1 /***
2     This file is part of PulseAudio.
3
4     Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6     Based on module-virtual-sink.c
7              module-virtual-source.c
8              module-loopback.c
9
10         Copyright 2010 Intel Corporation
11         Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13     PulseAudio is free software; you can redistribute it and/or modify
14     it under the terms of the GNU Lesser General Public License as published
15     by the Free Software Foundation; either version 2.1 of the License,
16     or (at your option) any later version.
17
18     PulseAudio is distributed in the hope that it will be useful, but
19     WITHOUT ANY WARRANTY; without even the implied warranty of
20     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21     General Public License for more details.
22
23     You should have received a copy of the GNU Lesser General Public License
24     along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
25 ***/
26
27 #ifdef HAVE_CONFIG_H
28 #include <config.h>
29 #endif
30
31 #include <stdio.h>
32 #include <math.h>
33
34 #include "echo-cancel.h"
35
36 #include <pulse/xmalloc.h>
37 #include <pulse/timeval.h>
38 #include <pulse/rtclock.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/atomic.h>
42 #include <pulsecore/macro.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/sink.h>
45 #include <pulsecore/module.h>
46 #include <pulsecore/core-rtclock.h>
47 #include <pulsecore/core-util.h>
48 #include <pulsecore/modargs.h>
49 #include <pulsecore/log.h>
50 #include <pulsecore/rtpoll.h>
51 #include <pulsecore/sample-util.h>
52 #include <pulsecore/ltdl-helper.h>
53
54 PA_MODULE_AUTHOR("Wim Taymans");
55 PA_MODULE_DESCRIPTION("Echo Cancellation");
56 PA_MODULE_VERSION(PACKAGE_VERSION);
57 PA_MODULE_LOAD_ONCE(false);
58 PA_MODULE_USAGE(
59         _("source_name=<name for the source> "
60           "source_properties=<properties for the source> "
61           "source_master=<name of source to filter> "
62           "sink_name=<name for the sink> "
63           "sink_properties=<properties for the sink> "
64           "sink_master=<name of sink to filter> "
65           "adjust_time=<how often to readjust rates in s> "
66           "adjust_threshold=<how much drift to readjust after in ms> "
67           "format=<sample format> "
68           "rate=<sample rate> "
69           "channels=<number of channels> "
70           "channel_map=<channel map> "
71           "aec_method=<implementation to use> "
72           "aec_args=<parameters for the AEC engine> "
73           "save_aec=<save AEC data in /tmp> "
74           "autoloaded=<set if this module is being loaded automatically> "
75           "use_volume_sharing=<yes or no> "
76           "use_master_format=<yes or no> "
77         ));
78
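/* Illustrative usage (editor's sketch, not part of the module source): a typical
 * default.pa line for a WebRTC-enabled build. The master device names below are
 * placeholders standing in for whatever `pactl list short sources` and
 * `pactl list short sinks` report on a given system.
 *
 *   load-module module-echo-cancel aec_method=webrtc use_master_format=yes \
 *       source_master=alsa_input.pci-0000_00_1b.0.analog-stereo \
 *       sink_master=alsa_output.pci-0000_00_1b.0.analog-stereo
 */
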
79 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
80 typedef enum {
81     PA_ECHO_CANCELLER_INVALID = -1,
82     PA_ECHO_CANCELLER_NULL,
83 #ifdef HAVE_SPEEX
84     PA_ECHO_CANCELLER_SPEEX,
85 #endif
86 #ifdef HAVE_ADRIAN_EC
87     PA_ECHO_CANCELLER_ADRIAN,
88 #endif
89 #ifdef HAVE_WEBRTC
90     PA_ECHO_CANCELLER_WEBRTC,
91 #endif
92 } pa_echo_canceller_method_t;
93
94 #ifdef HAVE_WEBRTC
95 #define DEFAULT_ECHO_CANCELLER "webrtc"
96 #else
97 #define DEFAULT_ECHO_CANCELLER "speex"
98 #endif
99
100 static const pa_echo_canceller ec_table[] = {
101     {
102         /* Null, Dummy echo canceller (just copies data) */
103         .init                   = pa_null_ec_init,
104         .run                    = pa_null_ec_run,
105         .done                   = pa_null_ec_done,
106     },
107 #ifdef HAVE_SPEEX
108     {
109         /* Speex */
110         .init                   = pa_speex_ec_init,
111         .run                    = pa_speex_ec_run,
112         .done                   = pa_speex_ec_done,
113     },
114 #endif
115 #ifdef HAVE_ADRIAN_EC
116     {
117         /* Adrian Andre's NLMS implementation */
118         .init                   = pa_adrian_ec_init,
119         .run                    = pa_adrian_ec_run,
120         .done                   = pa_adrian_ec_done,
121     },
122 #endif
123 #ifdef HAVE_WEBRTC
124     {
125         /* WebRTC's audio processing engine */
126         .init                   = pa_webrtc_ec_init,
127         .play                   = pa_webrtc_ec_play,
128         .record                 = pa_webrtc_ec_record,
129         .set_drift              = pa_webrtc_ec_set_drift,
130         .run                    = pa_webrtc_ec_run,
131         .done                   = pa_webrtc_ec_done,
132     },
133 #endif
134 };
135
136 #define DEFAULT_RATE 32000
137 #define DEFAULT_CHANNELS 1
138 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
139 #define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
140 #define DEFAULT_SAVE_AEC false
141 #define DEFAULT_AUTOLOADED false
142 #define DEFAULT_USE_MASTER_FORMAT false
143
144 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
145
146 #define MAX_LATENCY_BLOCKS 10
147
148 /* Can only be used in main context */
149 #define IS_ACTIVE(u) (((u)->source->state == PA_SOURCE_RUNNING) && \
150                       ((u)->sink->state == PA_SINK_RUNNING))
151
152 /* This module creates a new (virtual) source and sink.
153  *
154  * The data sent to the new sink is kept in a memblockq before being
155  * forwarded to the real sink_master.
156  *
157  * Data read from source_master is matched against the saved sink data and
158  * echo canceled data is then pushed onto the new source.
159  *
160  * Both source and sink masters have their own threads to push/pull data
161  * respectively. However, we perform all our actions in the source IO thread.
162  * To do this we send all played samples to the source IO thread where they
163  * are then pushed into the memblockq.
164  *
165  * Alignment is performed in two steps:
166  *
167  * 1) when something happens that requires quick adjustment of the alignment of
168  *    capture and playback samples, we perform a resync. This adjusts the
169  *    position in the playback memblock to the requested sample. Quick
170  *    adjustments include moving the playback samples before the capture
171  *    samples (because otherwise the echo canceller does not work) or when the
172  *    playback pointer drifts too far away.
173  *
174  * 2) periodically check the difference between capture and playback. We use a
175  *    low and high watermark for adjusting the alignment. Playback should always
176  *    be before capture and the difference should not be bigger than one frame
177  *    size. We would ideally like to resample the sink_input but most drivers
178  *    don't give enough accuracy to be able to do that right now.
179  */
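
/* Editor's note (rough mapping, inferred from the code below): step 1 is what
 * do_resync() and apply_diff_time() implement, triggered via the request_resync
 * flag; step 2 is time_callback(), which runs every adjust_time usec, resyncs
 * immediately when capture runs ahead of playback, and otherwise only corrects
 * once the measured drift exceeds adjust_threshold. */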
180
181 struct userdata;
182
183 struct pa_echo_canceller_msg {
184     pa_msgobject parent;
185     bool dead;
186     struct userdata *userdata;
187 };
188
189 PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject);
190 #define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o))
191
192 struct snapshot {
193     pa_usec_t sink_now;
194     pa_usec_t sink_latency;
195     size_t sink_delay;
196     int64_t send_counter;
197
198     pa_usec_t source_now;
199     pa_usec_t source_latency;
200     size_t source_delay;
201     int64_t recv_counter;
202     size_t rlen;
203     size_t plen;
204 };
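
/* Editor's note: a snapshot is filled in two halves. The sink I/O thread fills
 * the sink_* fields and send_counter (SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT), the
 * source I/O thread fills the source_* fields, recv_counter, rlen and plen
 * (source_output_snapshot_within_thread()); calc_diff() then combines both
 * halves into a single playback-vs-capture drift estimate. */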
205
206 struct userdata {
207     pa_core *core;
208     pa_module *module;
209
210     bool dead;
211     bool save_aec;
212
213     pa_echo_canceller *ec;
214     uint32_t source_output_blocksize;
215     uint32_t source_blocksize;
216     uint32_t sink_blocksize;
217
218     bool need_realign;
219
220     /* to wake up the source I/O thread */
221     pa_asyncmsgq *asyncmsgq;
222     pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
223
224     pa_source *source;
225     bool source_auto_desc;
226     pa_source_output *source_output;
227     pa_memblockq *source_memblockq; /* echo canceller needs fixed sized chunks */
228     size_t source_skip;
229
230     pa_sink *sink;
231     bool sink_auto_desc;
232     pa_sink_input *sink_input;
233     pa_memblockq *sink_memblockq;
234     int64_t send_counter;          /* updated in sink IO thread */
235     int64_t recv_counter;
236     size_t sink_skip;
237
238     /* Bytes left over from previous iteration */
239     size_t sink_rem;
240     size_t source_rem;
241
242     pa_atomic_t request_resync;
243
244     pa_time_event *time_event;
245     pa_usec_t adjust_time;
246     int adjust_threshold;
247
248     FILE *captured_file;
249     FILE *played_file;
250     FILE *canceled_file;
251     FILE *drift_file;
252
253     bool use_volume_sharing;
254
255     struct {
256         pa_cvolume current_volume;
257     } thread_info;
258 };
259
260 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
261
262 static const char* const valid_modargs[] = {
263     "source_name",
264     "source_properties",
265     "source_master",
266     "sink_name",
267     "sink_properties",
268     "sink_master",
269     "adjust_time",
270     "adjust_threshold",
271     "format",
272     "rate",
273     "channels",
274     "channel_map",
275     "aec_method",
276     "aec_args",
277     "save_aec",
278     "autoloaded",
279     "use_volume_sharing",
280     "use_master_format",
281     NULL
282 };
283
284 enum {
285     SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
286     SOURCE_OUTPUT_MESSAGE_REWIND,
287     SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
288     SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
289 };
290
291 enum {
292     SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
293 };
294
295 enum {
296     ECHO_CANCELLER_MESSAGE_SET_VOLUME,
297 };
298
299 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
300     int64_t diff_time, buffer_latency;
301     pa_usec_t plen, rlen, source_delay, sink_delay, recv_counter, send_counter;
302
303     /* get latency difference between playback and record */
304     plen = pa_bytes_to_usec(snapshot->plen, &u->sink_input->sample_spec);
305     rlen = pa_bytes_to_usec(snapshot->rlen, &u->source_output->sample_spec);
306     if (plen > rlen)
307         buffer_latency = plen - rlen;
308     else
309         buffer_latency = 0;
310
311     source_delay = pa_bytes_to_usec(snapshot->source_delay, &u->source_output->sample_spec);
312     sink_delay = pa_bytes_to_usec(snapshot->sink_delay, &u->sink_input->sample_spec);
313     buffer_latency += source_delay + sink_delay;
314
315     /* add the latency difference due to samples not yet transferred */
316     send_counter = pa_bytes_to_usec(snapshot->send_counter, &u->sink->sample_spec);
317     recv_counter = pa_bytes_to_usec(snapshot->recv_counter, &u->sink->sample_spec);
318     if (recv_counter <= send_counter)
319         buffer_latency += (int64_t) (send_counter - recv_counter);
320     else
321         buffer_latency = PA_CLIP_SUB(buffer_latency, (int64_t) (recv_counter - send_counter));
322
323     /* capture and playback are perfectly aligned when diff_time is 0 */
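    /* Editor's note on the sign (matches how time_callback() uses it): a negative
     * diff_time means capture samples arrive before the matching playback samples,
     * which the canceller cannot handle and therefore forces an immediate resync;
     * a positive value means playback lags and is only corrected once it exceeds
     * adjust_threshold. */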
324     diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
325           (snapshot->source_now - snapshot->source_latency);
326
327     pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
328         (long long) snapshot->sink_latency,
329         (long long) buffer_latency, (long long) snapshot->source_latency,
330         (long long) source_delay, (long long) sink_delay,
331         (long long) (send_counter - recv_counter),
332         (long long) (snapshot->sink_now - snapshot->source_now));
333
334     return diff_time;
335 }
336
337 /* Called from main context */
338 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
339     struct userdata *u = userdata;
340     uint32_t old_rate, base_rate, new_rate;
341     int64_t diff_time;
342     /*size_t fs*/
343     struct snapshot latency_snapshot;
344
345     pa_assert(u);
346     pa_assert(a);
347     pa_assert(u->time_event == e);
348     pa_assert_ctl_context();
349
350     if (!IS_ACTIVE(u))
351         return;
352
353     /* update our snapshots */
354     pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
355     pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
356
357     /* calculate drift between capture and playback */
358     diff_time = calc_diff(u, &latency_snapshot);
359
360     /*fs = pa_frame_size(&u->source_output->sample_spec);*/
361     old_rate = u->sink_input->sample_spec.rate;
362     base_rate = u->source_output->sample_spec.rate;
363
364     if (diff_time < 0) {
365         /* recording before playback, we need to adjust quickly. The echo
366          * canceller does not work in this case. */
367         pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
368             NULL, diff_time, NULL, NULL);
369         /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
370         new_rate = base_rate;
371     }
372     else {
373         if (diff_time > u->adjust_threshold) {
374             /* diff too big, quickly adjust */
375             pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
376                 NULL, diff_time, NULL, NULL);
377         }
378
379         /* recording behind playback, we need to slowly adjust the rate to match */
380         /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
381
382         /* assume equal samplerates for now */
383         new_rate = base_rate;
384     }
385
386     /* make sure we don't make too big adjustments because that sounds horrible */
387     if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
388         new_rate = base_rate;
389
390     if (new_rate != old_rate) {
391         pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
392
393         pa_sink_input_set_rate(u->sink_input, new_rate);
394     }
395
396     pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
397 }
398
399 /* Called from source I/O thread context */
400 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
401     struct userdata *u = PA_SOURCE(o)->userdata;
402
403     switch (code) {
404
405         case PA_SOURCE_MESSAGE_GET_LATENCY:
406
407             /* The source is _put() before the source output is, so let's
408              * make sure we don't access it in that time. Also, the
409              * source output is first shut down, the source second. */
410             if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
411                 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
412                 *((int64_t*) data) = 0;
413                 return 0;
414             }
415
416             *((int64_t*) data) =
417
418                 /* Get the latency of the master source */
419                 pa_source_get_latency_within_thread(u->source_output->source, true) +
420                 /* Add the latency internal to our source output on top */
421                 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
422                 /* and the buffering we do on the source */
423                 pa_bytes_to_usec(u->source_output_blocksize, &u->source_output->source->sample_spec);
424
425             return 0;
426
427         case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
428             u->thread_info.current_volume = u->source->reference_volume;
429             break;
430     }
431
432     return pa_source_process_msg(o, code, data, offset, chunk);
433 }
434
435 /* Called from sink I/O thread context */
436 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
437     struct userdata *u = PA_SINK(o)->userdata;
438
439     switch (code) {
440
441         case PA_SINK_MESSAGE_GET_LATENCY:
442
443             /* The sink is _put() before the sink input is, so let's
444              * make sure we don't access it in that time. Also, the
445              * sink input is first shut down, the sink second. */
446             if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
447                 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
448                 *((int64_t*) data) = 0;
449                 return 0;
450             }
451
452             *((int64_t*) data) =
453
454                 /* Get the latency of the master sink */
455                 pa_sink_get_latency_within_thread(u->sink_input->sink, true) +
456
457                 /* Add the latency internal to our sink input on top */
458                 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
459
460             return 0;
461     }
462
463     return pa_sink_process_msg(o, code, data, offset, chunk);
464 }
465
466 /* Called from main context */
467 static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t state, pa_suspend_cause_t suspend_cause) {
468     struct userdata *u;
469
470     pa_source_assert_ref(s);
471     pa_assert_se(u = s->userdata);
472
473     if (!PA_SOURCE_IS_LINKED(state) ||
474         !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state))
475         return 0;
476
477     if (state == PA_SOURCE_RUNNING) {
478         /* restart timer when both sink and source are active */
479         if ((u->sink->state == PA_SINK_RUNNING) && u->adjust_time)
480             pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
481
482         pa_atomic_store(&u->request_resync, 1);
483         pa_source_output_cork(u->source_output, false);
484     } else if (state == PA_SOURCE_SUSPENDED) {
485         pa_source_output_cork(u->source_output, true);
486     }
487
488     return 0;
489 }
490
491 /* Called from main context */
492 static int sink_set_state_in_main_thread_cb(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) {
493     struct userdata *u;
494
495     pa_sink_assert_ref(s);
496     pa_assert_se(u = s->userdata);
497
498     if (!PA_SINK_IS_LINKED(state) ||
499         !PA_SINK_INPUT_IS_LINKED(u->sink_input->state))
500         return 0;
501
502     if (state == PA_SINK_RUNNING) {
503         /* restart timer when both sink and source are active */
504         if ((u->source->state == PA_SOURCE_RUNNING) && u->adjust_time)
505             pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
506
507         pa_atomic_store(&u->request_resync, 1);
508         pa_sink_input_cork(u->sink_input, false);
509     } else if (state == PA_SINK_SUSPENDED) {
510         pa_sink_input_cork(u->sink_input, true);
511     }
512
513     return 0;
514 }
515
516 /* Called from the IO thread. */
517 static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) {
518     struct userdata *u;
519
520     pa_assert(s);
521     pa_assert_se(u = s->userdata);
522
523     /* When set to running or idle for the first time, request a rewind
524      * of the master sink to make sure we are heard immediately */
525     if (PA_SINK_IS_OPENED(new_state) && s->thread_info.state == PA_SINK_INIT) {
526         pa_log_debug("Requesting rewind due to state change.");
527         pa_sink_input_request_rewind(u->sink_input, 0, false, true, true);
528     }
529
530     return 0;
531 }
532
533 /* Called from source I/O thread context */
534 static void source_update_requested_latency_cb(pa_source *s) {
535     struct userdata *u;
536     pa_usec_t latency;
537
538     pa_source_assert_ref(s);
539     pa_assert_se(u = s->userdata);
540
541     if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
542         !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
543         return;
544
545     pa_log_debug("Source update requested latency");
546
547     /* Cap the maximum latency so we don't have to process too large chunks */
548     latency = PA_MIN(pa_source_get_requested_latency_within_thread(s),
549                      pa_bytes_to_usec(u->source_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS);
550
551     pa_source_output_set_requested_latency_within_thread(u->source_output, latency);
552 }
553
554 /* Called from sink I/O thread context */
555 static void sink_update_requested_latency_cb(pa_sink *s) {
556     struct userdata *u;
557     pa_usec_t latency;
558
559     pa_sink_assert_ref(s);
560     pa_assert_se(u = s->userdata);
561
562     if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
563         !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
564         return;
565
566     pa_log_debug("Sink update requested latency");
567
568     /* Cap the maximum latency so we don't have to process too large chunks */
569     latency = PA_MIN(pa_sink_get_requested_latency_within_thread(s),
570                      pa_bytes_to_usec(u->sink_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS);
571
572     pa_sink_input_set_requested_latency_within_thread(u->sink_input, latency);
573 }
574
575 /* Called from sink I/O thread context */
576 static void sink_request_rewind_cb(pa_sink *s) {
577     struct userdata *u;
578
579     pa_sink_assert_ref(s);
580     pa_assert_se(u = s->userdata);
581
582     if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
583         !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
584         return;
585
586     pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
587
588     /* Just hand this one over to the master sink */
589     pa_sink_input_request_rewind(u->sink_input,
590                                  s->thread_info.rewind_nbytes, true, false, false);
591 }
592
593 /* Called from main context */
594 static void source_set_volume_cb(pa_source *s) {
595     struct userdata *u;
596
597     pa_source_assert_ref(s);
598     pa_assert_se(u = s->userdata);
599
600     if (!PA_SOURCE_IS_LINKED(s->state) ||
601         !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state))
602         return;
603
604     pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, true);
605 }
606
607 /* Called from main context */
608 static void sink_set_volume_cb(pa_sink *s) {
609     struct userdata *u;
610
611     pa_sink_assert_ref(s);
612     pa_assert_se(u = s->userdata);
613
614     if (!PA_SINK_IS_LINKED(s->state) ||
615         !PA_SINK_INPUT_IS_LINKED(u->sink_input->state))
616         return;
617
618     pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, true);
619 }
620
621 /* Called from main context. */
622 static void source_get_volume_cb(pa_source *s) {
623     struct userdata *u;
624     pa_cvolume v;
625
626     pa_source_assert_ref(s);
627     pa_assert_se(u = s->userdata);
628
629     if (!PA_SOURCE_IS_LINKED(s->state) ||
630         !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state))
631         return;
632
633     pa_source_output_get_volume(u->source_output, &v, true);
634
635     if (pa_cvolume_equal(&s->real_volume, &v))
636         /* no change */
637         return;
638
639     s->real_volume = v;
640     pa_source_set_soft_volume(s, NULL);
641 }
642
643 /* Called from main context */
644 static void source_set_mute_cb(pa_source *s) {
645     struct userdata *u;
646
647     pa_source_assert_ref(s);
648     pa_assert_se(u = s->userdata);
649
650     if (!PA_SOURCE_IS_LINKED(s->state) ||
651         !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state))
652         return;
653
654     pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
655 }
656
657 /* Called from main context */
658 static void sink_set_mute_cb(pa_sink *s) {
659     struct userdata *u;
660
661     pa_sink_assert_ref(s);
662     pa_assert_se(u = s->userdata);
663
664     if (!PA_SINK_IS_LINKED(s->state) ||
665         !PA_SINK_INPUT_IS_LINKED(u->sink_input->state))
666         return;
667
668     pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
669 }
670
671 /* Called from source I/O thread context. */
672 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
673     int64_t diff;
674
675     if (diff_time < 0) {
676         diff = pa_usec_to_bytes(-diff_time, &u->sink_input->sample_spec);
677
678         if (diff > 0) {
679             /* add some extra safety samples to compensate for jitter in the
680              * timings */
681             diff += 10 * pa_frame_size (&u->sink_input->sample_spec);
682
683             pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
684
685             u->sink_skip = diff;
686             u->source_skip = 0;
687         }
688     } else if (diff_time > 0) {
689         diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
690
691         if (diff > 0) {
692             pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
693
694             u->source_skip = diff;
695             u->sink_skip = 0;
696         }
697     }
698 }
699
700 /* Called from source I/O thread context. */
701 static void do_resync(struct userdata *u) {
702     int64_t diff_time;
703     struct snapshot latency_snapshot;
704
705     pa_log("Doing resync");
706
707     /* update our snapshot */
708     /* 1. Get sink input latency snapshot, might cause buffers to be sent to source thread */
709     pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
710     /* 2. Pick up any in-flight buffers (and discard if needed) */
711     while (pa_asyncmsgq_process_one(u->asyncmsgq))
712         ;
713     /* 3. Now get the source output latency snapshot */
714     source_output_snapshot_within_thread(u, &latency_snapshot);
715
716     /* calculate drift between capture and playback */
717     diff_time = calc_diff(u, &latency_snapshot);
718
719     /* and adjust for the drift */
720     apply_diff_time(u, diff_time);
721 }
722
723 /* 1. Calculate drift at this point, pass to canceller
724  * 2. Push out playback samples in blocksize chunks
725  * 3. Push out capture samples in blocksize chunks
726  * 4. ???
727  * 5. Profit
728  *
729  * Called from source I/O thread context.
730  */
731 static void do_push_drift_comp(struct userdata *u) {
732     size_t rlen, plen;
733     pa_memchunk rchunk, pchunk, cchunk;
734     uint8_t *rdata, *pdata, *cdata;
735     float drift;
736     int unused PA_GCC_UNUSED;
737
738     rlen = pa_memblockq_get_length(u->source_memblockq);
739     plen = pa_memblockq_get_length(u->sink_memblockq);
740
741     /* Estimate snapshot drift as follows:
742      *   pd: amount of playback data consumed since last time
743      *   rd: amount of record (capture) data consumed since last time
744      *
745      *   drift = (pd - rd) / rd;
746      *
747      * We calculate pd and rd as the memblockq length less the number of
748      * samples left from the last iteration (to avoid double counting
749      * those remainder samples).
750      */
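    /* Worked example (illustrative numbers only): if 32000 new playback bytes and
     * 31680 new capture bytes were consumed since the last iteration, then
     * drift = (32000 - 31680) / 31680, i.e. roughly 0.01, meaning playback ran
     * about 1% fast relative to capture; the value is handed to the canceller
     * below via set_drift(). */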
751     drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
752     u->sink_rem = plen % u->sink_blocksize;
753     u->source_rem = rlen % u->source_output_blocksize;
754
755     if (u->save_aec) {
756         if (u->drift_file)
757             fprintf(u->drift_file, "d %a\n", drift);
758     }
759
760     /* Send in the playback samples first */
761     while (plen >= u->sink_blocksize) {
762         pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
763         pdata = pa_memblock_acquire(pchunk.memblock);
764         pdata += pchunk.index;
765
766         u->ec->play(u->ec, pdata);
767
768         if (u->save_aec) {
769             if (u->drift_file)
770                 fprintf(u->drift_file, "p %d\n", u->sink_blocksize);
771             if (u->played_file)
772                 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
773         }
774
775         pa_memblock_release(pchunk.memblock);
776         pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
777         pa_memblock_unref(pchunk.memblock);
778
779         plen -= u->sink_blocksize;
780     }
781
782     /* And now the capture samples */
783     while (rlen >= u->source_output_blocksize) {
784         pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk);
785
786         rdata = pa_memblock_acquire(rchunk.memblock);
787         rdata += rchunk.index;
788
789         cchunk.index = 0;
790         cchunk.length = u->source_output_blocksize;
791         cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
792         cdata = pa_memblock_acquire(cchunk.memblock);
793
794         u->ec->set_drift(u->ec, drift);
795         u->ec->record(u->ec, rdata, cdata);
796
797         if (u->save_aec) {
798             if (u->drift_file)
799                 fprintf(u->drift_file, "c %d\n", u->source_output_blocksize);
800             if (u->captured_file)
801                 unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file);
802             if (u->canceled_file)
803                 unused = fwrite(cdata, 1, u->source_output_blocksize, u->canceled_file);
804         }
805
806         pa_memblock_release(cchunk.memblock);
807         pa_memblock_release(rchunk.memblock);
808
809         pa_memblock_unref(rchunk.memblock);
810
811         pa_source_post(u->source, &cchunk);
812         pa_memblock_unref(cchunk.memblock);
813
814         pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize);
815         rlen -= u->source_output_blocksize;
816     }
817 }
818
819 /* This one's simpler than the drift compensation case -- we just iterate over
820  * the capture buffer, and pass the canceller blocksize bytes of playback and
821  * capture data. If playback is currently inactive, we just push silence.
822  *
823  * Called from source I/O thread context. */
824 static void do_push(struct userdata *u) {
825     size_t rlen, plen;
826     pa_memchunk rchunk, pchunk, cchunk;
827     uint8_t *rdata, *pdata, *cdata;
828     int unused PA_GCC_UNUSED;
829
830     rlen = pa_memblockq_get_length(u->source_memblockq);
831     plen = pa_memblockq_get_length(u->sink_memblockq);
832
833     while (rlen >= u->source_output_blocksize) {
834
835         /* take fixed blocks from recorded and played samples */
836         pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk);
837         pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
838
839         /* we ran out of played data and pchunk has been filled with silence bytes */
840         if (plen < u->sink_blocksize)
841             pa_memblockq_seek(u->sink_memblockq, u->sink_blocksize - plen, PA_SEEK_RELATIVE, true);
842
843         rdata = pa_memblock_acquire(rchunk.memblock);
844         rdata += rchunk.index;
845         pdata = pa_memblock_acquire(pchunk.memblock);
846         pdata += pchunk.index;
847
848         cchunk.index = 0;
849         cchunk.length = u->source_blocksize;
850         cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
851         cdata = pa_memblock_acquire(cchunk.memblock);
852
853         if (u->save_aec) {
854             if (u->captured_file)
855                 unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file);
856             if (u->played_file)
857                 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
858         }
859
860         /* perform echo cancellation */
861         u->ec->run(u->ec, rdata, pdata, cdata);
862
863         if (u->save_aec) {
864             if (u->canceled_file)
865                 unused = fwrite(cdata, 1, u->source_blocksize, u->canceled_file);
866         }
867
868         pa_memblock_release(cchunk.memblock);
869         pa_memblock_release(pchunk.memblock);
870         pa_memblock_release(rchunk.memblock);
871
872         /* drop consumed source samples */
873         pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize);
874         pa_memblock_unref(rchunk.memblock);
875         rlen -= u->source_output_blocksize;
876
877         /* drop consumed sink samples */
878         pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
879         pa_memblock_unref(pchunk.memblock);
880
881         if (plen >= u->sink_blocksize)
882             plen -= u->sink_blocksize;
883         else
884             plen = 0;
885
886         /* forward the (echo-canceled) data to the virtual source */
887         pa_source_post(u->source, &cchunk);
888         pa_memblock_unref(cchunk.memblock);
889     }
890 }
891
892 /* Called from source I/O thread context. */
893 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
894     struct userdata *u;
895     size_t rlen, plen, to_skip;
896     pa_memchunk rchunk;
897
898     pa_source_output_assert_ref(o);
899     pa_source_output_assert_io_context(o);
900     pa_assert_se(u = o->userdata);
901
902     if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state))
903         return;
904
905     if (!PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
906         pa_log("Push when no link?");
907         return;
908     }
909
910     /* handle queued messages, do any message sending of our own */
911     while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
912         ;
913
914     pa_memblockq_push_align(u->source_memblockq, chunk);
915
916     rlen = pa_memblockq_get_length(u->source_memblockq);
917     plen = pa_memblockq_get_length(u->sink_memblockq);
918
919     /* Let's not do anything else till we have enough data to process */
920     if (rlen < u->source_output_blocksize)
921         return;
922
923     /* See if we need to drop samples in order to sync */
924     if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
925         do_resync(u);
926     }
927
928     /* Okay, skip cancellation for skipped source samples if needed. */
929     if (PA_UNLIKELY(u->source_skip)) {
930         /* The slightly tricky bit here is that we drop all but modulo
931          * blocksize bytes and then adjust for that last bit on the sink side.
932          * We do this because the source data is coming at a fixed rate, which
933          * means the only way to try to catch up is to drop sink samples and
934          * let the canceller cope with this. */
935         to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
936         to_skip -= to_skip % u->source_output_blocksize;
937
938         if (to_skip) {
939             pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
940             pa_source_post(u->source, &rchunk);
941
942             pa_memblock_unref(rchunk.memblock);
943             pa_memblockq_drop(u->source_memblockq, to_skip);
944
945             rlen -= to_skip;
946             u->source_skip -= to_skip;
947         }
948
949         if (rlen && u->source_skip % u->source_output_blocksize) {
950             u->sink_skip += (uint64_t) (u->source_output_blocksize - (u->source_skip % u->source_output_blocksize)) * u->sink_blocksize / u->source_output_blocksize;
951             u->source_skip -= (u->source_skip % u->source_output_blocksize);
952         }
953     }
954
955     /* And for the sink, these samples have been played back already, so we can
956      * just drop them and get on with it. */
957     if (PA_UNLIKELY(u->sink_skip)) {
958         to_skip = plen >= u->sink_skip ? u->sink_skip : plen;
959
960         pa_memblockq_drop(u->sink_memblockq, to_skip);
961
962         plen -= to_skip;
963         u->sink_skip -= to_skip;
964     }
965
966     /* process and push out samples */
967     if (u->ec->params.drift_compensation)
968         do_push_drift_comp(u);
969     else
970         do_push(u);
971 }
972
973 /* Called from sink I/O thread context. */
974 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
975     struct userdata *u;
976
977     pa_sink_input_assert_ref(i);
978     pa_assert(chunk);
979     pa_assert_se(u = i->userdata);
980
981     if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
982         return -1;
983
984     if (u->sink->thread_info.rewind_requested)
985         pa_sink_process_rewind(u->sink, 0);
986
987     pa_sink_render_full(u->sink, nbytes, chunk);
988
989     if (i->thread_info.underrun_for > 0) {
990         pa_log_debug("Handling end of underrun.");
991         pa_atomic_store(&u->request_resync, 1);
992     }
993
994     /* let source thread handle the chunk. pass the sample count as well so that
995      * the source IO thread can update the right variables. */
996     pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
997         NULL, 0, chunk, NULL);
998     u->send_counter += chunk->length;
999
1000     return 0;
1001 }
1002
1003 /* Called from source I/O thread context. */
1004 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
1005     struct userdata *u;
1006
1007     pa_source_output_assert_ref(o);
1008     pa_source_output_assert_io_context(o);
1009     pa_assert_se(u = o->userdata);
1010
1011     /* If the source is not yet linked, there is nothing to rewind */
1012     if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state))
1013         return;
1014
1015     pa_source_process_rewind(u->source, nbytes);
1016
1017     /* go back on read side, we need to use older sink data for this */
1018     pa_memblockq_rewind(u->sink_memblockq, nbytes);
1019
1020     /* manipulate write index */
1021     pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, true);
1022
1023     pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
1024         (long long) pa_memblockq_get_length (u->source_memblockq));
1025 }
1026
1027 /* Called from sink I/O thread context. */
1028 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
1029     struct userdata *u;
1030
1031     pa_sink_input_assert_ref(i);
1032     pa_assert_se(u = i->userdata);
1033
1034     /* If the sink is not yet linked, there is nothing to rewind */
1035     if (!PA_SINK_IS_LINKED(u->sink->thread_info.state))
1036         return;
1037
1038     pa_log_debug("Sink process rewind %lld", (long long) nbytes);
1039
1040     pa_sink_process_rewind(u->sink, nbytes);
1041
1042     pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
1043     u->send_counter -= nbytes;
1044 }
1045
1046 /* Called from source I/O thread context. */
1047 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
1048     size_t delay, rlen, plen;
1049     pa_usec_t now, latency;
1050
1051     now = pa_rtclock_now();
1052     latency = pa_source_get_latency_within_thread(u->source_output->source, false);
1053     delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
1054
1055     delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
1056     rlen = pa_memblockq_get_length(u->source_memblockq);
1057     plen = pa_memblockq_get_length(u->sink_memblockq);
1058
1059     snapshot->source_now = now;
1060     snapshot->source_latency = latency;
1061     snapshot->source_delay = delay;
1062     snapshot->recv_counter = u->recv_counter;
1063     snapshot->rlen = rlen + u->sink_skip;
1064     snapshot->plen = plen + u->source_skip;
1065 }
1066
1067 /* Called from source I/O thread context. */
1068 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1069     struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
1070
1071     switch (code) {
1072
1073         case SOURCE_OUTPUT_MESSAGE_POST:
1074
1075             pa_source_output_assert_io_context(u->source_output);
1076
1077             if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
1078                 pa_memblockq_push_align(u->sink_memblockq, chunk);
1079             else
1080                 pa_memblockq_flush_write(u->sink_memblockq, true);
1081
1082             u->recv_counter += (int64_t) chunk->length;
1083
1084             return 0;
1085
1086         case SOURCE_OUTPUT_MESSAGE_REWIND:
1087             pa_source_output_assert_io_context(u->source_output);
1088
1089             /* manipulate write index, never go past what we have */
1090             if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
1091                 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, true);
1092             else
1093                 pa_memblockq_flush_write(u->sink_memblockq, true);
1094
1095             pa_log_debug("Sink rewind (%lld)", (long long) offset);
1096
1097             u->recv_counter -= offset;
1098
1099             return 0;
1100
1101         case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
1102             struct snapshot *snapshot = (struct snapshot *) data;
1103
1104             source_output_snapshot_within_thread(u, snapshot);
1105             return 0;
1106         }
1107
1108         case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
1109             apply_diff_time(u, offset);
1110             return 0;
1111
1112     }
1113
1114     return pa_source_output_process_msg(obj, code, data, offset, chunk);
1115 }
1116
1117 /* Called from sink I/O thread context. */
1118 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1119     struct userdata *u = PA_SINK_INPUT(obj)->userdata;
1120
1121     switch (code) {
1122
1123         case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
1124             size_t delay;
1125             pa_usec_t now, latency;
1126             struct snapshot *snapshot = (struct snapshot *) data;
1127
1128             pa_sink_input_assert_io_context(u->sink_input);
1129
1130             now = pa_rtclock_now();
1131             latency = pa_sink_get_latency_within_thread(u->sink_input->sink, false);
1132             delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
1133
1134             delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
1135
1136             snapshot->sink_now = now;
1137             snapshot->sink_latency = latency;
1138             snapshot->sink_delay = delay;
1139             snapshot->send_counter = u->send_counter;
1140             return 0;
1141         }
1142     }
1143
1144     return pa_sink_input_process_msg(obj, code, data, offset, chunk);
1145 }
1146
1147 /* Called from sink I/O thread context. */
1148 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
1149     struct userdata *u;
1150
1151     pa_sink_input_assert_ref(i);
1152     pa_assert_se(u = i->userdata);
1153
1154     pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
1155
1156     /* FIXME: Too small max_rewind:
1157      * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1158     pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
1159     pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
1160 }
1161
1162 /* Called from source I/O thread context. */
1163 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
1164     struct userdata *u;
1165
1166     pa_source_output_assert_ref(o);
1167     pa_assert_se(u = o->userdata);
1168
1169     pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
1170
1171     pa_source_set_max_rewind_within_thread(u->source, nbytes);
1172 }
1173
1174 /* Called from sink I/O thread context. */
1175 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
1176     struct userdata *u;
1177
1178     pa_sink_input_assert_ref(i);
1179     pa_assert_se(u = i->userdata);
1180
1181     pa_log_debug("Sink input update max request %lld", (long long) nbytes);
1182
1183     pa_sink_set_max_request_within_thread(u->sink, nbytes);
1184 }
1185
1186 /* Called from sink I/O thread context. */
1187 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
1188     struct userdata *u;
1189     pa_usec_t latency;
1190
1191     pa_sink_input_assert_ref(i);
1192     pa_assert_se(u = i->userdata);
1193
1194     latency = pa_sink_get_requested_latency_within_thread(i->sink);
1195
1196     pa_log_debug("Sink input update requested latency %lld", (long long) latency);
1197 }
1198
1199 /* Called from source I/O thread context. */
1200 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
1201     struct userdata *u;
1202     pa_usec_t latency;
1203
1204     pa_source_output_assert_ref(o);
1205     pa_assert_se(u = o->userdata);
1206
1207     latency = pa_source_get_requested_latency_within_thread(o->source);
1208
1209     pa_log_debug("Source output update requested latency %lld", (long long) latency);
1210 }
1211
1212 /* Called from sink I/O thread context. */
1213 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
1214     struct userdata *u;
1215
1216     pa_sink_input_assert_ref(i);
1217     pa_assert_se(u = i->userdata);
1218
1219     pa_log_debug("Sink input update latency range %lld %lld",
1220         (long long) i->sink->thread_info.min_latency,
1221         (long long) i->sink->thread_info.max_latency);
1222
1223     pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1224 }
1225
1226 /* Called from source I/O thread context. */
1227 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1228     struct userdata *u;
1229
1230     pa_source_output_assert_ref(o);
1231     pa_assert_se(u = o->userdata);
1232
1233     pa_log_debug("Source output update latency range %lld %lld",
1234         (long long) o->source->thread_info.min_latency,
1235         (long long) o->source->thread_info.max_latency);
1236
1237     pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1238 }
1239
1240 /* Called from sink I/O thread context. */
1241 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1242     struct userdata *u;
1243
1244     pa_sink_input_assert_ref(i);
1245     pa_assert_se(u = i->userdata);
1246
1247     pa_log_debug("Sink input update fixed latency %lld",
1248         (long long) i->sink->thread_info.fixed_latency);
1249
1250     pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1251 }
1252
1253 /* Called from source I/O thread context. */
1254 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1255     struct userdata *u;
1256
1257     pa_source_output_assert_ref(o);
1258     pa_assert_se(u = o->userdata);
1259
1260     pa_log_debug("Source output update fixed latency %lld",
1261         (long long) o->source->thread_info.fixed_latency);
1262
1263     pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1264 }
1265
1266 /* Called from source I/O thread context. */
1267 static void source_output_attach_cb(pa_source_output *o) {
1268     struct userdata *u;
1269
1270     pa_source_output_assert_ref(o);
1271     pa_source_output_assert_io_context(o);
1272     pa_assert_se(u = o->userdata);
1273
1274     pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1275     pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1276     pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1277     pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1278
1279     pa_log_debug("Source output %d attach", o->index);
1280
1281     if (PA_SOURCE_IS_LINKED(u->source->thread_info.state))
1282         pa_source_attach_within_thread(u->source);
1283
1284     u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1285             o->source->thread_info.rtpoll,
1286             PA_RTPOLL_LATE,
1287             u->asyncmsgq);
1288 }
1289
1290 /* Called from sink I/O thread context. */
1291 static void sink_input_attach_cb(pa_sink_input *i) {
1292     struct userdata *u;
1293
1294     pa_sink_input_assert_ref(i);
1295     pa_assert_se(u = i->userdata);
1296
1297     pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1298     pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1299
1300     /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1301      * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1302     pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1303
1304     /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1305      * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1306      * HERE. SEE (6) */
1307     pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1308
1309     /* FIXME: Too small max_rewind:
1310      * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1311     pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1312
1313     pa_log_debug("Sink input %d attach", i->index);
1314
1315     u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1316             i->sink->thread_info.rtpoll,
1317             PA_RTPOLL_LATE,
1318             u->asyncmsgq);
1319
1320     if (PA_SINK_IS_LINKED(u->sink->thread_info.state))
1321         pa_sink_attach_within_thread(u->sink);
1322 }
1323
1324 /* Called from source I/O thread context. */
1325 static void source_output_detach_cb(pa_source_output *o) {
1326     struct userdata *u;
1327
1328     pa_source_output_assert_ref(o);
1329     pa_source_output_assert_io_context(o);
1330     pa_assert_se(u = o->userdata);
1331
1332     if (PA_SOURCE_IS_LINKED(u->source->thread_info.state))
1333         pa_source_detach_within_thread(u->source);
1334     pa_source_set_rtpoll(u->source, NULL);
1335
1336     pa_log_debug("Source output %d detach", o->index);
1337
1338     if (u->rtpoll_item_read) {
1339         pa_rtpoll_item_free(u->rtpoll_item_read);
1340         u->rtpoll_item_read = NULL;
1341     }
1342 }
1343
1344 /* Called from sink I/O thread context. */
1345 static void sink_input_detach_cb(pa_sink_input *i) {
1346     struct userdata *u;
1347
1348     pa_sink_input_assert_ref(i);
1349     pa_assert_se(u = i->userdata);
1350
1351     if (PA_SINK_IS_LINKED(u->sink->thread_info.state))
1352         pa_sink_detach_within_thread(u->sink);
1353
1354     pa_sink_set_rtpoll(u->sink, NULL);
1355
1356     pa_log_debug("Sink input %d detach", i->index);
1357
1358     if (u->rtpoll_item_write) {
1359         pa_rtpoll_item_free(u->rtpoll_item_write);
1360         u->rtpoll_item_write = NULL;
1361     }
1362 }
1363
1364 /* Called from source I/O thread context except when cork() is called without a valid source. */
1365 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1366     struct userdata *u;
1367
1368     pa_source_output_assert_ref(o);
1369     pa_assert_se(u = o->userdata);
1370
1371     pa_log_debug("Source output %d state %d", o->index, state);
1372 }
1373
1374 /* Called from sink I/O thread context. */
1375 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1376     struct userdata *u;
1377
1378     pa_sink_input_assert_ref(i);
1379     pa_assert_se(u = i->userdata);
1380
1381     pa_log_debug("Sink input %d state %d", i->index, state);
1382 }
1383
1384 /* Called from main context. */
1385 static void source_output_kill_cb(pa_source_output *o) {
1386     struct userdata *u;
1387
1388     pa_source_output_assert_ref(o);
1389     pa_assert_ctl_context();
1390     pa_assert_se(u = o->userdata);
1391
1392     u->dead = true;
1393
1394     /* The order here matters! We first kill the source so that streams can
1395      * properly be moved away while the source output is still connected to
1396      * the master. */
1397     pa_source_output_cork(u->source_output, true);
1398     pa_source_unlink(u->source);
1399     pa_source_output_unlink(u->source_output);
1400
1401     pa_source_output_unref(u->source_output);
1402     u->source_output = NULL;
1403
1404     pa_source_unref(u->source);
1405     u->source = NULL;
1406
1407     pa_log_debug("Source output kill %d", o->index);
1408
1409     pa_module_unload_request(u->module, true);
1410 }
1411
1412 /* Called from main context */
1413 static void sink_input_kill_cb(pa_sink_input *i) {
1414     struct userdata *u;
1415
1416     pa_sink_input_assert_ref(i);
1417     pa_assert_se(u = i->userdata);
1418
1419     u->dead = true;
1420
1421     /* The order here matters! We first kill the sink so that streams
1422      * can properly be moved away while the sink input is still connected
1423      * to the master. */
1424     pa_sink_input_cork(u->sink_input, true);
1425     pa_sink_unlink(u->sink);
1426     pa_sink_input_unlink(u->sink_input);
1427
1428     pa_sink_input_unref(u->sink_input);
1429     u->sink_input = NULL;
1430
1431     pa_sink_unref(u->sink);
1432     u->sink = NULL;
1433
1434     pa_log_debug("Sink input kill %d", i->index);
1435
1436     pa_module_unload_request(u->module, true);
1437 }
1438
1439 /* Called from main context. */
1440 static bool source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1441     struct userdata *u;
1442
1443     pa_source_output_assert_ref(o);
1444     pa_assert_ctl_context();
1445     pa_assert_se(u = o->userdata);
1446
1447     if (u->dead)
1448         return false;
1449
1450     return (u->source != dest) && (u->sink != dest->monitor_of);
1451 }
1452
1453 /* Called from main context */
1454 static bool sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1455     struct userdata *u;
1456
1457     pa_sink_input_assert_ref(i);
1458     pa_assert_se(u = i->userdata);
1459
1460     if (u->dead)
1461         return false;
1462
1463     return u->sink != dest;
1464 }
1465
1466 /* Called from main context. */
1467 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1468     struct userdata *u;
1469     uint32_t idx;
1470     pa_source_output *output;
1471
1472     pa_source_output_assert_ref(o);
1473     pa_assert_ctl_context();
1474     pa_assert_se(u = o->userdata);
1475
1476     if (dest) {
1477         pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1478         pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1479     } else
1480         pa_source_set_asyncmsgq(u->source, NULL);
1481
1482     /* Propagate asyncmsgq change to attached virtual sources */
1483     PA_IDXSET_FOREACH(output, u->source->outputs, idx) {
1484         if (output->destination_source && output->moving)
1485             output->moving(output, u->source);
1486     }
1487
1488     if (u->source_auto_desc && dest) {
1489         const char *y, *z;
1490         pa_proplist *pl;
1491
1492         pl = pa_proplist_new();
1493         if (u->sink_input->sink) {
1494             pa_proplist_sets(pl, PA_PROP_DEVICE_MASTER_DEVICE, u->sink_input->sink->name);
1495             y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION);
1496         } else
1497             y = "<unknown>"; /* Probably in the middle of a move */
1498         z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1499         pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1500                 y ? y : u->sink_input->sink->name);
1501
1502         pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1503         pa_proplist_free(pl);
1504     }
1505 }
1506
1507 /* Called from main context */
1508 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1509     struct userdata *u;
1510
1511     pa_sink_input_assert_ref(i);
1512     pa_assert_se(u = i->userdata);
1513
1514     if (dest) {
1515         pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1516         pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1517     } else
1518         pa_sink_set_asyncmsgq(u->sink, NULL);
1519
1520     if (u->sink_auto_desc && dest) {
1521         const char *y, *z;
1522         pa_proplist *pl;
1523
1524         pl = pa_proplist_new();
1525         if (u->source_output->source) {
1526             pa_proplist_sets(pl, PA_PROP_DEVICE_MASTER_DEVICE, u->source_output->source->name);
1527             y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION);
1528         } else
1529             y = "<unknown>"; /* Probably in the middle of a move */
1530         z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1531         pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1532                          y ? y : u->source_output->source->name);
1533
1534         pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1535         pa_proplist_free(pl);
1536     }
1537 }
1538
1539 /* Called from main context */
1540 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1541     struct userdata *u;
1542
1543     pa_sink_input_assert_ref(i);
1544     pa_assert_se(u = i->userdata);
1545
1546     pa_sink_volume_changed(u->sink, &i->volume);
1547 }
1548
1549 /* Called from main context */
1550 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1551     struct userdata *u;
1552
1553     pa_sink_input_assert_ref(i);
1554     pa_assert_se(u = i->userdata);
1555
1556     pa_sink_mute_changed(u->sink, i->muted);
1557 }
1558
1559 /* Called from main context */
1560 static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1561     struct pa_echo_canceller_msg *msg;
1562     struct userdata *u;
1563
1564     pa_assert(o);
1565
1566     msg = PA_ECHO_CANCELLER_MSG(o);
1567
1568     /* When the module is unloaded, messages for the canceller may still be
1569      * queued. Messages are sent to the main thread using the master source's
1570      * asyncmsgq, and that message queue isn't (and can't be, at least with
1571      * the current asyncmsgq API) cleared of the canceller messages when
1572      * module-echo-cancel is unloaded.
1573      *
1574      * The userdata may already have been freed at this point, but the
1575      * asyncmsgq holds a reference to the pa_echo_canceller_msg object, which
1576      * contains a flag to indicate that all remaining messages have to be
1577      * ignored. */
1578     if (msg->dead)
1579         return 0;
1580
1581     u = msg->userdata;
1582
1583     switch (code) {
1584         case ECHO_CANCELLER_MESSAGE_SET_VOLUME: {
1585             pa_volume_t v = PA_PTR_TO_UINT(userdata);
1586             pa_cvolume vol;
1587
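                 /* The canceller asked us to change the capture volume. With
                  * volume sharing enabled the virtual source shares its volume
                  * with the master, so set it there; otherwise adjust our own
                  * source output's volume. */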
1588             if (u->use_volume_sharing) {
1589                 pa_cvolume_set(&vol, u->source->sample_spec.channels, v);
1590                 pa_source_set_volume(u->source, &vol, true, false);
1591             } else {
1592                 pa_cvolume_set(&vol, u->source_output->sample_spec.channels, v);
1593                 pa_source_output_set_volume(u->source_output, &vol, false, true);
1594             }
1595
1596             break;
1597         }
1598
1599         default:
1600             pa_assert_not_reached();
1601             break;
1602     }
1603
1604     return 0;
1605 }
1606
1607 /* Called by the canceller, so source I/O thread context. */
1608 pa_volume_t pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec) {
1609 #ifndef ECHO_CANCEL_TEST
1610     return pa_cvolume_avg(&ec->msg->userdata->thread_info.current_volume);
1611 #else
1612     return PA_VOLUME_NORM;
1613 #endif
1614 }
1615
1616 /* Called by the canceller, so source I/O thread context. */
1617 void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_volume_t v) {
1618 #ifndef ECHO_CANCEL_TEST
1619     if (pa_cvolume_avg(&ec->msg->userdata->thread_info.current_volume) != v) {
1620         pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, PA_UINT_TO_PTR(v),
1621                 0, NULL, NULL);
1622     }
1623 #endif
1624 }
1625
1626 uint32_t pa_echo_canceller_blocksize_power2(unsigned rate, unsigned ms) {
1627     unsigned nframes = (rate * ms) / 1000;
1628     uint32_t y = 1 << ((8 * sizeof(uint32_t)) - 2);
1629
1630     pa_assert(rate >= 4000);
1631     pa_assert(ms >= 1);
1632
1633     /* nframes should be a power of two; round it down to the nearest one. */
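         /* For example, rate = 48000 and ms = 10 give nframes = 480, which is
          * rounded down to 256. */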
1634     while (y > nframes)
1635         y >>= 1;
1636
1637     pa_assert(y >= 1);
1638     return y;
1639 }
1640
1641 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1642     if (pa_streq(method, "null"))
1643         return PA_ECHO_CANCELLER_NULL;
1644 #ifdef HAVE_SPEEX
1645     if (pa_streq(method, "speex"))
1646         return PA_ECHO_CANCELLER_SPEEX;
1647 #endif
1648 #ifdef HAVE_ADRIAN_EC
1649     if (pa_streq(method, "adrian"))
1650         return PA_ECHO_CANCELLER_ADRIAN;
1651 #endif
1652 #ifdef HAVE_WEBRTC
1653     if (pa_streq(method, "webrtc"))
1654         return PA_ECHO_CANCELLER_WEBRTC;
1655 #endif
1656     return PA_ECHO_CANCELLER_INVALID;
1657 }
1658
1659 /* Common initialisation bits between module-echo-cancel and the standalone
1660  * test program.
1661  *
1662  * Called from main context. */
1663 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1664     const char *ec_string;
1665     pa_echo_canceller_method_t ec_method;
1666
1667     if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1668         pa_log("Invalid sample format specification or channel map");
1669         goto fail;
1670     }
1671
1672     u->ec = pa_xnew0(pa_echo_canceller, 1);
1673     if (!u->ec) {
1674         pa_log("Failed to alloc echo canceller");
1675         goto fail;
1676     }
1677
1678     ec_string = pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER);
1679     if ((ec_method = get_ec_method_from_string(ec_string)) < 0) {
1680         pa_log("Invalid echo canceller implementation '%s'", ec_string);
1681         goto fail;
1682     }
1683
1684     pa_log_info("Using AEC engine: %s", ec_string);
1685
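         /* Wire the selected implementation's entry points into our canceller
          * instance. */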
1686     u->ec->init = ec_table[ec_method].init;
1687     u->ec->play = ec_table[ec_method].play;
1688     u->ec->record = ec_table[ec_method].record;
1689     u->ec->set_drift = ec_table[ec_method].set_drift;
1690     u->ec->run = ec_table[ec_method].run;
1691     u->ec->done = ec_table[ec_method].done;
1692
1693     return 0;
1694
1695 fail:
1696     return -1;
1697 }
1698
1699 /* Called from main context. */
1700 int pa__init(pa_module*m) {
1701     struct userdata *u;
1702     pa_sample_spec source_output_ss, source_ss, sink_ss;
1703     pa_channel_map source_output_map, source_map, sink_map;
1704     pa_modargs *ma;
1705     pa_source *source_master=NULL;
1706     pa_sink *sink_master=NULL;
1707     bool autoloaded;
1708     pa_source_output_new_data source_output_data;
1709     pa_sink_input_new_data sink_input_data;
1710     pa_source_new_data source_data;
1711     pa_sink_new_data sink_data;
1712     pa_memchunk silence;
1713     uint32_t temp;
1714     uint32_t nframes = 0;
1715     bool use_master_format;
1716     pa_usec_t blocksize_usec;
1717
1718     pa_assert(m);
1719
1720     if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1721         pa_log("Failed to parse module arguments.");
1722         goto fail;
1723     }
1724
1725     if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1726         pa_log("Master source not found");
1727         goto fail;
1728     }
1729     pa_assert(source_master);
1730
1731     if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1732         pa_log("Master sink not found");
1733         goto fail;
1734     }
1735     pa_assert(sink_master);
1736
1737     if (source_master->monitor_of == sink_master) {
1738         pa_log("Can't cancel echo between a sink and its monitor");
1739         goto fail;
1740     }
1741
1742     /* Set to true if we should just inherit the sample spec and channel map from the master sink and source */
1743     use_master_format = DEFAULT_USE_MASTER_FORMAT;
1744     if (pa_modargs_get_value_boolean(ma, "use_master_format", &use_master_format) < 0) {
1745         pa_log("use_master_format= expects a boolean argument");
1746         goto fail;
1747     }
1748
1749     source_ss = source_master->sample_spec;
1750     sink_ss = sink_master->sample_spec;
1751
1752     if (use_master_format) {
1753         source_map = source_master->channel_map;
1754         sink_map = sink_master->channel_map;
1755     } else {
1756         source_ss = source_master->sample_spec;
1757         source_ss.rate = DEFAULT_RATE;
1758         source_ss.channels = DEFAULT_CHANNELS;
1759         pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1760
1761         sink_ss = sink_master->sample_spec;
1762         sink_ss.rate = DEFAULT_RATE;
1763         sink_ss.channels = DEFAULT_CHANNELS;
1764         pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1765     }
1766
1767     u = pa_xnew0(struct userdata, 1);
1768     if (!u) {
1769         pa_log("Failed to alloc userdata");
1770         goto fail;
1771     }
1772     u->core = m->core;
1773     u->module = m;
1774     m->userdata = u;
1775     u->dead = false;
1776
1777     u->use_volume_sharing = true;
1778     if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) {
1779         pa_log("use_volume_sharing= expects a boolean argument");
1780         goto fail;
1781     }
1782
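         /* adjust_time is given on the command line in seconds but is kept
          * internally in microseconds. */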
1783     temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1784     if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1785         pa_log("Failed to parse adjust_time value");
1786         goto fail;
1787     }
1788
1789     if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1790         u->adjust_time = temp * PA_USEC_PER_SEC;
1791     else
1792         u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1793
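         /* Likewise, adjust_threshold is given in milliseconds and stored in
          * microseconds. */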
1794     temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1795     if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1796         pa_log("Failed to parse adjust_threshold value");
1797         goto fail;
1798     }
1799
1800     if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1801         u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1802     else
1803         u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
1804
1805     u->save_aec = DEFAULT_SAVE_AEC;
1806     if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1807         pa_log("Failed to parse save_aec value");
1808         goto fail;
1809     }
1810
1811     autoloaded = DEFAULT_AUTOLOADED;
1812     if (pa_modargs_get_value_boolean(ma, "autoloaded", &autoloaded) < 0) {
1813         pa_log("Failed to parse autoloaded value");
1814         goto fail;
1815     }
1816
1817     if (init_common(ma, u, &source_ss, &source_map) < 0)
1818         goto fail;
1819
1820     u->asyncmsgq = pa_asyncmsgq_new(0);
1821     if (!u->asyncmsgq) {
1822         pa_log("pa_asyncmsgq_new() failed.");
1823         goto fail;
1824     }
1825
1826     u->need_realign = true;
1827
1828     source_output_ss = source_ss;
1829     source_output_map = source_map;
1830
1831     if (sink_ss.rate != source_ss.rate) {
1832         pa_log_info("Sample rates of play and out stream differ. Adjusting rate of play stream.");
1833         sink_ss.rate = source_ss.rate;
1834     }
1835
1836     pa_assert(u->ec->init);
1837     if (!u->ec->init(u->core, u->ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes, pa_modargs_get_value(ma, "aec_args", NULL))) {
1838         pa_log("Failed to init AEC engine");
1839         goto fail;
1840     }
1841
1842     pa_assert(source_output_ss.rate == source_ss.rate);
1843     pa_assert(sink_ss.rate == source_ss.rate);
1844
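         /* Convert the canceller's preferred block size from frames to bytes
          * for each of the three streams involved. */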
1845     u->source_output_blocksize = nframes * pa_frame_size(&source_output_ss);
1846     u->source_blocksize = nframes * pa_frame_size(&source_ss);
1847     u->sink_blocksize = nframes * pa_frame_size(&sink_ss);
1848
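         /* An engine that does its own drift compensation must provide a
          * set_drift() hook. */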
1849     if (u->ec->params.drift_compensation)
1850         pa_assert(u->ec->set_drift);
1851
1852     /* Create source */
1853     pa_source_new_data_init(&source_data);
1854     source_data.driver = __FILE__;
1855     source_data.module = m;
1856     if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1857         source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1858     pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1859     pa_source_new_data_set_channel_map(&source_data, &source_map);
1860     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1861     pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1862     if (!autoloaded)
1863         pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1864
1865     if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1866         pa_log("Invalid properties");
1867         pa_source_new_data_done(&source_data);
1868         goto fail;
1869     }
1870
1871     if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1872         const char *y, *z;
1873
1874         y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1875         z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1876         pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1877                 z ? z : source_master->name, y ? y : sink_master->name);
1878     }
1879
1880     u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1881                                                      | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1882     pa_source_new_data_done(&source_data);
1883
1884     if (!u->source) {
1885         pa_log("Failed to create source.");
1886         goto fail;
1887     }
1888
1889     u->source->parent.process_msg = source_process_msg_cb;
1890     u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb;
1891     u->source->update_requested_latency = source_update_requested_latency_cb;
1892     pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1893     if (!u->use_volume_sharing) {
1894         pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1895         pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1896         pa_source_enable_decibel_volume(u->source, true);
1897     }
1898     u->source->userdata = u;
1899
1900     pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1901
1902     /* Create sink */
1903     pa_sink_new_data_init(&sink_data);
1904     sink_data.driver = __FILE__;
1905     sink_data.module = m;
1906     if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1907         sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1908     pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1909     pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1910     pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1911     pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1912     if (!autoloaded)
1913         pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1914
1915     if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1916         pa_log("Invalid properties");
1917         pa_sink_new_data_done(&sink_data);
1918         goto fail;
1919     }
1920
1921     if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1922         const char *y, *z;
1923
1924         y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1925         z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1926         pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1927                 z ? z : sink_master->name, y ? y : source_master->name);
1928     }
1929
1930     u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1931                                                | (u->use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1932     pa_sink_new_data_done(&sink_data);
1933
1934     if (!u->sink) {
1935         pa_log("Failed to create sink.");
1936         goto fail;
1937     }
1938
1939     u->sink->parent.process_msg = sink_process_msg_cb;
1940     u->sink->set_state_in_main_thread = sink_set_state_in_main_thread_cb;
1941     u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb;
1942     u->sink->update_requested_latency = sink_update_requested_latency_cb;
1943     u->sink->request_rewind = sink_request_rewind_cb;
1944     pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1945     if (!u->use_volume_sharing) {
1946         pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1947         pa_sink_enable_decibel_volume(u->sink, true);
1948     }
1949     u->sink->userdata = u;
1950
1951     pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1952
1953     /* Create source output */
1954     pa_source_output_new_data_init(&source_output_data);
1955     source_output_data.driver = __FILE__;
1956     source_output_data.module = m;
1957     pa_source_output_new_data_set_source(&source_output_data, source_master, false, true);
1958     source_output_data.destination_source = u->source;
1959
1960     pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1961     pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1962     pa_source_output_new_data_set_sample_spec(&source_output_data, &source_output_ss);
1963     pa_source_output_new_data_set_channel_map(&source_output_data, &source_output_map);
1964     source_output_data.flags |= PA_SOURCE_OUTPUT_START_CORKED;
1965
1966     if (autoloaded)
1967         source_output_data.flags |= PA_SOURCE_OUTPUT_DONT_MOVE;
1968
1969     pa_source_output_new(&u->source_output, m->core, &source_output_data);
1970     pa_source_output_new_data_done(&source_output_data);
1971
1972     if (!u->source_output)
1973         goto fail;
1974
1975     u->source_output->parent.process_msg = source_output_process_msg_cb;
1976     u->source_output->push = source_output_push_cb;
1977     u->source_output->process_rewind = source_output_process_rewind_cb;
1978     u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1979     u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1980     u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1981     u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1982     u->source_output->kill = source_output_kill_cb;
1983     u->source_output->attach = source_output_attach_cb;
1984     u->source_output->detach = source_output_detach_cb;
1985     u->source_output->state_change = source_output_state_change_cb;
1986     u->source_output->may_move_to = source_output_may_move_to_cb;
1987     u->source_output->moving = source_output_moving_cb;
1988     u->source_output->userdata = u;
1989
1990     u->source->output_from_master = u->source_output;
1991
1992     /* Create sink input */
1993     pa_sink_input_new_data_init(&sink_input_data);
1994     sink_input_data.driver = __FILE__;
1995     sink_input_data.module = m;
1996     pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, false, true);
1997     sink_input_data.origin_sink = u->sink;
1998     pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1999     pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
2000     pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
2001     pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
2002     sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE | PA_SINK_INPUT_START_CORKED;
2003
2004     if (autoloaded)
2005         sink_input_data.flags |= PA_SINK_INPUT_DONT_MOVE;
2006
2007     pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
2008     pa_sink_input_new_data_done(&sink_input_data);
2009
2010     if (!u->sink_input)
2011         goto fail;
2012
2013     u->sink_input->parent.process_msg = sink_input_process_msg_cb;
2014     u->sink_input->pop = sink_input_pop_cb;
2015     u->sink_input->process_rewind = sink_input_process_rewind_cb;
2016     u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
2017     u->sink_input->update_max_request = sink_input_update_max_request_cb;
2018     u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
2019     u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
2020     u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
2021     u->sink_input->kill = sink_input_kill_cb;
2022     u->sink_input->attach = sink_input_attach_cb;
2023     u->sink_input->detach = sink_input_detach_cb;
2024     u->sink_input->state_change = sink_input_state_change_cb;
2025     u->sink_input->may_move_to = sink_input_may_move_to_cb;
2026     u->sink_input->moving = sink_input_moving_cb;
2027     if (!u->use_volume_sharing)
2028         u->sink_input->volume_changed = sink_input_volume_changed_cb;
2029     u->sink_input->mute_changed = sink_input_mute_changed_cb;
2030     u->sink_input->userdata = u;
2031
2032     u->sink->input_to_master = u->sink_input;
2033
2034     pa_sink_input_get_silence(u->sink_input, &silence);
2035
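         /* Queues used to buffer the captured and played-back samples until the
          * canceller can be fed matching blocks of both. */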
2036     u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
2037         &source_output_ss, 1, 1, 0, &silence);
2038     u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
2039         &sink_ss, 0, 1, 0, &silence);
2040
2041     pa_memblock_unref(silence.memblock);
2042
2043     if (!u->source_memblockq || !u->sink_memblockq) {
2044         pa_log("Failed to create memblockq.");
2045         goto fail;
2046     }
2047
2048     if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
2049         u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
2050     else if (u->ec->params.drift_compensation) {
2051         pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
2052         u->adjust_time = 0;
2053         /* Perform resync just once to give the canceller a leg up */
2054         pa_atomic_store(&u->request_resync, 1);
2055     }
2056
2057     if (u->save_aec) {
2058         pa_log("Creating AEC files in /tmp");
2059         u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
2060         if (u->captured_file == NULL)
2061             perror ("fopen failed");
2062         u->played_file = fopen("/tmp/aec_play.sw", "wb");
2063         if (u->played_file == NULL)
2064             perror ("fopen failed");
2065         u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
2066         if (u->canceled_file == NULL)
2067             perror ("fopen failed");
2068         if (u->ec->params.drift_compensation) {
2069             u->drift_file = fopen("/tmp/aec_drift.txt", "w");
2070             if (u->drift_file == NULL)
2071                 perror ("fopen failed");
2072         }
2073     }
2074
2075     u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg);
2076     u->ec->msg->parent.process_msg = canceller_process_msg_cb;
2077     u->ec->msg->userdata = u;
2078
2079     u->thread_info.current_volume = u->source->reference_volume;
2080
2081     /* We don't want to deal with too many chunks at a time */
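         /* Keep the latency between one canceller block and MAX_LATENCY_BLOCKS
          * blocks on both the source and the sink side. */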
2082     blocksize_usec = pa_bytes_to_usec(u->source_blocksize, &u->source->sample_spec);
2083     if (u->source->flags & PA_SOURCE_DYNAMIC_LATENCY)
2084         pa_source_set_latency_range(u->source, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS);
2085     pa_source_output_set_requested_latency(u->source_output, blocksize_usec * MAX_LATENCY_BLOCKS);
2086
2087     blocksize_usec = pa_bytes_to_usec(u->sink_blocksize, &u->sink->sample_spec);
2088     if (u->sink->flags & PA_SINK_DYNAMIC_LATENCY)
2089         pa_sink_set_latency_range(u->sink, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS);
2090     pa_sink_input_set_requested_latency(u->sink_input, blocksize_usec * MAX_LATENCY_BLOCKS);
2091
2092     /* The order here is important. The sink input and source output must be
2093      * put first, otherwise streams might attach to the virtual sink/source
2094      * before the sink input or source output is attached to the master. */
2095     pa_sink_input_put(u->sink_input);
2096     pa_source_output_put(u->source_output);
2097
2098     pa_sink_put(u->sink);
2099     pa_source_put(u->source);
2100
2101     pa_source_output_cork(u->source_output, false);
2102     pa_sink_input_cork(u->sink_input, false);
2103
2104     pa_modargs_free(ma);
2105
2106     return 0;
2107
2108 fail:
2109     if (ma)
2110         pa_modargs_free(ma);
2111
2112     pa__done(m);
2113
2114     return -1;
2115 }
2116
2117 /* Called from main context. */
2118 int pa__get_n_used(pa_module *m) {
2119     struct userdata *u;
2120
2121     pa_assert(m);
2122     pa_assert_se(u = m->userdata);
2123
2124     return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
2125 }
2126
2127 /* Called from main context. */
2128 void pa__done(pa_module*m) {
2129     struct userdata *u;
2130
2131     pa_assert(m);
2132
2133     if (!(u = m->userdata))
2134         return;
2135
2136     u->dead = true;
2137
2138     /* See comments in source_output_kill_cb() above regarding
2139      * destruction order! */
2140
2141     if (u->time_event)
2142         u->core->mainloop->time_free(u->time_event);
2143
2144     if (u->source_output)
2145         pa_source_output_cork(u->source_output, true);
2146     if (u->sink_input)
2147         pa_sink_input_cork(u->sink_input, true);
2148
2149     if (u->source)
2150         pa_source_unlink(u->source);
2151     if (u->sink)
2152         pa_sink_unlink(u->sink);
2153
2154     if (u->source_output) {
2155         pa_source_output_unlink(u->source_output);
2156         pa_source_output_unref(u->source_output);
2157     }
2158
2159     if (u->sink_input) {
2160         pa_sink_input_unlink(u->sink_input);
2161         pa_sink_input_unref(u->sink_input);
2162     }
2163
2164     if (u->source)
2165         pa_source_unref(u->source);
2166     if (u->sink)
2167         pa_sink_unref(u->sink);
2168
2169     if (u->source_memblockq)
2170         pa_memblockq_free(u->source_memblockq);
2171     if (u->sink_memblockq)
2172         pa_memblockq_free(u->sink_memblockq);
2173
2174     if (u->ec) {
2175         if (u->ec->done)
2176             u->ec->done(u->ec);
2177
2178         if (u->ec->msg) {
2179             u->ec->msg->dead = true;
2180             pa_echo_canceller_msg_unref(u->ec->msg);
2181         }
2182
2183         pa_xfree(u->ec);
2184     }
2185
2186     if (u->asyncmsgq)
2187         pa_asyncmsgq_unref(u->asyncmsgq);
2188
2189     if (u->save_aec) {
2190         if (u->played_file)
2191             fclose(u->played_file);
2192         if (u->captured_file)
2193             fclose(u->captured_file);
2194         if (u->canceled_file)
2195             fclose(u->canceled_file);
2196         if (u->drift_file)
2197             fclose(u->drift_file);
2198     }
2199
2200     pa_xfree(u);
2201 }
2202
2203 #ifdef ECHO_CANCEL_TEST
2204 /*
2205  * Stand-alone test program for running the canceller on pre-recorded files.
2206  */
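     /*
      * A typical invocation might look like the following (the binary name and
      * file names are only illustrative):
      *
      *   echo-cancel-test played.raw captured.raw cancelled.raw "aec_method=speex"
      */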
2207 int main(int argc, char* argv[]) {
2208     struct userdata u;
2209     pa_sample_spec source_output_ss, source_ss, sink_ss;
2210     pa_channel_map source_output_map, source_map, sink_map;
2211     pa_modargs *ma = NULL;
2212     uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
2213     int unused PA_GCC_UNUSED;
2214     int ret = 0, i;
2215     char c;
2216     float drift;
2217     uint32_t nframes;
2218
2219     if (!getenv("MAKE_CHECK"))
2220         pa_log_set_level(PA_LOG_DEBUG);
2221
2222     pa_memzero(&u, sizeof(u));
2223
2224     if (argc < 4 || argc > 7) {
2225         goto usage;
2226     }
2227
2228     u.captured_file = fopen(argv[2], "rb");
2229     if (u.captured_file == NULL) {
2230         perror ("Could not open capture file");
2231         goto fail;
2232     }
2233     u.played_file = fopen(argv[1], "rb");
2234     if (u.played_file == NULL) {
2235         perror ("Could not open play file");
2236         goto fail;
2237     }
2238     u.canceled_file = fopen(argv[3], "wb");
2239     if (u.canceled_file == NULL) {
2240         perror ("Could not open canceled file");
2241         goto fail;
2242     }
2243
2244     u.core = pa_xnew0(pa_core, 1);
2245     u.core->cpu_info.cpu_type = PA_CPU_X86;
2246     u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
2247
2248     if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
2249         pa_log("Failed to parse module arguments.");
2250         goto fail;
2251     }
2252
2253     source_ss.format = PA_SAMPLE_FLOAT32LE;
2254     source_ss.rate = DEFAULT_RATE;
2255     source_ss.channels = DEFAULT_CHANNELS;
2256     pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2257
2258     sink_ss.format = PA_SAMPLE_FLOAT32LE;
2259     sink_ss.rate = DEFAULT_RATE;
2260     sink_ss.channels = DEFAULT_CHANNELS;
2261     pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2262
2263     if (init_common(ma, &u, &source_ss, &source_map) < 0)
2264         goto fail;
2265
2266     source_output_ss = source_ss;
2267     source_output_map = source_map;
2268
2269     if (!u.ec->init(u.core, u.ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes,
2270                      pa_modargs_get_value(ma, "aec_args", NULL))) {
2271         pa_log("Failed to init AEC engine");
2272         goto fail;
2273     }
2274     u.source_output_blocksize = nframes * pa_frame_size(&source_output_ss);
2275     u.source_blocksize = nframes * pa_frame_size(&source_ss);
2276     u.sink_blocksize = nframes * pa_frame_size(&sink_ss);
2277
2278     if (u.ec->params.drift_compensation) {
2279         if (argc < 6) {
2280             pa_log("Drift compensation enabled but drift file not specified");
2281             goto fail;
2282         }
2283
2284         u.drift_file = fopen(argv[5], "rt");
2285
2286         if (u.drift_file == NULL) {
2287             perror ("Could not open drift file");
2288             goto fail;
2289         }
2290     }
2291
2292     rdata = pa_xmalloc(u.source_output_blocksize);
2293     pdata = pa_xmalloc(u.sink_blocksize);
2294     cdata = pa_xmalloc(u.source_blocksize);
2295
2296     if (!u.ec->params.drift_compensation) {
2297         while (fread(rdata, u.source_output_blocksize, 1, u.captured_file) > 0) {
2298             if (fread(pdata, u.sink_blocksize, 1, u.played_file) == 0) {
2299                 perror("Played file ended before captured file");
2300                 goto fail;
2301             }
2302
2303             u.ec->run(u.ec, rdata, pdata, cdata);
2304
2305             unused = fwrite(cdata, u.source_blocksize, 1, u.canceled_file);
2306         }
2307     } else {
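             /* The drift file is a sequence of records: 'd' followed by a drift
              * value, 'c' followed by the number of captured bytes to read next
              * from the capture file, and 'p' followed by the number of played
              * bytes to read next from the play file. */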
2308         while (fscanf(u.drift_file, "%c", &c) > 0) {
2309             switch (c) {
2310                 case 'd':
2311                     if (!fscanf(u.drift_file, "%a", &drift)) {
2312                         perror("Drift file incomplete");
2313                         goto fail;
2314                     }
2315
2316                     u.ec->set_drift(u.ec, drift);
2317
2318                     break;
2319
2320                 case 'c':
2321                     if (!fscanf(u.drift_file, "%d", &i)) {
2322                         perror("Drift file incomplete");
2323                         goto fail;
2324                     }
2325
2326                     if (fread(rdata, i, 1, u.captured_file) <= 0) {
2327                         perror("Captured file ended prematurely");
2328                         goto fail;
2329                     }
2330
2331                     u.ec->record(u.ec, rdata, cdata);
2332
2333                     unused = fwrite(cdata, i, 1, u.canceled_file);
2334
2335                     break;
2336
2337                 case 'p':
2338                     if (!fscanf(u.drift_file, "%d", &i)) {
2339                         perror("Drift file incomplete");
2340                         goto fail;
2341                     }
2342
2343                     if (fread(pdata, i, 1, u.played_file) <= 0) {
2344                         perror("Played file ended prematurely");
2345                         goto fail;
2346                     }
2347
2348                     u.ec->play(u.ec, pdata);
2349
2350                     break;
2351             }
2352         }
2353
2354         if (fread(rdata, i, 1, u.captured_file) > 0)
2355             pa_log("All capture data was not consumed");
2356         if (fread(pdata, i, 1, u.played_file) > 0)
2357             pa_log("All playback data was not consumed");
2358     }
2359
2360     u.ec->done(u.ec);
2361     u.ec->msg->dead = true;
2362     pa_echo_canceller_msg_unref(u.ec->msg);
2363
2364 out:
2365     if (u.captured_file)
2366         fclose(u.captured_file);
2367     if (u.played_file)
2368         fclose(u.played_file);
2369     if (u.canceled_file)
2370         fclose(u.canceled_file);
2371     if (u.drift_file)
2372         fclose(u.drift_file);
2373
2374     pa_xfree(rdata);
2375     pa_xfree(pdata);
2376     pa_xfree(cdata);
2377
2378     pa_xfree(u.ec);
2379     pa_xfree(u.core);
2380
2381     if (ma)
2382         pa_modargs_free(ma);
2383
2384     return ret;
2385
2386 usage:
2387     pa_log("Usage: %s play_file rec_file out_file [module args] [drift_file]", argv[0]);
2388
2389 fail:
2390     ret = -1;
2391     goto out;
2392 }
2393 #endif /* ECHO_CANCEL_TEST */