author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 16:03:18 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 16:03:18 +0000
commit    | 2dd5bc6a074165ddfbd57c4bd52c2d2dac8f47a1 (patch)
tree      | 465b29cb405d3af0b0ad50c78e1dccc636594fec /src/modules/echo-cancel
parent    | Initial commit. (diff)
download  | pulseaudio-2dd5bc6a074165ddfbd57c4bd52c2d2dac8f47a1.tar.xz
          | pulseaudio-2dd5bc6a074165ddfbd57c4bd52c2d2dac8f47a1.zip
Adding upstream version 14.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/modules/echo-cancel')
-rw-r--r-- | src/modules/echo-cancel/adrian-aec.c         |  287
-rw-r--r-- | src/modules/echo-cancel/adrian-aec.h         |  383
-rw-r--r-- | src/modules/echo-cancel/adrian-aec.orc       |    8
-rw-r--r-- | src/modules/echo-cancel/adrian-license.txt   |   17
-rw-r--r-- | src/modules/echo-cancel/adrian.c             |  118
-rw-r--r-- | src/modules/echo-cancel/adrian.h             |   30
-rw-r--r-- | src/modules/echo-cancel/echo-cancel.h        |  189
-rw-r--r-- | src/modules/echo-cancel/meson.build          |   22
-rw-r--r-- | src/modules/echo-cancel/module-echo-cancel.c | 2393
-rw-r--r-- | src/modules/echo-cancel/null.c               |   56
-rw-r--r-- | src/modules/echo-cancel/speex.c              |  237
-rw-r--r-- | src/modules/echo-cancel/webrtc.cc            |  594
12 files changed, 4334 insertions, 0 deletions
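Before the patch body: the file list above adds one source file per canceller backend (null.c, speex.c, adrian.c plus its AEC core, webrtc.cc), a shared echo-cancel.h interface, and the module-echo-cancel.c glue that drives whichever backend is selected. As a rough orientation, here is a minimal sketch of that callback-table pattern; the identifiers below (ec_backend, null_run, BLOCKSIZE) are illustrative stand-ins rather than the names used in the patch, whose real interface is struct pa_echo_canceller and ec_table further down.

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define BLOCKSIZE 320   /* bytes per processed block, fixed at init time (illustrative) */

/* Each backend fills in a small vtable; the module core calls run() once
 * per block of matched capture (rec) and playback (play) data. */
typedef struct ec_backend {
    bool (*init)(void);
    void (*run)(const uint8_t *rec, const uint8_t *play, uint8_t *out);
    void (*done)(void);
} ec_backend;

/* The "null" backend just copies capture data through unchanged. */
static bool null_init(void) { return true; }
static void null_run(const uint8_t *rec, const uint8_t *play, uint8_t *out) {
    (void) play;                   /* the dummy canceller ignores playback */
    memcpy(out, rec, BLOCKSIZE);   /* and passes the capture block through */
}
static void null_done(void) { }

/* One entry per backend; speex, adrian and webrtc would follow the same shape. */
static const ec_backend backends[] = {
    { null_init, null_run, null_done },
};
```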
diff --git a/src/modules/echo-cancel/adrian-aec.c b/src/modules/echo-cancel/adrian-aec.c new file mode 100644 index 0000000..215ea76 --- /dev/null +++ b/src/modules/echo-cancel/adrian-aec.c @@ -0,0 +1,287 @@ +/* aec.cpp + * + * Copyright (C) DFS Deutsche Flugsicherung (2004, 2005). + * All Rights Reserved. + * + * Acoustic Echo Cancellation NLMS-pw algorithm + * + * Version 0.3 filter created with www.dsptutor.freeuk.com + * Version 0.3.1 Allow change of stability parameter delta + * Version 0.4 Leaky Normalized LMS - pre whitening algorithm + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <math.h> +#include <string.h> +#include <stdint.h> + +#include <pulse/xmalloc.h> + +#include "adrian-aec.h" + +#ifndef DISABLE_ORC +#include "adrian-aec-orc-gen.h" +#endif + +#ifdef __SSE__ +#include <xmmintrin.h> +#endif + +/* Vector Dot Product */ +static REAL dotp(REAL a[], REAL b[]) +{ + REAL sum0 = 0.0f, sum1 = 0.0f; + int j; + + for (j = 0; j < NLMS_LEN; j += 2) { + // optimize: partial loop unrolling + sum0 += a[j] * b[j]; + sum1 += a[j + 1] * b[j + 1]; + } + return sum0 + sum1; +} + +static REAL dotp_sse(REAL a[], REAL b[]) +{ +#ifdef __SSE__ + /* This is taken from speex's inner product implementation */ + int j; + REAL sum; + __m128 acc = _mm_setzero_ps(); + + for (j=0;j<NLMS_LEN;j+=8) + { + acc = _mm_add_ps(acc, _mm_mul_ps(_mm_load_ps(a+j), _mm_loadu_ps(b+j))); + acc = _mm_add_ps(acc, _mm_mul_ps(_mm_load_ps(a+j+4), _mm_loadu_ps(b+j+4))); + } + acc = _mm_add_ps(acc, _mm_movehl_ps(acc, acc)); + acc = _mm_add_ss(acc, _mm_shuffle_ps(acc, acc, 0x55)); + _mm_store_ss(&sum, acc); + + return sum; +#else + return dotp(a, b); +#endif +} + + +AEC* AEC_init(int RATE, int have_vector) +{ + AEC *a = pa_xnew0(AEC, 1); + a->j = NLMS_EXT; + AEC_setambient(a, NoiseFloor); + a->dfast = a->dslow = M75dB_PCM; + a->xfast = a->xslow = M80dB_PCM; + a->gain = 1.0f; + a->Fx = IIR1_init(2000.0f/RATE); + a->Fe = IIR1_init(2000.0f/RATE); + a->cutoff = FIR_HP_300Hz_init(); + a->acMic = IIR_HP_init(); + a->acSpk = IIR_HP_init(); + + a->aes_y2 = M0dB; + + a->fdwdisplay = -1; + + if (have_vector) { + /* Get a 16-byte aligned location */ + a->w = (REAL *) (((uintptr_t) a->w_arr) - (((uintptr_t) a->w_arr) % 16) + 16); + a->dotp = dotp_sse; + } else { + /* We don't care about alignment, just use the array as-is */ + a->w = a->w_arr; + a->dotp = dotp; + } + + return a; +} + +void AEC_done(AEC *a) { + pa_assert(a); + + pa_xfree(a->Fx); + pa_xfree(a->Fe); + pa_xfree(a->acMic); + pa_xfree(a->acSpk); + pa_xfree(a->cutoff); + pa_xfree(a); +} + +// Adrian soft decision DTD +// (Dual Average Near-End to Far-End signal Ratio DTD) +// This algorithm uses exponential smoothing with different +// ageing parameters to get fast and slow near-end and far-end +// signal averages. The ratio of NFRs term +// (dfast / xfast) / (dslow / xslow) is used to compute the stepsize +// A ratio value of 2.5 is mapped to stepsize 0, a ratio of 0 is +// mapped to 1.0 with a limited linear function. 
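To make the stepsize mapping in that comment concrete: with the constants this patch defines in adrian-aec.h (STEPX1 = 1.0, STEPY1 = 1.0, STEPX2 = 2.5, STEPY2 = 0), the stepsize is a clamped linear function of the fast/slow near-end-to-far-end ratio. A standalone sketch of just that mapping follows (the patch's own implementation is in AEC_dtd() right below); the function name dtd_stepsize is illustrative, not from the patch.

```c
/* Clamped linear map from the NFR ratio to the NLMS stepsize:
 * ratio <= 1.0 -> 1.0 (adapt fully), ratio >= 2.5 -> 0 (freeze adaptation,
 * i.e. likely double talk), linear in between; e.g. ratio 1.75 -> 0.5. */
static float dtd_stepsize(float ratio) {
    const float STEPX1 = 1.0f, STEPY1 = 1.0f;   /* left point  (ratio, stepsize) */
    const float STEPX2 = 2.5f, STEPY2 = 0.0f;   /* right point (ratio, stepsize) */

    if (ratio < STEPX1)
        return STEPY1;
    if (ratio > STEPX2)
        return STEPY2;
    return STEPY1 + (STEPY2 - STEPY1) * (ratio - STEPX1) / (STEPX2 - STEPX1);
}
```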
+static float AEC_dtd(AEC *a, REAL d, REAL x) +{ + float ratio, stepsize; + + // fast near-end and far-end average + a->dfast += ALPHAFAST * (fabsf(d) - a->dfast); + a->xfast += ALPHAFAST * (fabsf(x) - a->xfast); + + // slow near-end and far-end average + a->dslow += ALPHASLOW * (fabsf(d) - a->dslow); + a->xslow += ALPHASLOW * (fabsf(x) - a->xslow); + + if (a->xfast < M70dB_PCM) { + return 0.0f; // no Spk signal + } + + if (a->dfast < M70dB_PCM) { + return 0.0f; // no Mic signal + } + + // ratio of NFRs + ratio = (a->dfast * a->xslow) / (a->dslow * a->xfast); + + // Linear interpolation with clamping at the limits + if (ratio < STEPX1) + stepsize = STEPY1; + else if (ratio > STEPX2) + stepsize = STEPY2; + else + stepsize = STEPY1 + (STEPY2 - STEPY1) * (ratio - STEPX1) / (STEPX2 - STEPX1); + + return stepsize; +} + + +static void AEC_leaky(AEC *a) +// The xfast signal is used to charge the hangover timer to Thold. +// When hangover expires (no Spk signal for some time) the vector w +// is erased. This is my implementation of Leaky NLMS. +{ + if (a->xfast >= M70dB_PCM) { + // vector w is valid for hangover Thold time + a->hangover = Thold; + } else { + if (a->hangover > 1) { + --(a->hangover); + } else if (1 == a->hangover) { + --(a->hangover); + // My Leaky NLMS is to erase vector w when hangover expires + memset(a->w_arr, 0, sizeof(a->w_arr)); + } + } +} + + +#if 0 +void AEC::openwdisplay() { + // open TCP connection to program wdisplay.tcl + fdwdisplay = socket_async("127.0.0.1", 50999); +}; +#endif + + +static REAL AEC_nlms_pw(AEC *a, REAL d, REAL x_, float stepsize) +{ + REAL e; + REAL ef; + a->x[a->j] = x_; + a->xf[a->j] = IIR1_highpass(a->Fx, x_); // pre-whitening of x + + // calculate error value + // (mic signal - estimated mic signal from spk signal) + e = d; + if (a->hangover > 0) { + e -= a->dotp(a->w, a->x + a->j); + } + ef = IIR1_highpass(a->Fe, e); // pre-whitening of e + + // optimize: iterative dotp(xf, xf) + a->dotp_xf_xf += (a->xf[a->j] * a->xf[a->j] - a->xf[a->j + NLMS_LEN - 1] * a->xf[a->j + NLMS_LEN - 1]); + + if (stepsize > 0.0f) { + // calculate variable step size + REAL mikro_ef = stepsize * ef / a->dotp_xf_xf; + +#ifdef DISABLE_ORC + // update tap weights (filter learning) + int i; + for (i = 0; i < NLMS_LEN; i += 2) { + // optimize: partial loop unrolling + a->w[i] += mikro_ef * a->xf[i + a->j]; + a->w[i + 1] += mikro_ef * a->xf[i + a->j + 1]; + } +#else + update_tap_weights(a->w, &a->xf[a->j], mikro_ef, NLMS_LEN); +#endif + } + + if (--(a->j) < 0) { + // optimize: decrease number of memory copies + a->j = NLMS_EXT; + memmove(a->x + a->j + 1, a->x, (NLMS_LEN - 1) * sizeof(REAL)); + memmove(a->xf + a->j + 1, a->xf, (NLMS_LEN - 1) * sizeof(REAL)); + } + + // Saturation + if (e > MAXPCM) { + return MAXPCM; + } else if (e < -MAXPCM) { + return -MAXPCM; + } else { + return e; + } +} + + +int AEC_doAEC(AEC *a, int d_, int x_) +{ + REAL d = (REAL) d_; + REAL x = (REAL) x_; + + // Mic Highpass Filter - to remove DC + d = IIR_HP_highpass(a->acMic, d); + + // Mic Highpass Filter - cut-off below 300Hz + d = FIR_HP_300Hz_highpass(a->cutoff, d); + + // Amplify, for e.g. Soundcards with -6dB max. 
volume + d *= a->gain; + + // Spk Highpass Filter - to remove DC + x = IIR_HP_highpass(a->acSpk, x); + + // Double Talk Detector + a->stepsize = AEC_dtd(a, d, x); + + // Leaky (ageing of vector w) + AEC_leaky(a); + + // Acoustic Echo Cancellation + d = AEC_nlms_pw(a, d, x, a->stepsize); + +#if 0 + if (fdwdisplay >= 0) { + if (++dumpcnt >= (WIDEB*RATE/10)) { + // wdisplay creates 10 dumps per seconds = large CPU load! + dumpcnt = 0; + write(fdwdisplay, ws, DUMP_LEN*sizeof(float)); + // we don't check return value. This is not production quality!!! + memset(ws, 0, sizeof(ws)); + } else { + int i; + for (i = 0; i < DUMP_LEN; i += 2) { + // optimize: partial loop unrolling + ws[i] += w[i]; + ws[i + 1] += w[i + 1]; + } + } + } +#endif + + return (int) d; +} diff --git a/src/modules/echo-cancel/adrian-aec.h b/src/modules/echo-cancel/adrian-aec.h new file mode 100644 index 0000000..3a31fd8 --- /dev/null +++ b/src/modules/echo-cancel/adrian-aec.h @@ -0,0 +1,383 @@ +/* aec.h + * + * Copyright (C) DFS Deutsche Flugsicherung (2004, 2005). + * All Rights Reserved. + * Author: Andre Adrian + * + * Acoustic Echo Cancellation Leaky NLMS-pw algorithm + * + * Version 0.3 filter created with www.dsptutor.freeuk.com + * Version 0.3.1 Allow change of stability parameter delta + * Version 0.4 Leaky Normalized LMS - pre whitening algorithm + */ + +#ifndef _AEC_H /* include only once */ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulse/gccmacro.h> +#include <pulse/xmalloc.h> + +#include <pulsecore/macro.h> + +#define WIDEB 2 + +// use double if your CPU does software-emulation of float +#define REAL float + +/* dB Values */ +#define M0dB 1.0f +#define M3dB 0.71f +#define M6dB 0.50f +#define M9dB 0.35f +#define M12dB 0.25f +#define M18dB 0.125f +#define M24dB 0.063f + +/* dB values for 16bit PCM */ +/* MxdB_PCM = 32767 * 10 ^(x / 20) */ +#define M10dB_PCM 10362.0f +#define M20dB_PCM 3277.0f +#define M25dB_PCM 1843.0f +#define M30dB_PCM 1026.0f +#define M35dB_PCM 583.0f +#define M40dB_PCM 328.0f +#define M45dB_PCM 184.0f +#define M50dB_PCM 104.0f +#define M55dB_PCM 58.0f +#define M60dB_PCM 33.0f +#define M65dB_PCM 18.0f +#define M70dB_PCM 10.0f +#define M75dB_PCM 6.0f +#define M80dB_PCM 3.0f +#define M85dB_PCM 2.0f +#define M90dB_PCM 1.0f + +#define MAXPCM 32767.0f + +/* Design constants (Change to fine tune the algorithms */ + +/* The following values are for hardware AEC and studio quality + * microphone */ + +/* NLMS filter length in taps (samples). A longer filter length gives + * better Echo Cancellation, but maybe slower convergence speed and + * needs more CPU power (Order of NLMS is linear) */ +#define NLMS_LEN (100*WIDEB*8) + +/* Vector w visualization length in taps (samples). + * Must match argv value for wdisplay.tcl */ +#define DUMP_LEN (40*WIDEB*8) + +/* minimum energy in xf. Range: M70dB_PCM to M50dB_PCM. Should be equal + * to microphone ambient Noise level */ +#define NoiseFloor M55dB_PCM + +/* Leaky hangover in taps. + */ +#define Thold (60 * WIDEB * 8) + +// Adrian soft decision DTD +// left point. X is ratio, Y is stepsize +#define STEPX1 1.0 +#define STEPY1 1.0 +// right point. STEPX2=2.0 is good double talk, 3.0 is good single talk. +#define STEPX2 2.5 +#define STEPY2 0 +#define ALPHAFAST (1.0f / 100.0f) +#define ALPHASLOW (1.0f / 20000.0f) + + + +/* Ageing multiplier for LMS memory vector w */ +#define Leaky 0.9999f + +/* Double Talk Detector Speaker/Microphone Threshold. 
Range <=1 + * Large value (M0dB) is good for Single-Talk Echo cancellation, + * small value (M12dB) is good for Double-Talk AEC */ +#define GeigelThreshold M6dB + +/* for Non Linear Processor. Range >0 to 1. Large value (M0dB) is good + * for Double-Talk, small value (M12dB) is good for Single-Talk */ +#define NLPAttenuation M12dB + +/* Below this line there are no more design constants */ + +typedef struct IIR_HP IIR_HP; + +/* Exponential Smoothing or IIR Infinite Impulse Response Filter */ +struct IIR_HP { + REAL x; +}; + +static IIR_HP* IIR_HP_init(void) { + IIR_HP *i = pa_xnew(IIR_HP, 1); + i->x = 0.0f; + return i; + } + +static REAL IIR_HP_highpass(IIR_HP *i, REAL in) { + const REAL a0 = 0.01f; /* controls Transfer Frequency */ + /* Highpass = Signal - Lowpass. Lowpass = Exponential Smoothing */ + i->x += a0 * (in - i->x); + return in - i->x; + } + +typedef struct FIR_HP_300Hz FIR_HP_300Hz; + +#if WIDEB==1 +/* 17 taps FIR Finite Impulse Response filter + * Coefficients calculated with + * www.dsptutor.freeuk.com/KaiserFilterDesign/KaiserFilterDesign.html + */ +class FIR_HP_300Hz { + REAL z[18]; + +public: + FIR_HP_300Hz() { + memset(this, 0, sizeof(FIR_HP_300Hz)); + } + + REAL highpass(REAL in) { + const REAL a[18] = { + // Kaiser Window FIR Filter, Filter type: High pass + // Passband: 300.0 - 4000.0 Hz, Order: 16 + // Transition band: 75.0 Hz, Stopband attenuation: 10.0 dB + -0.034870606, -0.039650206, -0.044063766, -0.04800318, + -0.051370874, -0.054082647, -0.056070227, -0.057283327, + 0.8214126, -0.057283327, -0.056070227, -0.054082647, + -0.051370874, -0.04800318, -0.044063766, -0.039650206, + -0.034870606, 0.0 + }; + memmove(z + 1, z, 17 * sizeof(REAL)); + z[0] = in; + REAL sum0 = 0.0, sum1 = 0.0; + int j; + + for (j = 0; j < 18; j += 2) { + // optimize: partial loop unrolling + sum0 += a[j] * z[j]; + sum1 += a[j + 1] * z[j + 1]; + } + return sum0 + sum1; + } +}; + +#else + +/* 35 taps FIR Finite Impulse Response filter + * Passband 150Hz to 4kHz for 8kHz sample rate, 300Hz to 8kHz for 16kHz + * sample rate. 
+ * Coefficients calculated with + * www.dsptutor.freeuk.com/KaiserFilterDesign/KaiserFilterDesign.html + */ +struct FIR_HP_300Hz { + REAL z[36]; +}; + +static FIR_HP_300Hz* FIR_HP_300Hz_init(void) { + FIR_HP_300Hz *ret = pa_xnew(FIR_HP_300Hz, 1); + memset(ret, 0, sizeof(FIR_HP_300Hz)); + return ret; + } + +static REAL FIR_HP_300Hz_highpass(FIR_HP_300Hz *f, REAL in) { + REAL sum0 = 0.0, sum1 = 0.0; + int j; + const REAL a[36] = { + // Kaiser Window FIR Filter, Filter type: High pass + // Passband: 150.0 - 4000.0 Hz, Order: 34 + // Transition band: 34.0 Hz, Stopband attenuation: 10.0 dB + -0.016165324, -0.017454365, -0.01871232, -0.019931411, + -0.021104068, -0.022222936, -0.02328091, -0.024271343, + -0.025187887, -0.02602462, -0.026776174, -0.027437767, + -0.028004972, -0.028474221, -0.028842418, -0.029107114, + -0.02926664, 0.8524841, -0.02926664, -0.029107114, + -0.028842418, -0.028474221, -0.028004972, -0.027437767, + -0.026776174, -0.02602462, -0.025187887, -0.024271343, + -0.02328091, -0.022222936, -0.021104068, -0.019931411, + -0.01871232, -0.017454365, -0.016165324, 0.0 + }; + memmove(f->z + 1, f->z, 35 * sizeof(REAL)); + f->z[0] = in; + + for (j = 0; j < 36; j += 2) { + // optimize: partial loop unrolling + sum0 += a[j] * f->z[j]; + sum1 += a[j + 1] * f->z[j + 1]; + } + return sum0 + sum1; + } +#endif + +typedef struct IIR1 IIR1; + +/* Recursive single pole IIR Infinite Impulse response High-pass filter + * + * Reference: The Scientist and Engineer's Guide to Digital Processing + * + * output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1] + * + * X = exp(-2.0 * pi * Fc) + * A0 = (1 + X) / 2 + * A1 = -(1 + X) / 2 + * B1 = X + * Fc = cutoff freq / sample rate + */ +struct IIR1 { + REAL in0, out0; + REAL a0, a1, b1; +}; + +#if 0 + IIR1() { + memset(this, 0, sizeof(IIR1)); + } +#endif + +static IIR1* IIR1_init(REAL Fc) { + IIR1 *i = pa_xnew(IIR1, 1); + i->b1 = expf(-2.0f * M_PI * Fc); + i->a0 = (1.0f + i->b1) / 2.0f; + i->a1 = -(i->a0); + i->in0 = 0.0f; + i->out0 = 0.0f; + return i; + } + +static REAL IIR1_highpass(IIR1 *i, REAL in) { + REAL out = i->a0 * in + i->a1 * i->in0 + i->b1 * i->out0; + i->in0 = in; + i->out0 = out; + return out; + } + + +#if 0 +/* Recursive two pole IIR Infinite Impulse Response filter + * Coefficients calculated with + * http://www.dsptutor.freeuk.com/IIRFilterDesign/IIRFiltDes102.html + */ +class IIR2 { + REAL x[2], y[2]; + +public: + IIR2() { + memset(this, 0, sizeof(IIR2)); + } + + REAL highpass(REAL in) { + // Butterworth IIR filter, Filter type: HP + // Passband: 2000 - 4000.0 Hz, Order: 2 + const REAL a[] = { 0.29289323f, -0.58578646f, 0.29289323f }; + const REAL b[] = { 1.3007072E-16f, 0.17157288f }; + REAL out = + a[0] * in + a[1] * x[0] + a[2] * x[1] - b[0] * y[0] - b[1] * y[1]; + + x[1] = x[0]; + x[0] = in; + y[1] = y[0]; + y[0] = out; + return out; + } +}; +#endif + + +// Extension in taps to reduce mem copies +#define NLMS_EXT (10*8) + +// block size in taps to optimize DTD calculation +#define DTD_LEN 16 + +typedef struct AEC AEC; + +struct AEC { + // Time domain Filters + IIR_HP *acMic, *acSpk; // DC-level remove Highpass) + FIR_HP_300Hz *cutoff; // 150Hz cut-off Highpass + REAL gain; // Mic signal amplify + IIR1 *Fx, *Fe; // pre-whitening Highpass for x, e + + // Adrian soft decision DTD (Double Talk Detector) + REAL dfast, xfast; + REAL dslow, xslow; + + // NLMS-pw + REAL x[NLMS_LEN + NLMS_EXT]; // tap delayed loudspeaker signal + REAL xf[NLMS_LEN + NLMS_EXT]; // pre-whitening tap delayed signal + REAL w_arr[NLMS_LEN + (16 / 
sizeof(REAL))]; // tap weights + REAL *w; // this will be a 16-byte aligned pointer into w_arr + int j; // optimize: less memory copies + double dotp_xf_xf; // double to avoid loss of precision + float delta; // noise floor to stabilize NLMS + + // AES + float aes_y2; // not in use! + + // w vector visualization + REAL ws[DUMP_LEN]; // tap weights sums + int fdwdisplay; // TCP file descriptor + int dumpcnt; // wdisplay output counter + + // variables are public for visualization + int hangover; + float stepsize; + + // vfuncs that are picked based on processor features available + REAL (*dotp) (REAL[], REAL[]); +}; + +/* Double-Talk Detector + * + * in d: microphone sample (PCM as REALing point value) + * in x: loudspeaker sample (PCM as REALing point value) + * return: from 0 for doubletalk to 1.0 for single talk + */ +static float AEC_dtd(AEC *a, REAL d, REAL x); + +static void AEC_leaky(AEC *a); + +/* Normalized Least Mean Square Algorithm pre-whitening (NLMS-pw) + * The LMS algorithm was developed by Bernard Widrow + * book: Haykin, Adaptive Filter Theory, 4. edition, Prentice Hall, 2002 + * + * in d: microphone sample (16bit PCM value) + * in x_: loudspeaker sample (16bit PCM value) + * in stepsize: NLMS adaptation variable + * return: echo cancelled microphone sample + */ +static REAL AEC_nlms_pw(AEC *a, REAL d, REAL x_, float stepsize); + +AEC* AEC_init(int RATE, int have_vector); +void AEC_done(AEC *a); + +/* Acoustic Echo Cancellation and Suppression of one sample + * in d: microphone signal with echo + * in x: loudspeaker signal + * return: echo cancelled microphone signal + */ + int AEC_doAEC(AEC *a, int d_, int x_); + +PA_GCC_UNUSED static float AEC_getambient(AEC *a) { + return a->dfast; + } +static void AEC_setambient(AEC *a, float Min_xf) { + a->dotp_xf_xf -= a->delta; // subtract old delta + a->delta = (NLMS_LEN-1) * Min_xf * Min_xf; + a->dotp_xf_xf += a->delta; // add new delta + } +PA_GCC_UNUSED static void AEC_setgain(AEC *a, float gain_) { + a->gain = gain_; + } +#if 0 + void AEC_openwdisplay(AEC *a); +#endif +PA_GCC_UNUSED static void AEC_setaes(AEC *a, float aes_y2_) { + a->aes_y2 = aes_y2_; + } + +#define _AEC_H +#endif diff --git a/src/modules/echo-cancel/adrian-aec.orc b/src/modules/echo-cancel/adrian-aec.orc new file mode 100644 index 0000000..8054772 --- /dev/null +++ b/src/modules/echo-cancel/adrian-aec.orc @@ -0,0 +1,8 @@ +.function update_tap_weights +.dest 4 w float +.source 4 xf float +.floatparam 4 mikro_ef +.temp 4 tmp float + +mulf tmp, mikro_ef, xf +addf w, w, tmp diff --git a/src/modules/echo-cancel/adrian-license.txt b/src/modules/echo-cancel/adrian-license.txt new file mode 100644 index 0000000..7c06efd --- /dev/null +++ b/src/modules/echo-cancel/adrian-license.txt @@ -0,0 +1,17 @@ + Copyright (C) DFS Deutsche Flugsicherung (2004). All Rights Reserved. + + You are allowed to use this source code in any open source or closed + source software you want. You are allowed to use the algorithms for a + hardware solution. You are allowed to modify the source code. + You are not allowed to remove the name of the author from this memo or + from the source code files. You are not allowed to monopolize the + source code or the algorithms behind the source code as your + intellectual property. This source code is free of royalty and comes + with no warranty. + +--- The following does not apply to the PulseAudio module --- + + Please see g711/gen-lic.txt for the ITU-T G.711 codec copyright. + Please see gsm/gen-lic.txt for the ITU-T GSM codec copyright. 
+ Please see ilbc/COPYRIGHT and ilbc/NOTICE for the IETF iLBC codec + copyright. diff --git a/src/modules/echo-cancel/adrian.c b/src/modules/echo-cancel/adrian.c new file mode 100644 index 0000000..3c47fae --- /dev/null +++ b/src/modules/echo-cancel/adrian.c @@ -0,0 +1,118 @@ +/*** + This file is part of PulseAudio. + + Copyright 2010 Arun Raghavan <arun.raghavan@collabora.co.uk> + + Contributor: Wim Taymans <wim.taymans@gmail.com> + + The actual implementation is taken from the sources at + http://andreadrian.de/intercom/ - for the license, look for + adrian-license.txt in the same directory as this file. + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. +***/ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulse/xmalloc.h> + +#include <pulsecore/modargs.h> + +#include "echo-cancel.h" + +/* should be between 10-20 ms */ +#define DEFAULT_FRAME_SIZE_MS 20 + +static const char* const valid_modargs[] = { + "frame_size_ms", + NULL +}; + +static void pa_adrian_ec_fixate_spec(pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map) { + out_ss->format = PA_SAMPLE_S16NE; + out_ss->channels = 1; + pa_channel_map_init_mono(out_map); + + *play_ss = *out_ss; + *play_map = *out_map; + *rec_ss = *out_ss; + *rec_map = *out_map; +} + +bool pa_adrian_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args) { + int rate, have_vector = 0; + uint32_t frame_size_ms; + pa_modargs *ma; + + if (!(ma = pa_modargs_new(args, valid_modargs))) { + pa_log("Failed to parse submodule arguments."); + goto fail; + } + + frame_size_ms = DEFAULT_FRAME_SIZE_MS; + if (pa_modargs_get_value_u32(ma, "frame_size_ms", &frame_size_ms) < 0 || frame_size_ms < 1 || frame_size_ms > 200) { + pa_log("Invalid frame_size_ms specification"); + goto fail; + } + + pa_adrian_ec_fixate_spec(rec_ss, rec_map, play_ss, play_map, out_ss, out_map); + + rate = out_ss->rate; + *nframes = (rate * frame_size_ms) / 1000; + ec->params.adrian.blocksize = (*nframes) * pa_frame_size(out_ss); + + pa_log_debug ("Using nframes %d, blocksize %u, channels %d, rate %d", *nframes, ec->params.adrian.blocksize, out_ss->channels, out_ss->rate); + + /* For now we only support SSE */ + if (c->cpu_info.cpu_type == PA_CPU_X86 && (c->cpu_info.flags.x86 & PA_CPU_X86_SSE)) + have_vector = 1; + + ec->params.adrian.aec = AEC_init(rate, have_vector); + if (!ec->params.adrian.aec) + goto fail; + + pa_modargs_free(ma); + return true; + +fail: + if (ma) + pa_modargs_free(ma); + return false; +} + +void pa_adrian_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) { + unsigned int i; + + for (i = 0; i < ec->params.adrian.blocksize; i += 2) { + /* We know it's S16NE mono data */ + 
int r = *(int16_t *)(rec + i); + int p = *(int16_t *)(play + i); + *(int16_t *)(out + i) = (int16_t) AEC_doAEC(ec->params.adrian.aec, r, p); + } +} + +void pa_adrian_ec_done(pa_echo_canceller *ec) { + if (ec->params.adrian.aec) { + AEC_done(ec->params.adrian.aec); + ec->params.adrian.aec = NULL; + } +} diff --git a/src/modules/echo-cancel/adrian.h b/src/modules/echo-cancel/adrian.h new file mode 100644 index 0000000..a5e0444 --- /dev/null +++ b/src/modules/echo-cancel/adrian.h @@ -0,0 +1,30 @@ +/*** + This file is part of PulseAudio. + + Copyright 2010 Arun Raghavan <arun.raghavan@collabora.co.uk> + + The actual implementation is taken from the sources at + http://andreadrian.de/intercom/ - for the license, look for + adrian-license.txt in the same directory as this file. + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. +***/ + +/* Forward declarations */ + +typedef struct AEC AEC; + +AEC* AEC_init(int RATE, int have_vector); +void AEC_done(AEC *a); +int AEC_doAEC(AEC *a, int d_, int x_); diff --git a/src/modules/echo-cancel/echo-cancel.h b/src/modules/echo-cancel/echo-cancel.h new file mode 100644 index 0000000..ee67949 --- /dev/null +++ b/src/modules/echo-cancel/echo-cancel.h @@ -0,0 +1,189 @@ +/*** + This file is part of PulseAudio. + + Copyright 2010 Arun Raghavan <arun.raghavan@collabora.co.uk> + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. +***/ + +#ifndef fooechocancelhfoo +#define fooechocancelhfoo + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulse/sample.h> +#include <pulse/channelmap.h> +#include <pulsecore/core.h> +#include <pulsecore/macro.h> + +#ifdef HAVE_SPEEX +#include <speex/speex_echo.h> +#include <speex/speex_preprocess.h> +#endif + +#include "adrian.h" + +/* Common data structures */ + +typedef struct pa_echo_canceller_msg pa_echo_canceller_msg; + +typedef struct pa_echo_canceller_params pa_echo_canceller_params; + +struct pa_echo_canceller_params { + union { + struct { + pa_sample_spec out_ss; + } null; +#ifdef HAVE_SPEEX + struct { + SpeexEchoState *state; + SpeexPreprocessState *pp_state; + } speex; +#endif +#ifdef HAVE_ADRIAN_EC + struct { + uint32_t blocksize; + AEC *aec; + } adrian; +#endif +#ifdef HAVE_WEBRTC + struct { + /* This is a void* so that we don't have to convert this whole file + * to C++ linkage. 
apm is a pointer to an AudioProcessing object */ + void *apm; + unsigned int blocksize; /* in frames */ + pa_sample_spec rec_ss, play_ss, out_ss; + float *rec_buffer[PA_CHANNELS_MAX], *play_buffer[PA_CHANNELS_MAX]; /* for deinterleaved buffers */ + void *trace_callback; + bool agc; + bool first; + unsigned int agc_start_volume; + } webrtc; +#endif + /* each canceller-specific structure goes here */ + }; + + /* Set this if canceller can do drift compensation. Also see set_drift() + * below */ + bool drift_compensation; +}; + +typedef struct pa_echo_canceller pa_echo_canceller; + +struct pa_echo_canceller { + /* Initialise canceller engine. */ + bool (*init) (pa_core *c, + pa_echo_canceller *ec, + pa_sample_spec *rec_ss, + pa_channel_map *rec_map, + pa_sample_spec *play_ss, + pa_channel_map *play_map, + pa_sample_spec *out_ss, + pa_channel_map *out_map, + uint32_t *nframes, + const char *args); + + /* You should have only one of play()+record() or run() set. The first + * works under the assumption that you'll handle buffering and matching up + * samples yourself. If you set run(), module-echo-cancel will handle + * synchronising the playback and record streams. */ + + /* Feed the engine 'nframes' playback frames. */ + void (*play) (pa_echo_canceller *ec, const uint8_t *play); + /* Feed the engine 'nframes' record frames. nframes processed frames are + * returned in out. */ + void (*record) (pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out); + /* Feed the engine nframes playback and record frames, with a reasonable + * effort at keeping the two in sync. nframes processed frames are + * returned in out. */ + void (*run) (pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out); + + /* Optional callback to set the drift, expressed as the ratio of the + * difference in number of playback and capture samples to the number of + * capture samples, for some instant of time. This is used only if the + * canceller signals that it supports drift compensation, and is called + * before record(). The actual implementation needs to derive drift based + * on point samples -- the individual values are not accurate enough to use + * as-is. */ + /* NOTE: the semantics of this function might change in the future. */ + void (*set_drift) (pa_echo_canceller *ec, float drift); + + /* Free up resources. */ + void (*done) (pa_echo_canceller *ec); + + /* Structure with common and engine-specific canceller parameters. */ + pa_echo_canceller_params params; + + /* msgobject that can be used to send messages back to the main thread */ + pa_echo_canceller_msg *msg; +}; + +/* Functions to be used by the canceller analog gain control routines */ +pa_volume_t pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec); +void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_volume_t volume); + +/* Computes EC block size in frames (rounded down to nearest power-of-2) based + * on sample rate and milliseconds. 
*/ +uint32_t pa_echo_canceller_blocksize_power2(unsigned rate, unsigned ms); + +/* Null canceller functions */ +bool pa_null_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args); +void pa_null_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out); +void pa_null_ec_done(pa_echo_canceller *ec); + +#ifdef HAVE_SPEEX +/* Speex canceller functions */ +bool pa_speex_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args); +void pa_speex_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out); +void pa_speex_ec_done(pa_echo_canceller *ec); +#endif + +#ifdef HAVE_ADRIAN_EC +/* Adrian Andre's echo canceller */ +bool pa_adrian_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args); +void pa_adrian_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out); +void pa_adrian_ec_done(pa_echo_canceller *ec); +#endif + +#ifdef HAVE_WEBRTC +/* WebRTC canceller functions */ +PA_C_DECL_BEGIN +bool pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args); +void pa_webrtc_ec_play(pa_echo_canceller *ec, const uint8_t *play); +void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out); +void pa_webrtc_ec_set_drift(pa_echo_canceller *ec, float drift); +void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out); +void pa_webrtc_ec_done(pa_echo_canceller *ec); +PA_C_DECL_END +#endif + +#endif /* fooechocancelhfoo */ diff --git a/src/modules/echo-cancel/meson.build b/src/modules/echo-cancel/meson.build new file mode 100644 index 0000000..641cd35 --- /dev/null +++ b/src/modules/echo-cancel/meson.build @@ -0,0 +1,22 @@ +# The webrtc code is split off into a helper library to avoid having automake +# link module-echo-cancel with C++ (which it does if there are any C++ deps, +# even conditional ones). + +# This library requires a symbol from module-echo-cancel, hence we need +# '-Wl,--unresolved-symbols=ignore-in-object-files' otherwise it fails +# at link time. + +libwebrtc_util_sources = [ + 'webrtc.cc' +] + +libwebrtc_util = shared_library('webrtc-util', + libwebrtc_util_sources, + cpp_args : [pa_c_args, server_c_args], + include_directories : [configinc, topinc], + dependencies : [libpulse_dep, libpulsecommon_dep, libpulsecore_dep, libatomic_ops_dep, webrtc_dep, libintl_dep], + link_args : [nodelete_link_args, '-Wl,--unresolved-symbols=ignore-in-object-files'], + install : true, + install_rpath : privlibdir, + install_dir : modlibexecdir, +) diff --git a/src/modules/echo-cancel/module-echo-cancel.c b/src/modules/echo-cancel/module-echo-cancel.c new file mode 100644 index 0000000..f239492 --- /dev/null +++ b/src/modules/echo-cancel/module-echo-cancel.c @@ -0,0 +1,2393 @@ +/*** + This file is part of PulseAudio. 
+ + Copyright 2010 Wim Taymans <wim.taymans@gmail.com> + + Based on module-virtual-sink.c + module-virtual-source.c + module-loopback.c + + Copyright 2010 Intel Corporation + Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com> + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. +***/ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <stdio.h> +#include <math.h> + +#include "echo-cancel.h" + +#include <pulse/xmalloc.h> +#include <pulse/timeval.h> +#include <pulse/rtclock.h> + +#include <pulsecore/i18n.h> +#include <pulsecore/atomic.h> +#include <pulsecore/macro.h> +#include <pulsecore/namereg.h> +#include <pulsecore/sink.h> +#include <pulsecore/module.h> +#include <pulsecore/core-rtclock.h> +#include <pulsecore/core-util.h> +#include <pulsecore/modargs.h> +#include <pulsecore/log.h> +#include <pulsecore/rtpoll.h> +#include <pulsecore/sample-util.h> +#include <pulsecore/ltdl-helper.h> + +PA_MODULE_AUTHOR("Wim Taymans"); +PA_MODULE_DESCRIPTION("Echo Cancellation"); +PA_MODULE_VERSION(PACKAGE_VERSION); +PA_MODULE_LOAD_ONCE(false); +PA_MODULE_USAGE( + _("source_name=<name for the source> " + "source_properties=<properties for the source> " + "source_master=<name of source to filter> " + "sink_name=<name for the sink> " + "sink_properties=<properties for the sink> " + "sink_master=<name of sink to filter> " + "adjust_time=<how often to readjust rates in s> " + "adjust_threshold=<how much drift to readjust after in ms> " + "format=<sample format> " + "rate=<sample rate> " + "channels=<number of channels> " + "channel_map=<channel map> " + "aec_method=<implementation to use> " + "aec_args=<parameters for the AEC engine> " + "save_aec=<save AEC data in /tmp> " + "autoloaded=<set if this module is being loaded automatically> " + "use_volume_sharing=<yes or no> " + "use_master_format=<yes or no> " + )); + +/* NOTE: Make sure the enum and ec_table are maintained in the correct order */ +typedef enum { + PA_ECHO_CANCELLER_INVALID = -1, + PA_ECHO_CANCELLER_NULL, +#ifdef HAVE_SPEEX + PA_ECHO_CANCELLER_SPEEX, +#endif +#ifdef HAVE_ADRIAN_EC + PA_ECHO_CANCELLER_ADRIAN, +#endif +#ifdef HAVE_WEBRTC + PA_ECHO_CANCELLER_WEBRTC, +#endif +} pa_echo_canceller_method_t; + +#ifdef HAVE_WEBRTC +#define DEFAULT_ECHO_CANCELLER "webrtc" +#else +#define DEFAULT_ECHO_CANCELLER "speex" +#endif + +static const pa_echo_canceller ec_table[] = { + { + /* Null, Dummy echo canceller (just copies data) */ + .init = pa_null_ec_init, + .run = pa_null_ec_run, + .done = pa_null_ec_done, + }, +#ifdef HAVE_SPEEX + { + /* Speex */ + .init = pa_speex_ec_init, + .run = pa_speex_ec_run, + .done = pa_speex_ec_done, + }, +#endif +#ifdef HAVE_ADRIAN_EC + { + /* Adrian Andre's NLMS implementation */ + .init = pa_adrian_ec_init, + .run = pa_adrian_ec_run, + .done = pa_adrian_ec_done, + }, +#endif +#ifdef HAVE_WEBRTC + { + /* WebRTC's audio processing engine */ + .init = pa_webrtc_ec_init, + .play = pa_webrtc_ec_play, + .record = 
pa_webrtc_ec_record, + .set_drift = pa_webrtc_ec_set_drift, + .run = pa_webrtc_ec_run, + .done = pa_webrtc_ec_done, + }, +#endif +}; + +#define DEFAULT_RATE 32000 +#define DEFAULT_CHANNELS 1 +#define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC) +#define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC) +#define DEFAULT_SAVE_AEC false +#define DEFAULT_AUTOLOADED false +#define DEFAULT_USE_MASTER_FORMAT false + +#define MEMBLOCKQ_MAXLENGTH (16*1024*1024) + +#define MAX_LATENCY_BLOCKS 10 + +/* Can only be used in main context */ +#define IS_ACTIVE(u) (((u)->source->state == PA_SOURCE_RUNNING) && \ + ((u)->sink->state == PA_SINK_RUNNING)) + +/* This module creates a new (virtual) source and sink. + * + * The data sent to the new sink is kept in a memblockq before being + * forwarded to the real sink_master. + * + * Data read from source_master is matched against the saved sink data and + * echo canceled data is then pushed onto the new source. + * + * Both source and sink masters have their own threads to push/pull data + * respectively. We however perform all our actions in the source IO thread. + * To do this we send all played samples to the source IO thread where they + * are then pushed into the memblockq. + * + * Alignment is performed in two steps: + * + * 1) when something happens that requires quick adjustment of the alignment of + * capture and playback samples, we perform a resync. This adjusts the + * position in the playback memblock to the requested sample. Quick + * adjustments include moving the playback samples before the capture + * samples (because else the echo canceller does not work) or when the + * playback pointer drifts too far away. + * + * 2) periodically check the difference between capture and playback. We use a + * low and high watermark for adjusting the alignment. Playback should always + * be before capture and the difference should not be bigger than one frame + * size. We would ideally like to resample the sink_input but most driver + * don't give enough accuracy to be able to do that right now. 
+ */ + +struct userdata; + +struct pa_echo_canceller_msg { + pa_msgobject parent; + bool dead; + struct userdata *userdata; +}; + +PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject); +#define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o)) + +struct snapshot { + pa_usec_t sink_now; + pa_usec_t sink_latency; + size_t sink_delay; + int64_t send_counter; + + pa_usec_t source_now; + pa_usec_t source_latency; + size_t source_delay; + int64_t recv_counter; + size_t rlen; + size_t plen; +}; + +struct userdata { + pa_core *core; + pa_module *module; + + bool dead; + bool save_aec; + + pa_echo_canceller *ec; + uint32_t source_output_blocksize; + uint32_t source_blocksize; + uint32_t sink_blocksize; + + bool need_realign; + + /* to wakeup the source I/O thread */ + pa_asyncmsgq *asyncmsgq; + pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write; + + pa_source *source; + bool source_auto_desc; + pa_source_output *source_output; + pa_memblockq *source_memblockq; /* echo canceller needs fixed sized chunks */ + size_t source_skip; + + pa_sink *sink; + bool sink_auto_desc; + pa_sink_input *sink_input; + pa_memblockq *sink_memblockq; + int64_t send_counter; /* updated in sink IO thread */ + int64_t recv_counter; + size_t sink_skip; + + /* Bytes left over from previous iteration */ + size_t sink_rem; + size_t source_rem; + + pa_atomic_t request_resync; + + pa_time_event *time_event; + pa_usec_t adjust_time; + int adjust_threshold; + + FILE *captured_file; + FILE *played_file; + FILE *canceled_file; + FILE *drift_file; + + bool use_volume_sharing; + + struct { + pa_cvolume current_volume; + } thread_info; +}; + +static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot); + +static const char* const valid_modargs[] = { + "source_name", + "source_properties", + "source_master", + "sink_name", + "sink_properties", + "sink_master", + "adjust_time", + "adjust_threshold", + "format", + "rate", + "channels", + "channel_map", + "aec_method", + "aec_args", + "save_aec", + "autoloaded", + "use_volume_sharing", + "use_master_format", + NULL +}; + +enum { + SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX, + SOURCE_OUTPUT_MESSAGE_REWIND, + SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, + SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME +}; + +enum { + SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT +}; + +enum { + ECHO_CANCELLER_MESSAGE_SET_VOLUME, +}; + +static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) { + int64_t diff_time, buffer_latency; + pa_usec_t plen, rlen, source_delay, sink_delay, recv_counter, send_counter; + + /* get latency difference between playback and record */ + plen = pa_bytes_to_usec(snapshot->plen, &u->sink_input->sample_spec); + rlen = pa_bytes_to_usec(snapshot->rlen, &u->source_output->sample_spec); + if (plen > rlen) + buffer_latency = plen - rlen; + else + buffer_latency = 0; + + source_delay = pa_bytes_to_usec(snapshot->source_delay, &u->source_output->sample_spec); + sink_delay = pa_bytes_to_usec(snapshot->sink_delay, &u->sink_input->sample_spec); + buffer_latency += source_delay + sink_delay; + + /* add the latency difference due to samples not yet transferred */ + send_counter = pa_bytes_to_usec(snapshot->send_counter, &u->sink->sample_spec); + recv_counter = pa_bytes_to_usec(snapshot->recv_counter, &u->sink->sample_spec); + if (recv_counter <= send_counter) + buffer_latency += (int64_t) (send_counter - recv_counter); + else + buffer_latency = PA_CLIP_SUB(buffer_latency, (int64_t) (recv_counter - send_counter)); + + /* capture and 
playback are perfectly aligned when diff_time is 0 */ + diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) - + (snapshot->source_now - snapshot->source_latency); + + pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time, + (long long) snapshot->sink_latency, + (long long) buffer_latency, (long long) snapshot->source_latency, + (long long) source_delay, (long long) sink_delay, + (long long) (send_counter - recv_counter), + (long long) (snapshot->sink_now - snapshot->source_now)); + + return diff_time; +} + +/* Called from main context */ +static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) { + struct userdata *u = userdata; + uint32_t old_rate, base_rate, new_rate; + int64_t diff_time; + /*size_t fs*/ + struct snapshot latency_snapshot; + + pa_assert(u); + pa_assert(a); + pa_assert(u->time_event == e); + pa_assert_ctl_context(); + + if (!IS_ACTIVE(u)) + return; + + /* update our snapshots */ + pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL); + pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL); + + /* calculate drift between capture and playback */ + diff_time = calc_diff(u, &latency_snapshot); + + /*fs = pa_frame_size(&u->source_output->sample_spec);*/ + old_rate = u->sink_input->sample_spec.rate; + base_rate = u->source_output->sample_spec.rate; + + if (diff_time < 0) { + /* recording before playback, we need to adjust quickly. The echo + * canceller does not work in this case. */ + pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME, + NULL, diff_time, NULL, NULL); + /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/ + new_rate = base_rate; + } + else { + if (diff_time > u->adjust_threshold) { + /* diff too big, quickly adjust */ + pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME, + NULL, diff_time, NULL, NULL); + } + + /* recording behind playback, we need to slowly adjust the rate to match */ + /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/ + + /* assume equal samplerates for now */ + new_rate = base_rate; + } + + /* make sure we don't make too big adjustments because that sounds horrible */ + if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9) + new_rate = base_rate; + + if (new_rate != old_rate) { + pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate); + + pa_sink_input_set_rate(u->sink_input, new_rate); + } + + pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time); +} + +/* Called from source I/O thread context */ +static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) { + struct userdata *u = PA_SOURCE(o)->userdata; + + switch (code) { + + case PA_SOURCE_MESSAGE_GET_LATENCY: + + /* The source is _put() before the source output is, so let's + * make sure we don't access it in that time. Also, the + * source output is first shut down, the source second. 
*/ + if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) { + *((int64_t*) data) = 0; + return 0; + } + + *((int64_t*) data) = + + /* Get the latency of the master source */ + pa_source_get_latency_within_thread(u->source_output->source, true) + + /* Add the latency internal to our source output on top */ + pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) + + /* and the buffering we do on the source */ + pa_bytes_to_usec(u->source_output_blocksize, &u->source_output->source->sample_spec); + + return 0; + + case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED: + u->thread_info.current_volume = u->source->reference_volume; + break; + } + + return pa_source_process_msg(o, code, data, offset, chunk); +} + +/* Called from sink I/O thread context */ +static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) { + struct userdata *u = PA_SINK(o)->userdata; + + switch (code) { + + case PA_SINK_MESSAGE_GET_LATENCY: + + /* The sink is _put() before the sink input is, so let's + * make sure we don't access it in that time. Also, the + * sink input is first shut down, the sink second. */ + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) { + *((int64_t*) data) = 0; + return 0; + } + + *((int64_t*) data) = + + /* Get the latency of the master sink */ + pa_sink_get_latency_within_thread(u->sink_input->sink, true) + + + /* Add the latency internal to our sink input on top */ + pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec); + + return 0; + } + + return pa_sink_process_msg(o, code, data, offset, chunk); +} + +/* Called from main context */ +static int source_set_state_in_main_thread_cb(pa_source *s, pa_source_state_t state, pa_suspend_cause_t suspend_cause) { + struct userdata *u; + + pa_source_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SOURCE_IS_LINKED(state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state)) + return 0; + + if (state == PA_SOURCE_RUNNING) { + /* restart timer when both sink and source are active */ + if ((u->sink->state == PA_SINK_RUNNING) && u->adjust_time) + pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time); + + pa_atomic_store(&u->request_resync, 1); + pa_source_output_cork(u->source_output, false); + } else if (state == PA_SOURCE_SUSPENDED) { + pa_source_output_cork(u->source_output, true); + } + + return 0; +} + +/* Called from main context */ +static int sink_set_state_in_main_thread_cb(pa_sink *s, pa_sink_state_t state, pa_suspend_cause_t suspend_cause) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->state)) + return 0; + + if (state == PA_SINK_RUNNING) { + /* restart timer when both sink and source are active */ + if ((u->source->state == PA_SOURCE_RUNNING) && u->adjust_time) + pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time); + + pa_atomic_store(&u->request_resync, 1); + pa_sink_input_cork(u->sink_input, false); + } else if (state == PA_SINK_SUSPENDED) { + pa_sink_input_cork(u->sink_input, true); + } + + return 0; +} + +/* Called from the IO thread. 
*/ +static int sink_set_state_in_io_thread_cb(pa_sink *s, pa_sink_state_t new_state, pa_suspend_cause_t new_suspend_cause) { + struct userdata *u; + + pa_assert(s); + pa_assert_se(u = s->userdata); + + /* When set to running or idle for the first time, request a rewind + * of the master sink to make sure we are heard immediately */ + if (PA_SINK_IS_OPENED(new_state) && s->thread_info.state == PA_SINK_INIT) { + pa_log_debug("Requesting rewind due to state change."); + pa_sink_input_request_rewind(u->sink_input, 0, false, true, true); + } + + return 0; +} + +/* Called from source I/O thread context */ +static void source_update_requested_latency_cb(pa_source *s) { + struct userdata *u; + pa_usec_t latency; + + pa_source_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) + return; + + pa_log_debug("Source update requested latency"); + + /* Cap the maximum latency so we don't have to process too large chunks */ + latency = PA_MIN(pa_source_get_requested_latency_within_thread(s), + pa_bytes_to_usec(u->source_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS); + + pa_source_output_set_requested_latency_within_thread(u->source_output, latency); +} + +/* Called from sink I/O thread context */ +static void sink_update_requested_latency_cb(pa_sink *s) { + struct userdata *u; + pa_usec_t latency; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) + return; + + pa_log_debug("Sink update requested latency"); + + /* Cap the maximum latency so we don't have to process too large chunks */ + latency = PA_MIN(pa_sink_get_requested_latency_within_thread(s), + pa_bytes_to_usec(u->sink_blocksize, &s->sample_spec) * MAX_LATENCY_BLOCKS); + + pa_sink_input_set_requested_latency_within_thread(u->sink_input, latency); +} + +/* Called from sink I/O thread context */ +static void sink_request_rewind_cb(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) + return; + + pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes); + + /* Just hand this one over to the master sink */ + pa_sink_input_request_rewind(u->sink_input, + s->thread_info.rewind_nbytes, true, false, false); +} + +/* Called from main context */ +static void source_set_volume_cb(pa_source *s) { + struct userdata *u; + + pa_source_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SOURCE_IS_LINKED(s->state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state)) + return; + + pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, true); +} + +/* Called from main context */ +static void sink_set_volume_cb(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(s->state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->state)) + return; + + pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, true); +} + +/* Called from main context. 
*/ +static void source_get_volume_cb(pa_source *s) { + struct userdata *u; + pa_cvolume v; + + pa_source_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SOURCE_IS_LINKED(s->state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state)) + return; + + pa_source_output_get_volume(u->source_output, &v, true); + + if (pa_cvolume_equal(&s->real_volume, &v)) + /* no change */ + return; + + s->real_volume = v; + pa_source_set_soft_volume(s, NULL); +} + +/* Called from main context */ +static void source_set_mute_cb(pa_source *s) { + struct userdata *u; + + pa_source_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SOURCE_IS_LINKED(s->state) || + !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->state)) + return; + + pa_source_output_set_mute(u->source_output, s->muted, s->save_muted); +} + +/* Called from main context */ +static void sink_set_mute_cb(pa_sink *s) { + struct userdata *u; + + pa_sink_assert_ref(s); + pa_assert_se(u = s->userdata); + + if (!PA_SINK_IS_LINKED(s->state) || + !PA_SINK_INPUT_IS_LINKED(u->sink_input->state)) + return; + + pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted); +} + +/* Called from source I/O thread context. */ +static void apply_diff_time(struct userdata *u, int64_t diff_time) { + int64_t diff; + + if (diff_time < 0) { + diff = pa_usec_to_bytes(-diff_time, &u->sink_input->sample_spec); + + if (diff > 0) { + /* add some extra safety samples to compensate for jitter in the + * timings */ + diff += 10 * pa_frame_size (&u->sink_input->sample_spec); + + pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff); + + u->sink_skip = diff; + u->source_skip = 0; + } + } else if (diff_time > 0) { + diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec); + + if (diff > 0) { + pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff); + + u->source_skip = diff; + u->sink_skip = 0; + } + } +} + +/* Called from source I/O thread context. */ +static void do_resync(struct userdata *u) { + int64_t diff_time; + struct snapshot latency_snapshot; + + pa_log("Doing resync"); + + /* update our snapshot */ + /* 1. Get sink input latency snapshot, might cause buffers to be sent to source thread */ + pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL); + /* 2. Pick up any in-flight buffers (and discard if needed) */ + while (pa_asyncmsgq_process_one(u->asyncmsgq)) + ; + /* 3. Now get the source output latency snapshot */ + source_output_snapshot_within_thread(u, &latency_snapshot); + + /* calculate drift between capture and playback */ + diff_time = calc_diff(u, &latency_snapshot); + + /* and adjust for the drift */ + apply_diff_time(u, diff_time); +} + +/* 1. Calculate drift at this point, pass to canceller + * 2. Push out playback samples in blocksize chunks + * 3. Push out capture samples in blocksize chunks + * 4. ??? + * 5. Profit + * + * Called from source I/O thread context. 
+ */ +static void do_push_drift_comp(struct userdata *u) { + size_t rlen, plen; + pa_memchunk rchunk, pchunk, cchunk; + uint8_t *rdata, *pdata, *cdata; + float drift; + int unused PA_GCC_UNUSED; + + rlen = pa_memblockq_get_length(u->source_memblockq); + plen = pa_memblockq_get_length(u->sink_memblockq); + + /* Estimate snapshot drift as follows: + * pd: amount of data consumed since last time + * rd: amount of data consumed since last time + * + * drift = (pd - rd) / rd; + * + * We calculate pd and rd as the memblockq length less the number of + * samples left from the last iteration (to avoid double counting + * those remainder samples. + */ + drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem)); + u->sink_rem = plen % u->sink_blocksize; + u->source_rem = rlen % u->source_output_blocksize; + + if (u->save_aec) { + if (u->drift_file) + fprintf(u->drift_file, "d %a\n", drift); + } + + /* Send in the playback samples first */ + while (plen >= u->sink_blocksize) { + pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk); + pdata = pa_memblock_acquire(pchunk.memblock); + pdata += pchunk.index; + + u->ec->play(u->ec, pdata); + + if (u->save_aec) { + if (u->drift_file) + fprintf(u->drift_file, "p %d\n", u->sink_blocksize); + if (u->played_file) + unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file); + } + + pa_memblock_release(pchunk.memblock); + pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize); + pa_memblock_unref(pchunk.memblock); + + plen -= u->sink_blocksize; + } + + /* And now the capture samples */ + while (rlen >= u->source_output_blocksize) { + pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk); + + rdata = pa_memblock_acquire(rchunk.memblock); + rdata += rchunk.index; + + cchunk.index = 0; + cchunk.length = u->source_output_blocksize; + cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length); + cdata = pa_memblock_acquire(cchunk.memblock); + + u->ec->set_drift(u->ec, drift); + u->ec->record(u->ec, rdata, cdata); + + if (u->save_aec) { + if (u->drift_file) + fprintf(u->drift_file, "c %d\n", u->source_output_blocksize); + if (u->captured_file) + unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file); + if (u->canceled_file) + unused = fwrite(cdata, 1, u->source_output_blocksize, u->canceled_file); + } + + pa_memblock_release(cchunk.memblock); + pa_memblock_release(rchunk.memblock); + + pa_memblock_unref(rchunk.memblock); + + pa_source_post(u->source, &cchunk); + pa_memblock_unref(cchunk.memblock); + + pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize); + rlen -= u->source_output_blocksize; + } +} + +/* This one's simpler than the drift compensation case -- we just iterate over + * the capture buffer, and pass the canceller blocksize bytes of playback and + * capture data. If playback is currently inactive, we just push silence. + * + * Called from source I/O thread context. 
*/ +static void do_push(struct userdata *u) { + size_t rlen, plen; + pa_memchunk rchunk, pchunk, cchunk; + uint8_t *rdata, *pdata, *cdata; + int unused PA_GCC_UNUSED; + + rlen = pa_memblockq_get_length(u->source_memblockq); + plen = pa_memblockq_get_length(u->sink_memblockq); + + while (rlen >= u->source_output_blocksize) { + + /* take fixed blocks from recorded and played samples */ + pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk); + pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk); + + /* we ran out of played data and pchunk has been filled with silence bytes */ + if (plen < u->sink_blocksize) + pa_memblockq_seek(u->sink_memblockq, u->sink_blocksize - plen, PA_SEEK_RELATIVE, true); + + rdata = pa_memblock_acquire(rchunk.memblock); + rdata += rchunk.index; + pdata = pa_memblock_acquire(pchunk.memblock); + pdata += pchunk.index; + + cchunk.index = 0; + cchunk.length = u->source_blocksize; + cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length); + cdata = pa_memblock_acquire(cchunk.memblock); + + if (u->save_aec) { + if (u->captured_file) + unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file); + if (u->played_file) + unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file); + } + + /* perform echo cancellation */ + u->ec->run(u->ec, rdata, pdata, cdata); + + if (u->save_aec) { + if (u->canceled_file) + unused = fwrite(cdata, 1, u->source_blocksize, u->canceled_file); + } + + pa_memblock_release(cchunk.memblock); + pa_memblock_release(pchunk.memblock); + pa_memblock_release(rchunk.memblock); + + /* drop consumed source samples */ + pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize); + pa_memblock_unref(rchunk.memblock); + rlen -= u->source_output_blocksize; + + /* drop consumed sink samples */ + pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize); + pa_memblock_unref(pchunk.memblock); + + if (plen >= u->sink_blocksize) + plen -= u->sink_blocksize; + else + plen = 0; + + /* forward the (echo-canceled) data to the virtual source */ + pa_source_post(u->source, &cchunk); + pa_memblock_unref(cchunk.memblock); + } +} + +/* Called from source I/O thread context. */ +static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) { + struct userdata *u; + size_t rlen, plen, to_skip; + pa_memchunk rchunk; + + pa_source_output_assert_ref(o); + pa_source_output_assert_io_context(o); + pa_assert_se(u = o->userdata); + + if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state)) + return; + + if (!PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) { + pa_log("Push when no link?"); + return; + } + + /* handle queued messages, do any message sending of our own */ + while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0) + ; + + pa_memblockq_push_align(u->source_memblockq, chunk); + + rlen = pa_memblockq_get_length(u->source_memblockq); + plen = pa_memblockq_get_length(u->sink_memblockq); + + /* Let's not do anything else till we have enough data to process */ + if (rlen < u->source_output_blocksize) + return; + + /* See if we need to drop samples in order to sync */ + if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) { + do_resync(u); + } + + /* Okay, skip cancellation for skipped source samples if needed. */ + if (PA_UNLIKELY(u->source_skip)) { + /* The slightly tricky bit here is that we drop all but modulo + * blocksize bytes and then adjust for that last bit on the sink side. 
+ * We do this because the source data is coming at a fixed rate, which + * means the only way to try to catch up is drop sink samples and let + * the canceller cope up with this. */ + to_skip = rlen >= u->source_skip ? u->source_skip : rlen; + to_skip -= to_skip % u->source_output_blocksize; + + if (to_skip) { + pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk); + pa_source_post(u->source, &rchunk); + + pa_memblock_unref(rchunk.memblock); + pa_memblockq_drop(u->source_memblockq, to_skip); + + rlen -= to_skip; + u->source_skip -= to_skip; + } + + if (rlen && u->source_skip % u->source_output_blocksize) { + u->sink_skip += (uint64_t) (u->source_output_blocksize - (u->source_skip % u->source_output_blocksize)) * u->sink_blocksize / u->source_output_blocksize; + u->source_skip -= (u->source_skip % u->source_output_blocksize); + } + } + + /* And for the sink, these samples have been played back already, so we can + * just drop them and get on with it. */ + if (PA_UNLIKELY(u->sink_skip)) { + to_skip = plen >= u->sink_skip ? u->sink_skip : plen; + + pa_memblockq_drop(u->sink_memblockq, to_skip); + + plen -= to_skip; + u->sink_skip -= to_skip; + } + + /* process and push out samples */ + if (u->ec->params.drift_compensation) + do_push_drift_comp(u); + else + do_push(u); +} + +/* Called from sink I/O thread context. */ +static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert(chunk); + pa_assert_se(u = i->userdata); + + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state)) + return -1; + + if (u->sink->thread_info.rewind_requested) + pa_sink_process_rewind(u->sink, 0); + + pa_sink_render_full(u->sink, nbytes, chunk); + + if (i->thread_info.underrun_for > 0) { + pa_log_debug("Handling end of underrun."); + pa_atomic_store(&u->request_resync, 1); + } + + /* let source thread handle the chunk. pass the sample count as well so that + * the source IO thread can update the right variables. */ + pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST, + NULL, 0, chunk, NULL); + u->send_counter += chunk->length; + + return 0; +} + +/* Called from source I/O thread context. */ +static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_source_output_assert_io_context(o); + pa_assert_se(u = o->userdata); + + /* If the source is not yet linked, there is nothing to rewind */ + if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state)) + return; + + pa_source_process_rewind(u->source, nbytes); + + /* go back on read side, we need to use older sink data for this */ + pa_memblockq_rewind(u->sink_memblockq, nbytes); + + /* manipulate write index */ + pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, true); + + pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes, + (long long) pa_memblockq_get_length (u->source_memblockq)); +} + +/* Called from sink I/O thread context. 
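The modulo handling above is easier to follow with concrete numbers; here is a minimal sketch using assumed block sizes (they are not the module's fixed values).

/* Sketch of the skip alignment above, with assumed block sizes. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t source_output_blocksize = 1280;  /* bytes per capture block */
    uint64_t sink_blocksize = 2560;           /* assumed playback block  */
    uint64_t source_skip = 3000, sink_skip = 0;
    uint64_t rlen = 4000;                     /* capture bytes queued    */

    /* drop whole blocks only on the source side... */
    uint64_t to_skip = rlen >= source_skip ? source_skip : rlen;
    to_skip -= to_skip % source_output_blocksize;             /* 2560     */
    source_skip -= to_skip;                                   /* 440 left */
    rlen -= to_skip;

    /* ...and push the sub-block remainder over to the sink side, scaled
     * to the sink block size */
    if (rlen && source_skip % source_output_blocksize) {
        sink_skip += (source_output_blocksize - source_skip % source_output_blocksize)
                     * sink_blocksize / source_output_blocksize;   /* 1680 */
        source_skip -= source_skip % source_output_blocksize;      /* 0    */
    }

    printf("to_skip=%llu source_skip=%llu sink_skip=%llu\n",
           (unsigned long long) to_skip, (unsigned long long) source_skip,
           (unsigned long long) sink_skip);
    return 0;
}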
*/ +static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + /* If the sink is not yet linked, there is nothing to rewind */ + if (!PA_SINK_IS_LINKED(u->sink->thread_info.state)) + return; + + pa_log_debug("Sink process rewind %lld", (long long) nbytes); + + pa_sink_process_rewind(u->sink, nbytes); + + pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL); + u->send_counter -= nbytes; +} + +/* Called from source I/O thread context. */ +static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) { + size_t delay, rlen, plen; + pa_usec_t now, latency; + + now = pa_rtclock_now(); + latency = pa_source_get_latency_within_thread(u->source_output->source, false); + delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq); + + delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay); + rlen = pa_memblockq_get_length(u->source_memblockq); + plen = pa_memblockq_get_length(u->sink_memblockq); + + snapshot->source_now = now; + snapshot->source_latency = latency; + snapshot->source_delay = delay; + snapshot->recv_counter = u->recv_counter; + snapshot->rlen = rlen + u->sink_skip; + snapshot->plen = plen + u->source_skip; +} + +/* Called from source I/O thread context. */ +static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) { + struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata; + + switch (code) { + + case SOURCE_OUTPUT_MESSAGE_POST: + + pa_source_output_assert_io_context(u->source_output); + + if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING) + pa_memblockq_push_align(u->sink_memblockq, chunk); + else + pa_memblockq_flush_write(u->sink_memblockq, true); + + u->recv_counter += (int64_t) chunk->length; + + return 0; + + case SOURCE_OUTPUT_MESSAGE_REWIND: + pa_source_output_assert_io_context(u->source_output); + + /* manipulate write index, never go past what we have */ + if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state)) + pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, true); + else + pa_memblockq_flush_write(u->sink_memblockq, true); + + pa_log_debug("Sink rewind (%lld)", (long long) offset); + + u->recv_counter -= offset; + + return 0; + + case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: { + struct snapshot *snapshot = (struct snapshot *) data; + + source_output_snapshot_within_thread(u, snapshot); + return 0; + } + + case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME: + apply_diff_time(u, offset); + return 0; + + } + + return pa_source_output_process_msg(obj, code, data, offset, chunk); +} + +/* Called from sink I/O thread context. */ +static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) { + struct userdata *u = PA_SINK_INPUT(obj)->userdata; + + switch (code) { + + case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: { + size_t delay; + pa_usec_t now, latency; + struct snapshot *snapshot = (struct snapshot *) data; + + pa_sink_input_assert_io_context(u->sink_input); + + now = pa_rtclock_now(); + latency = pa_sink_get_latency_within_thread(u->sink_input->sink, false); + delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq); + + delay = (u->sink_input->thread_info.resampler ? 
pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay); + + snapshot->sink_now = now; + snapshot->sink_latency = latency; + snapshot->sink_delay = delay; + snapshot->send_counter = u->send_counter; + return 0; + } + } + + return pa_sink_input_process_msg(obj, code, data, offset, chunk); +} + +/* Called from sink I/O thread context. */ +static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_log_debug("Sink input update max rewind %lld", (long long) nbytes); + + /* FIXME: Too small max_rewind: + * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */ + pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes); + pa_sink_set_max_rewind_within_thread(u->sink, nbytes); +} + +/* Called from source I/O thread context. */ +static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_se(u = o->userdata); + + pa_log_debug("Source output update max rewind %lld", (long long) nbytes); + + pa_source_set_max_rewind_within_thread(u->source, nbytes); +} + +/* Called from sink I/O thread context. */ +static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_log_debug("Sink input update max request %lld", (long long) nbytes); + + pa_sink_set_max_request_within_thread(u->sink, nbytes); +} + +/* Called from sink I/O thread context. */ +static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) { + struct userdata *u; + pa_usec_t latency; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + latency = pa_sink_get_requested_latency_within_thread(i->sink); + + pa_log_debug("Sink input update requested latency %lld", (long long) latency); +} + +/* Called from source I/O thread context. */ +static void source_output_update_source_requested_latency_cb(pa_source_output *o) { + struct userdata *u; + pa_usec_t latency; + + pa_source_output_assert_ref(o); + pa_assert_se(u = o->userdata); + + latency = pa_source_get_requested_latency_within_thread(o->source); + + pa_log_debug("Source output update requested latency %lld", (long long) latency); +} + +/* Called from sink I/O thread context. */ +static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_log_debug("Sink input update latency range %lld %lld", + (long long) i->sink->thread_info.min_latency, + (long long) i->sink->thread_info.max_latency); + + pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency); +} + +/* Called from source I/O thread context. */ +static void source_output_update_source_latency_range_cb(pa_source_output *o) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_se(u = o->userdata); + + pa_log_debug("Source output update latency range %lld %lld", + (long long) o->source->thread_info.min_latency, + (long long) o->source->thread_info.max_latency); + + pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency); +} + +/* Called from sink I/O thread context. 
*/ +static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_log_debug("Sink input update fixed latency %lld", + (long long) i->sink->thread_info.fixed_latency); + + pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency); +} + +/* Called from source I/O thread context. */ +static void source_output_update_source_fixed_latency_cb(pa_source_output *o) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_se(u = o->userdata); + + pa_log_debug("Source output update fixed latency %lld", + (long long) o->source->thread_info.fixed_latency); + + pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency); +} + +/* Called from source I/O thread context. */ +static void source_output_attach_cb(pa_source_output *o) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_source_output_assert_io_context(o); + pa_assert_se(u = o->userdata); + + pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll); + pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency); + pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency); + pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o)); + + pa_log_debug("Source output %d attach", o->index); + + if (PA_SOURCE_IS_LINKED(u->source->thread_info.state)) + pa_source_attach_within_thread(u->source); + + u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read( + o->source->thread_info.rtpoll, + PA_RTPOLL_LATE, + u->asyncmsgq); +} + +/* Called from sink I/O thread context. */ +static void sink_input_attach_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll); + pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency); + + /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE + * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */ + pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency); + + /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND + * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT + * HERE. SEE (6) */ + pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i)); + + /* FIXME: Too small max_rewind: + * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */ + pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i)); + + pa_log_debug("Sink input %d attach", i->index); + + u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write( + i->sink->thread_info.rtpoll, + PA_RTPOLL_LATE, + u->asyncmsgq); + + if (PA_SINK_IS_LINKED(u->sink->thread_info.state)) + pa_sink_attach_within_thread(u->sink); +} + +/* Called from source I/O thread context. */ +static void source_output_detach_cb(pa_source_output *o) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_source_output_assert_io_context(o); + pa_assert_se(u = o->userdata); + + if (PA_SOURCE_IS_LINKED(u->source->thread_info.state)) + pa_source_detach_within_thread(u->source); + pa_source_set_rtpoll(u->source, NULL); + + pa_log_debug("Source output %d detach", o->index); + + if (u->rtpoll_item_read) { + pa_rtpoll_item_free(u->rtpoll_item_read); + u->rtpoll_item_read = NULL; + } +} + +/* Called from sink I/O thread context. 
*/ +static void sink_input_detach_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + if (PA_SINK_IS_LINKED(u->sink->thread_info.state)) + pa_sink_detach_within_thread(u->sink); + + pa_sink_set_rtpoll(u->sink, NULL); + + pa_log_debug("Sink input %d detach", i->index); + + if (u->rtpoll_item_write) { + pa_rtpoll_item_free(u->rtpoll_item_write); + u->rtpoll_item_write = NULL; + } +} + +/* Called from source I/O thread context except when cork() is called without valid source. */ +static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_se(u = o->userdata); + + pa_log_debug("Source output %d state %d", o->index, state); +} + +/* Called from sink I/O thread context. */ +static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_log_debug("Sink input %d state %d", i->index, state); +} + +/* Called from main context. */ +static void source_output_kill_cb(pa_source_output *o) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_ctl_context(); + pa_assert_se(u = o->userdata); + + u->dead = true; + + /* The order here matters! We first kill the source so that streams can + * properly be moved away while the source output is still connected to + * the master. */ + pa_source_output_cork(u->source_output, true); + pa_source_unlink(u->source); + pa_source_output_unlink(u->source_output); + + pa_source_output_unref(u->source_output); + u->source_output = NULL; + + pa_source_unref(u->source); + u->source = NULL; + + pa_log_debug("Source output kill %d", o->index); + + pa_module_unload_request(u->module, true); +} + +/* Called from main context */ +static void sink_input_kill_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + u->dead = true; + + /* The order here matters! We first kill the sink so that streams + * can properly be moved away while the sink input is still connected + * to the master. */ + pa_sink_input_cork(u->sink_input, true); + pa_sink_unlink(u->sink); + pa_sink_input_unlink(u->sink_input); + + pa_sink_input_unref(u->sink_input); + u->sink_input = NULL; + + pa_sink_unref(u->sink); + u->sink = NULL; + + pa_log_debug("Sink input kill %d", i->index); + + pa_module_unload_request(u->module, true); +} + +/* Called from main context. */ +static bool source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) { + struct userdata *u; + + pa_source_output_assert_ref(o); + pa_assert_ctl_context(); + pa_assert_se(u = o->userdata); + + if (u->dead) + return false; + + return (u->source != dest) && (u->sink != dest->monitor_of); +} + +/* Called from main context */ +static bool sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + if (u->dead) + return false; + + return u->sink != dest; +} + +/* Called from main context. 
*/ +static void source_output_moving_cb(pa_source_output *o, pa_source *dest) { + struct userdata *u; + uint32_t idx; + pa_source_output *output; + + pa_source_output_assert_ref(o); + pa_assert_ctl_context(); + pa_assert_se(u = o->userdata); + + if (dest) { + pa_source_set_asyncmsgq(u->source, dest->asyncmsgq); + pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags); + } else + pa_source_set_asyncmsgq(u->source, NULL); + + /* Propagate asyncmsq change to attached virtual sources */ + PA_IDXSET_FOREACH(output, u->source->outputs, idx) { + if (output->destination_source && output->moving) + output->moving(output, u->source); + } + + if (u->source_auto_desc && dest) { + const char *y, *z; + pa_proplist *pl; + + pl = pa_proplist_new(); + if (u->sink_input->sink) { + pa_proplist_sets(pl, PA_PROP_DEVICE_MASTER_DEVICE, u->sink_input->sink->name); + y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION); + } else + y = "<unknown>"; /* Probably in the middle of a move */ + z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name, + y ? y : u->sink_input->sink->name); + + pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl); + pa_proplist_free(pl); + } +} + +/* Called from main context */ +static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + if (dest) { + pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq); + pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags); + } else + pa_sink_set_asyncmsgq(u->sink, NULL); + + if (u->sink_auto_desc && dest) { + const char *y, *z; + pa_proplist *pl; + + pl = pa_proplist_new(); + if (u->source_output->source) { + pa_proplist_sets(pl, PA_PROP_DEVICE_MASTER_DEVICE, u->source_output->source->name); + y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION); + } else + y = "<unknown>"; /* Probably in the middle of a move */ + z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name, + y ? y : u->source_output->source->name); + + pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl); + pa_proplist_free(pl); + } +} + +/* Called from main context */ +static void sink_input_volume_changed_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_sink_volume_changed(u->sink, &i->volume); +} + +/* Called from main context */ +static void sink_input_mute_changed_cb(pa_sink_input *i) { + struct userdata *u; + + pa_sink_input_assert_ref(i); + pa_assert_se(u = i->userdata); + + pa_sink_mute_changed(u->sink, i->muted); +} + +/* Called from main context */ +static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) { + struct pa_echo_canceller_msg *msg; + struct userdata *u; + + pa_assert(o); + + msg = PA_ECHO_CANCELLER_MSG(o); + + /* When the module is unloaded, there may still remain queued messages for + * the canceller. Messages are sent to the main thread using the master + * source's asyncmsgq, and that message queue isn't (and can't be, at least + * with the current asyncmsgq API) cleared from the canceller messages when + * module-echo-cancel is unloaded. 
+ * + * The userdata may already have been freed at this point, but the + * asyncmsgq holds a reference to the pa_echo_canceller_msg object, which + * contains a flag to indicate that all remaining messages have to be + * ignored. */ + if (msg->dead) + return 0; + + u = msg->userdata; + + switch (code) { + case ECHO_CANCELLER_MESSAGE_SET_VOLUME: { + pa_volume_t v = PA_PTR_TO_UINT(userdata); + pa_cvolume vol; + + if (u->use_volume_sharing) { + pa_cvolume_set(&vol, u->source->sample_spec.channels, v); + pa_source_set_volume(u->source, &vol, true, false); + } else { + pa_cvolume_set(&vol, u->source_output->sample_spec.channels, v); + pa_source_output_set_volume(u->source_output, &vol, false, true); + } + + break; + } + + default: + pa_assert_not_reached(); + break; + } + + return 0; +} + +/* Called by the canceller, so source I/O thread context. */ +pa_volume_t pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec) { +#ifndef ECHO_CANCEL_TEST + return pa_cvolume_avg(&ec->msg->userdata->thread_info.current_volume); +#else + return PA_VOLUME_NORM; +#endif +} + +/* Called by the canceller, so source I/O thread context. */ +void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_volume_t v) { +#ifndef ECHO_CANCEL_TEST + if (pa_cvolume_avg(&ec->msg->userdata->thread_info.current_volume) != v) { + pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, PA_UINT_TO_PTR(v), + 0, NULL, NULL); + } +#endif +} + +uint32_t pa_echo_canceller_blocksize_power2(unsigned rate, unsigned ms) { + unsigned nframes = (rate * ms) / 1000; + uint32_t y = 1 << ((8 * sizeof(uint32_t)) - 2); + + pa_assert(rate >= 4000); + pa_assert(ms >= 1); + + /* nframes should be a power of 2, round down to nearest power of two */ + while (y > nframes) + y >>= 1; + + pa_assert(y >= 1); + return y; +} + +static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) { + if (pa_streq(method, "null")) + return PA_ECHO_CANCELLER_NULL; +#ifdef HAVE_SPEEX + if (pa_streq(method, "speex")) + return PA_ECHO_CANCELLER_SPEEX; +#endif +#ifdef HAVE_ADRIAN_EC + if (pa_streq(method, "adrian")) + return PA_ECHO_CANCELLER_ADRIAN; +#endif +#ifdef HAVE_WEBRTC + if (pa_streq(method, "webrtc")) + return PA_ECHO_CANCELLER_WEBRTC; +#endif + return PA_ECHO_CANCELLER_INVALID; +} + +/* Common initialisation bits between module-echo-cancel and the standalone + * test program. + * + * Called from main context. 
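pa_echo_canceller_blocksize_power2() above simply rounds the requested frame count down to a power of two; this stand-alone re-implementation shows the effect for a couple of arbitrary rate and frame-time combinations.

/* Illustrative copy of the rounding done by
 * pa_echo_canceller_blocksize_power2(); rates and frame times are
 * arbitrary examples. */
#include <stdint.h>
#include <stdio.h>

static uint32_t blocksize_power2(unsigned rate, unsigned ms) {
    unsigned nframes = (rate * ms) / 1000;
    uint32_t y = 1u << ((8 * sizeof(uint32_t)) - 2);

    while (y > nframes)
        y >>= 1;
    return y;
}

int main(void) {
    printf("%u\n", (unsigned) blocksize_power2(32000, 20));  /* 640 -> 512 */
    printf("%u\n", (unsigned) blocksize_power2(48000, 10));  /* 480 -> 256 */
    return 0;
}

The aec_method string handled by get_ec_method_from_string() decides which ec_table entry supplies the canceller callbacks in init_common() below; for example, a made-up invocation such as pactl load-module module-echo-cancel aec_method=webrtc source_master=<source> sink_master=<sink> selects the WebRTC implementation, while omitting aec_method falls back to the compiled-in default.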
*/ +static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) { + const char *ec_string; + pa_echo_canceller_method_t ec_method; + + if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) { + pa_log("Invalid sample format specification or channel map"); + goto fail; + } + + u->ec = pa_xnew0(pa_echo_canceller, 1); + if (!u->ec) { + pa_log("Failed to alloc echo canceller"); + goto fail; + } + + ec_string = pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER); + if ((ec_method = get_ec_method_from_string(ec_string)) < 0) { + pa_log("Invalid echo canceller implementation '%s'", ec_string); + goto fail; + } + + pa_log_info("Using AEC engine: %s", ec_string); + + u->ec->init = ec_table[ec_method].init; + u->ec->play = ec_table[ec_method].play; + u->ec->record = ec_table[ec_method].record; + u->ec->set_drift = ec_table[ec_method].set_drift; + u->ec->run = ec_table[ec_method].run; + u->ec->done = ec_table[ec_method].done; + + return 0; + +fail: + return -1; +} + +/* Called from main context. */ +int pa__init(pa_module*m) { + struct userdata *u; + pa_sample_spec source_output_ss, source_ss, sink_ss; + pa_channel_map source_output_map, source_map, sink_map; + pa_modargs *ma; + pa_source *source_master=NULL; + pa_sink *sink_master=NULL; + bool autoloaded; + pa_source_output_new_data source_output_data; + pa_sink_input_new_data sink_input_data; + pa_source_new_data source_data; + pa_sink_new_data sink_data; + pa_memchunk silence; + uint32_t temp; + uint32_t nframes = 0; + bool use_master_format; + pa_usec_t blocksize_usec; + + pa_assert(m); + + if (!(ma = pa_modargs_new(m->argument, valid_modargs))) { + pa_log("Failed to parse module arguments."); + goto fail; + } + + if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) { + pa_log("Master source not found"); + goto fail; + } + pa_assert(source_master); + + if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) { + pa_log("Master sink not found"); + goto fail; + } + pa_assert(sink_master); + + if (source_master->monitor_of == sink_master) { + pa_log("Can't cancel echo between a sink and its monitor"); + goto fail; + } + + /* Set to true if we just want to inherit sample spec and channel map from the sink and source master */ + use_master_format = DEFAULT_USE_MASTER_FORMAT; + if (pa_modargs_get_value_boolean(ma, "use_master_format", &use_master_format) < 0) { + pa_log("use_master_format= expects a boolean argument"); + goto fail; + } + + source_ss = source_master->sample_spec; + sink_ss = sink_master->sample_spec; + + if (use_master_format) { + source_map = source_master->channel_map; + sink_map = sink_master->channel_map; + } else { + source_ss = source_master->sample_spec; + source_ss.rate = DEFAULT_RATE; + source_ss.channels = DEFAULT_CHANNELS; + pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT); + + sink_ss = sink_master->sample_spec; + sink_ss.rate = DEFAULT_RATE; + sink_ss.channels = DEFAULT_CHANNELS; + pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT); + } + + u = pa_xnew0(struct userdata, 1); + if (!u) { + pa_log("Failed to alloc userdata"); + goto fail; + } + u->core = m->core; + u->module = m; + m->userdata = u; + u->dead = false; + + u->use_volume_sharing = true; + if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) { + 
pa_log("use_volume_sharing= expects a boolean argument"); + goto fail; + } + + temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC; + if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) { + pa_log("Failed to parse adjust_time value"); + goto fail; + } + + if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC) + u->adjust_time = temp * PA_USEC_PER_SEC; + else + u->adjust_time = DEFAULT_ADJUST_TIME_USEC; + + temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC; + if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) { + pa_log("Failed to parse adjust_threshold value"); + goto fail; + } + + if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC) + u->adjust_threshold = temp * PA_USEC_PER_MSEC; + else + u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE; + + u->save_aec = DEFAULT_SAVE_AEC; + if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) { + pa_log("Failed to parse save_aec value"); + goto fail; + } + + autoloaded = DEFAULT_AUTOLOADED; + if (pa_modargs_get_value_boolean(ma, "autoloaded", &autoloaded) < 0) { + pa_log("Failed to parse autoloaded value"); + goto fail; + } + + if (init_common(ma, u, &source_ss, &source_map) < 0) + goto fail; + + u->asyncmsgq = pa_asyncmsgq_new(0); + if (!u->asyncmsgq) { + pa_log("pa_asyncmsgq_new() failed."); + goto fail; + } + + u->need_realign = true; + + source_output_ss = source_ss; + source_output_map = source_map; + + if (sink_ss.rate != source_ss.rate) { + pa_log_info("Sample rates of play and out stream differ. Adjusting rate of play stream."); + sink_ss.rate = source_ss.rate; + } + + pa_assert(u->ec->init); + if (!u->ec->init(u->core, u->ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes, pa_modargs_get_value(ma, "aec_args", NULL))) { + pa_log("Failed to init AEC engine"); + goto fail; + } + + pa_assert(source_output_ss.rate == source_ss.rate); + pa_assert(sink_ss.rate == source_ss.rate); + + u->source_output_blocksize = nframes * pa_frame_size(&source_output_ss); + u->source_blocksize = nframes * pa_frame_size(&source_ss); + u->sink_blocksize = nframes * pa_frame_size(&sink_ss); + + if (u->ec->params.drift_compensation) + pa_assert(u->ec->set_drift); + + /* Create source */ + pa_source_new_data_init(&source_data); + source_data.driver = __FILE__; + source_data.module = m; + if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL)))) + source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name); + pa_source_new_data_set_sample_spec(&source_data, &source_ss); + pa_source_new_data_set_channel_map(&source_data, &source_map); + pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name); + pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter"); + if (!autoloaded) + pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone"); + + if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) { + pa_log("Invalid properties"); + pa_source_new_data_done(&source_data); + goto fail; + } + + if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) { + const char *y, *z; + + y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION); + z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", + z ? z : source_master->name, y ? 
y : sink_master->name); + } + + u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY)) + | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0)); + pa_source_new_data_done(&source_data); + + if (!u->source) { + pa_log("Failed to create source."); + goto fail; + } + + u->source->parent.process_msg = source_process_msg_cb; + u->source->set_state_in_main_thread = source_set_state_in_main_thread_cb; + u->source->update_requested_latency = source_update_requested_latency_cb; + pa_source_set_set_mute_callback(u->source, source_set_mute_cb); + if (!u->use_volume_sharing) { + pa_source_set_get_volume_callback(u->source, source_get_volume_cb); + pa_source_set_set_volume_callback(u->source, source_set_volume_cb); + pa_source_enable_decibel_volume(u->source, true); + } + u->source->userdata = u; + + pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq); + + /* Create sink */ + pa_sink_new_data_init(&sink_data); + sink_data.driver = __FILE__; + sink_data.module = m; + if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL)))) + sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name); + pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss); + pa_sink_new_data_set_channel_map(&sink_data, &sink_map); + pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name); + pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter"); + if (!autoloaded) + pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone"); + + if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) { + pa_log("Invalid properties"); + pa_sink_new_data_done(&sink_data); + goto fail; + } + + if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) { + const char *y, *z; + + y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION); + z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION); + pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", + z ? z : sink_master->name, y ? y : source_master->name); + } + + u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY)) + | (u->use_volume_sharing ? 
PA_SINK_SHARE_VOLUME_WITH_MASTER : 0)); + pa_sink_new_data_done(&sink_data); + + if (!u->sink) { + pa_log("Failed to create sink."); + goto fail; + } + + u->sink->parent.process_msg = sink_process_msg_cb; + u->sink->set_state_in_main_thread = sink_set_state_in_main_thread_cb; + u->sink->set_state_in_io_thread = sink_set_state_in_io_thread_cb; + u->sink->update_requested_latency = sink_update_requested_latency_cb; + u->sink->request_rewind = sink_request_rewind_cb; + pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb); + if (!u->use_volume_sharing) { + pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb); + pa_sink_enable_decibel_volume(u->sink, true); + } + u->sink->userdata = u; + + pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq); + + /* Create source output */ + pa_source_output_new_data_init(&source_output_data); + source_output_data.driver = __FILE__; + source_output_data.module = m; + pa_source_output_new_data_set_source(&source_output_data, source_master, false, true); + source_output_data.destination_source = u->source; + + pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream"); + pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter"); + pa_source_output_new_data_set_sample_spec(&source_output_data, &source_output_ss); + pa_source_output_new_data_set_channel_map(&source_output_data, &source_output_map); + source_output_data.flags |= PA_SOURCE_OUTPUT_START_CORKED; + + if (autoloaded) + source_output_data.flags |= PA_SOURCE_OUTPUT_DONT_MOVE; + + pa_source_output_new(&u->source_output, m->core, &source_output_data); + pa_source_output_new_data_done(&source_output_data); + + if (!u->source_output) + goto fail; + + u->source_output->parent.process_msg = source_output_process_msg_cb; + u->source_output->push = source_output_push_cb; + u->source_output->process_rewind = source_output_process_rewind_cb; + u->source_output->update_max_rewind = source_output_update_max_rewind_cb; + u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb; + u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb; + u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb; + u->source_output->kill = source_output_kill_cb; + u->source_output->attach = source_output_attach_cb; + u->source_output->detach = source_output_detach_cb; + u->source_output->state_change = source_output_state_change_cb; + u->source_output->may_move_to = source_output_may_move_to_cb; + u->source_output->moving = source_output_moving_cb; + u->source_output->userdata = u; + + u->source->output_from_master = u->source_output; + + /* Create sink input */ + pa_sink_input_new_data_init(&sink_input_data); + sink_input_data.driver = __FILE__; + sink_input_data.module = m; + pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, false, true); + sink_input_data.origin_sink = u->sink; + pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream"); + pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter"); + pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss); + pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map); + sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE | PA_SINK_INPUT_START_CORKED; + + if (autoloaded) + sink_input_data.flags |= PA_SINK_INPUT_DONT_MOVE; + + pa_sink_input_new(&u->sink_input, m->core, &sink_input_data); + 
pa_sink_input_new_data_done(&sink_input_data); + + if (!u->sink_input) + goto fail; + + u->sink_input->parent.process_msg = sink_input_process_msg_cb; + u->sink_input->pop = sink_input_pop_cb; + u->sink_input->process_rewind = sink_input_process_rewind_cb; + u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb; + u->sink_input->update_max_request = sink_input_update_max_request_cb; + u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb; + u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb; + u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb; + u->sink_input->kill = sink_input_kill_cb; + u->sink_input->attach = sink_input_attach_cb; + u->sink_input->detach = sink_input_detach_cb; + u->sink_input->state_change = sink_input_state_change_cb; + u->sink_input->may_move_to = sink_input_may_move_to_cb; + u->sink_input->moving = sink_input_moving_cb; + if (!u->use_volume_sharing) + u->sink_input->volume_changed = sink_input_volume_changed_cb; + u->sink_input->mute_changed = sink_input_mute_changed_cb; + u->sink_input->userdata = u; + + u->sink->input_to_master = u->sink_input; + + pa_sink_input_get_silence(u->sink_input, &silence); + + u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0, + &source_output_ss, 1, 1, 0, &silence); + u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0, + &sink_ss, 0, 1, 0, &silence); + + pa_memblock_unref(silence.memblock); + + if (!u->source_memblockq || !u->sink_memblockq) { + pa_log("Failed to create memblockq."); + goto fail; + } + + if (u->adjust_time > 0 && !u->ec->params.drift_compensation) + u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u); + else if (u->ec->params.drift_compensation) { + pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled"); + u->adjust_time = 0; + /* Perform resync just once to give the canceller a leg up */ + pa_atomic_store(&u->request_resync, 1); + } + + if (u->save_aec) { + pa_log("Creating AEC files in /tmp"); + u->captured_file = fopen("/tmp/aec_rec.sw", "wb"); + if (u->captured_file == NULL) + perror ("fopen failed"); + u->played_file = fopen("/tmp/aec_play.sw", "wb"); + if (u->played_file == NULL) + perror ("fopen failed"); + u->canceled_file = fopen("/tmp/aec_out.sw", "wb"); + if (u->canceled_file == NULL) + perror ("fopen failed"); + if (u->ec->params.drift_compensation) { + u->drift_file = fopen("/tmp/aec_drift.txt", "w"); + if (u->drift_file == NULL) + perror ("fopen failed"); + } + } + + u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg); + u->ec->msg->parent.process_msg = canceller_process_msg_cb; + u->ec->msg->userdata = u; + + u->thread_info.current_volume = u->source->reference_volume; + + /* We don't want to deal with too many chunks at a time */ + blocksize_usec = pa_bytes_to_usec(u->source_blocksize, &u->source->sample_spec); + if (u->source->flags & PA_SOURCE_DYNAMIC_LATENCY) + pa_source_set_latency_range(u->source, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS); + pa_source_output_set_requested_latency(u->source_output, blocksize_usec * MAX_LATENCY_BLOCKS); + + blocksize_usec = pa_bytes_to_usec(u->sink_blocksize, &u->sink->sample_spec); + if (u->sink->flags & PA_SINK_DYNAMIC_LATENCY) + pa_sink_set_latency_range(u->sink, blocksize_usec, blocksize_usec * MAX_LATENCY_BLOCKS); + 
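The latency bounds requested around here are just the canceller block time and a small multiple of it. A quick sketch, reusing the assumed 512-frame, 32 kHz, float32 stereo block from the earlier sketches; MAX_LATENCY_BLOCKS comes from earlier in this file, and the 16 used below is only a stand-in.

/* Sketch of the block-time arithmetic behind the latency range setup;
 * the sample spec and the MAX_LATENCY_BLOCKS value are assumptions. */
#include <stdio.h>

int main(void) {
    unsigned nframes = 512, rate = 32000, frame_size = 8;
    unsigned max_latency_blocks = 16;                 /* stand-in value */

    unsigned long long blocksize = (unsigned long long) nframes * frame_size;
    unsigned long long blocksize_usec =
        blocksize * 1000000ull / ((unsigned long long) rate * frame_size);

    printf("one block = %llu usec, requested range %llu .. %llu usec\n",
           blocksize_usec, blocksize_usec,
           blocksize_usec * max_latency_blocks);
    /* 16000 usec per block, range 16000 .. 256000 usec for this spec */
    return 0;
}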
pa_sink_input_set_requested_latency(u->sink_input, blocksize_usec * MAX_LATENCY_BLOCKS); + + /* The order here is important. The input/output must be put first, + * otherwise streams might attach to the sink/source before the + * sink input or source output is attached to the master. */ + pa_sink_input_put(u->sink_input); + pa_source_output_put(u->source_output); + + pa_sink_put(u->sink); + pa_source_put(u->source); + + pa_source_output_cork(u->source_output, false); + pa_sink_input_cork(u->sink_input, false); + + pa_modargs_free(ma); + + return 0; + +fail: + if (ma) + pa_modargs_free(ma); + + pa__done(m); + + return -1; +} + +/* Called from main context. */ +int pa__get_n_used(pa_module *m) { + struct userdata *u; + + pa_assert(m); + pa_assert_se(u = m->userdata); + + return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source); +} + +/* Called from main context. */ +void pa__done(pa_module*m) { + struct userdata *u; + + pa_assert(m); + + if (!(u = m->userdata)) + return; + + u->dead = true; + + /* See comments in source_output_kill_cb() above regarding + * destruction order! */ + + if (u->time_event) + u->core->mainloop->time_free(u->time_event); + + if (u->source_output) + pa_source_output_cork(u->source_output, true); + if (u->sink_input) + pa_sink_input_cork(u->sink_input, true); + + if (u->source) + pa_source_unlink(u->source); + if (u->sink) + pa_sink_unlink(u->sink); + + if (u->source_output) { + pa_source_output_unlink(u->source_output); + pa_source_output_unref(u->source_output); + } + + if (u->sink_input) { + pa_sink_input_unlink(u->sink_input); + pa_sink_input_unref(u->sink_input); + } + + if (u->source) + pa_source_unref(u->source); + if (u->sink) + pa_sink_unref(u->sink); + + if (u->source_memblockq) + pa_memblockq_free(u->source_memblockq); + if (u->sink_memblockq) + pa_memblockq_free(u->sink_memblockq); + + if (u->ec) { + if (u->ec->done) + u->ec->done(u->ec); + + if (u->ec->msg) { + u->ec->msg->dead = true; + pa_echo_canceller_msg_unref(u->ec->msg); + } + + pa_xfree(u->ec); + } + + if (u->asyncmsgq) + pa_asyncmsgq_unref(u->asyncmsgq); + + if (u->save_aec) { + if (u->played_file) + fclose(u->played_file); + if (u->captured_file) + fclose(u->captured_file); + if (u->canceled_file) + fclose(u->canceled_file); + if (u->drift_file) + fclose(u->drift_file); + } + + pa_xfree(u); +} + +#ifdef ECHO_CANCEL_TEST +/* + * Stand-alone test program for running in the canceller on pre-recorded files. + */ +int main(int argc, char* argv[]) { + struct userdata u; + pa_sample_spec source_output_ss, source_ss, sink_ss; + pa_channel_map source_output_map, source_map, sink_map; + pa_modargs *ma = NULL; + uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL; + int unused PA_GCC_UNUSED; + int ret = 0, i; + char c; + float drift; + uint32_t nframes; + + if (!getenv("MAKE_CHECK")) + pa_log_set_level(PA_LOG_DEBUG); + + pa_memzero(&u, sizeof(u)); + + if (argc < 4 || argc > 7) { + goto usage; + } + + u.captured_file = fopen(argv[2], "rb"); + if (u.captured_file == NULL) { + perror ("Could not open capture file"); + goto fail; + } + u.played_file = fopen(argv[1], "rb"); + if (u.played_file == NULL) { + perror ("Could not open play file"); + goto fail; + } + u.canceled_file = fopen(argv[3], "wb"); + if (u.canceled_file == NULL) { + perror ("Could not open canceled file"); + goto fail; + } + + u.core = pa_xnew0(pa_core, 1); + u.core->cpu_info.cpu_type = PA_CPU_X86; + u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE; + + if (!(ma = pa_modargs_new(argc > 4 ? 
argv[4] : NULL, valid_modargs))) { + pa_log("Failed to parse module arguments."); + goto fail; + } + + source_ss.format = PA_SAMPLE_FLOAT32LE; + source_ss.rate = DEFAULT_RATE; + source_ss.channels = DEFAULT_CHANNELS; + pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT); + + sink_ss.format = PA_SAMPLE_FLOAT32LE; + sink_ss.rate = DEFAULT_RATE; + sink_ss.channels = DEFAULT_CHANNELS; + pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT); + + if (init_common(ma, &u, &source_ss, &source_map) < 0) + goto fail; + + source_output_ss = source_ss; + source_output_map = source_map; + + if (!u.ec->init(u.core, u.ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes, + pa_modargs_get_value(ma, "aec_args", NULL))) { + pa_log("Failed to init AEC engine"); + goto fail; + } + u.source_output_blocksize = nframes * pa_frame_size(&source_output_ss); + u.source_blocksize = nframes * pa_frame_size(&source_ss); + u.sink_blocksize = nframes * pa_frame_size(&sink_ss); + + if (u.ec->params.drift_compensation) { + if (argc < 6) { + pa_log("Drift compensation enabled but drift file not specified"); + goto fail; + } + + u.drift_file = fopen(argv[5], "rt"); + + if (u.drift_file == NULL) { + perror ("Could not open drift file"); + goto fail; + } + } + + rdata = pa_xmalloc(u.source_output_blocksize); + pdata = pa_xmalloc(u.sink_blocksize); + cdata = pa_xmalloc(u.source_blocksize); + + if (!u.ec->params.drift_compensation) { + while (fread(rdata, u.source_output_blocksize, 1, u.captured_file) > 0) { + if (fread(pdata, u.sink_blocksize, 1, u.played_file) == 0) { + perror("Played file ended before captured file"); + goto fail; + } + + u.ec->run(u.ec, rdata, pdata, cdata); + + unused = fwrite(cdata, u.source_blocksize, 1, u.canceled_file); + } + } else { + while (fscanf(u.drift_file, "%c", &c) > 0) { + switch (c) { + case 'd': + if (!fscanf(u.drift_file, "%a", &drift)) { + perror("Drift file incomplete"); + goto fail; + } + + u.ec->set_drift(u.ec, drift); + + break; + + case 'c': + if (!fscanf(u.drift_file, "%d", &i)) { + perror("Drift file incomplete"); + goto fail; + } + + if (fread(rdata, i, 1, u.captured_file) <= 0) { + perror("Captured file ended prematurely"); + goto fail; + } + + u.ec->record(u.ec, rdata, cdata); + + unused = fwrite(cdata, i, 1, u.canceled_file); + + break; + + case 'p': + if (!fscanf(u.drift_file, "%d", &i)) { + perror("Drift file incomplete"); + goto fail; + } + + if (fread(pdata, i, 1, u.played_file) <= 0) { + perror("Played file ended prematurely"); + goto fail; + } + + u.ec->play(u.ec, pdata); + + break; + } + } + + if (fread(rdata, i, 1, u.captured_file) > 0) + pa_log("All capture data was not consumed"); + if (fread(pdata, i, 1, u.played_file) > 0) + pa_log("All playback data was not consumed"); + } + + u.ec->done(u.ec); + u.ec->msg->dead = true; + pa_echo_canceller_msg_unref(u.ec->msg); + +out: + if (u.captured_file) + fclose(u.captured_file); + if (u.played_file) + fclose(u.played_file); + if (u.canceled_file) + fclose(u.canceled_file); + if (u.drift_file) + fclose(u.drift_file); + + pa_xfree(rdata); + pa_xfree(pdata); + pa_xfree(cdata); + + pa_xfree(u.ec); + pa_xfree(u.core); + + if (ma) + pa_modargs_free(ma); + + return ret; + +usage: + pa_log("Usage: %s play_file rec_file out_file [module args] [drift_file]", argv[0]); + +fail: + ret = -1; + goto out; +} +#endif /* ECHO_CANCEL_TEST */ diff --git a/src/modules/echo-cancel/null.c b/src/modules/echo-cancel/null.c new file mode 100644 index 
0000000..c8ecf27 --- /dev/null +++ b/src/modules/echo-cancel/null.c @@ -0,0 +1,56 @@ +/*** + Copyright 2012 Peter Meerwald <p.meerwald@bct-electronic.com> + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + +***/ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulse/cdecl.h> + +PA_C_DECL_BEGIN +#include <pulsecore/core-util.h> +#include <pulsecore/modargs.h> +#include "echo-cancel.h" +PA_C_DECL_END + +bool pa_null_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args) { + char strss_source[PA_SAMPLE_SPEC_SNPRINT_MAX]; + char strss_sink[PA_SAMPLE_SPEC_SNPRINT_MAX]; + + *nframes = 256; + ec->params.null.out_ss = *out_ss; + + *rec_ss = *out_ss; + *rec_map = *out_map; + + pa_log_debug("null AEC: nframes=%u, sample spec source=%s, sample spec sink=%s", *nframes, + pa_sample_spec_snprint(strss_source, sizeof(strss_source), out_ss), + pa_sample_spec_snprint(strss_sink, sizeof(strss_sink), play_ss)); + + return true; +} + +void pa_null_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) { + /* The null implementation simply copies the recorded buffer to the output + buffer and ignores the play buffer. */ + memcpy(out, rec, 256 * pa_frame_size(&ec->params.null.out_ss)); +} + +void pa_null_ec_done(pa_echo_canceller *ec) { +} diff --git a/src/modules/echo-cancel/speex.c b/src/modules/echo-cancel/speex.c new file mode 100644 index 0000000..794399a --- /dev/null +++ b/src/modules/echo-cancel/speex.c @@ -0,0 +1,237 @@ +/*** + This file is part of PulseAudio. + + Copyright 2010 Wim Taymans <wim.taymans@gmail.com> + + Contributor: Arun Raghavan <arun.raghavan@collabora.co.uk> + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. 
+***/ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulsecore/core-util.h> +#include <pulsecore/modargs.h> +#include "echo-cancel.h" + +/* should be between 10-20 ms */ +#define DEFAULT_FRAME_SIZE_MS 20 +/* should be between 100-500 ms */ +#define DEFAULT_FILTER_SIZE_MS 200 +#define DEFAULT_AGC_ENABLED true +#define DEFAULT_DENOISE_ENABLED true +#define DEFAULT_DEREVERB_ENABLED true +#define DEFAULT_ECHO_SUPPRESS_ENABLED true +#define DEFAULT_ECHO_SUPPRESS_ATTENUATION 0 + +static const char* const valid_modargs[] = { + "frame_size_ms", + "filter_size_ms", + "agc", + "denoise", + "dereverb", + "echo_suppress", + "echo_suppress_attenuation", + "echo_suppress_attenuation_active", + NULL +}; + +static void speex_ec_fixate_spec(pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map) { + out_ss->format = PA_SAMPLE_S16NE; + + *play_ss = *out_ss; + *play_map = *out_map; + *rec_ss = *out_ss; + *rec_map = *out_map; +} + +static bool pa_speex_ec_preprocessor_init(pa_echo_canceller *ec, pa_sample_spec *out_ss, uint32_t nframes, pa_modargs *ma) { + bool agc; + bool denoise; + bool dereverb; + bool echo_suppress; + int32_t echo_suppress_attenuation; + int32_t echo_suppress_attenuation_active; + + agc = DEFAULT_AGC_ENABLED; + if (pa_modargs_get_value_boolean(ma, "agc", &agc) < 0) { + pa_log("Failed to parse agc value"); + goto fail; + } + + denoise = DEFAULT_DENOISE_ENABLED; + if (pa_modargs_get_value_boolean(ma, "denoise", &denoise) < 0) { + pa_log("Failed to parse denoise value"); + goto fail; + } + + dereverb = DEFAULT_DEREVERB_ENABLED; + if (pa_modargs_get_value_boolean(ma, "dereverb", &dereverb) < 0) { + pa_log("Failed to parse dereverb value"); + goto fail; + } + + echo_suppress = DEFAULT_ECHO_SUPPRESS_ENABLED; + if (pa_modargs_get_value_boolean(ma, "echo_suppress", &echo_suppress) < 0) { + pa_log("Failed to parse echo_suppress value"); + goto fail; + } + + echo_suppress_attenuation = DEFAULT_ECHO_SUPPRESS_ATTENUATION; + if (pa_modargs_get_value_s32(ma, "echo_suppress_attenuation", &echo_suppress_attenuation) < 0) { + pa_log("Failed to parse echo_suppress_attenuation value"); + goto fail; + } + if (echo_suppress_attenuation > 0) { + pa_log("echo_suppress_attenuation should be a negative dB value"); + goto fail; + } + + echo_suppress_attenuation_active = DEFAULT_ECHO_SUPPRESS_ATTENUATION; + if (pa_modargs_get_value_s32(ma, "echo_suppress_attenuation_active", &echo_suppress_attenuation_active) < 0) { + pa_log("Failed to parse echo_suppress_attenuation_active value"); + goto fail; + } + if (echo_suppress_attenuation_active > 0) { + pa_log("echo_suppress_attenuation_active should be a negative dB value"); + goto fail; + } + + if (agc || denoise || dereverb || echo_suppress) { + spx_int32_t tmp; + + if (out_ss->channels != 1) { + pa_log("AGC, denoising, dereverb and echo suppression only work with channels=1"); + goto fail; + } + + ec->params.speex.pp_state = speex_preprocess_state_init(nframes, out_ss->rate); + + tmp = agc; + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_AGC, &tmp); + + tmp = denoise; + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_DENOISE, &tmp); + + tmp = dereverb; + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_DEREVERB, &tmp); + + if (echo_suppress) { + if (echo_suppress_attenuation) + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS, + &echo_suppress_attenuation); 
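These submodule options are tuned through the module's aec_args argument; something like aec_args="frame_size_ms=10 filter_size_ms=100 dereverb=0" would be one made-up but valid combination given the range checks below. With the defaults, 20 ms frames and a 200 ms filter, the sizes eventually handed to Speex work out as in this sketch, which assumes a 32 kHz mono stream.

/* Worked example of the Speex sizes derived from the defaults above;
 * the 32 kHz mono stream is an assumption. */
#include <stdio.h>

int main(void) {
    unsigned rate = 32000;
    unsigned frame_size_ms = 20;    /* DEFAULT_FRAME_SIZE_MS  */
    unsigned filter_size_ms = 200;  /* DEFAULT_FILTER_SIZE_MS */

    /* nframes is rounded down to a power of two, as in
     * pa_echo_canceller_blocksize_power2() */
    unsigned nframes = (rate * frame_size_ms) / 1000;         /* 640  */
    unsigned y = 1u << 30;
    while (y > nframes)
        y >>= 1;                                              /* 512  */

    unsigned filter_length = (rate * filter_size_ms) / 1000;  /* 6400 */

    printf("nframes=%u filter_length=%u\n", y, filter_length);
    /* i.e. speex_echo_state_init_mc(512, 6400, 1, 1) for this spec */
    return 0;
}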
+ + if (echo_suppress_attenuation_active) { + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_SUPPRESS_ACTIVE, + &echo_suppress_attenuation_active); + } + } + + speex_preprocess_ctl(ec->params.speex.pp_state, SPEEX_PREPROCESS_SET_ECHO_STATE, + ec->params.speex.state); + + pa_log_info("Loaded speex preprocessor with params: agc=%s, denoise=%s, dereverb=%s, echo_suppress=%s", + pa_yes_no(agc), pa_yes_no(denoise), pa_yes_no(dereverb), pa_yes_no(echo_suppress)); + } else + pa_log_info("All preprocessing options are disabled"); + + return true; + +fail: + return false; +} + +bool pa_speex_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args) { + int rate; + uint32_t frame_size_ms, filter_size_ms; + pa_modargs *ma; + + if (!(ma = pa_modargs_new(args, valid_modargs))) { + pa_log("Failed to parse submodule arguments."); + goto fail; + } + + filter_size_ms = DEFAULT_FILTER_SIZE_MS; + if (pa_modargs_get_value_u32(ma, "filter_size_ms", &filter_size_ms) < 0 || filter_size_ms < 1 || filter_size_ms > 2000) { + pa_log("Invalid filter_size_ms specification"); + goto fail; + } + + frame_size_ms = DEFAULT_FRAME_SIZE_MS; + if (pa_modargs_get_value_u32(ma, "frame_size_ms", &frame_size_ms) < 0 || frame_size_ms < 1 || frame_size_ms > 200) { + pa_log("Invalid frame_size_ms specification"); + goto fail; + } + + speex_ec_fixate_spec(rec_ss, rec_map, play_ss, play_map, out_ss, out_map); + + rate = out_ss->rate; + *nframes = pa_echo_canceller_blocksize_power2(rate, frame_size_ms); + + pa_log_debug ("Using nframes %d, channels %d, rate %d", *nframes, out_ss->channels, out_ss->rate); + ec->params.speex.state = speex_echo_state_init_mc(*nframes, (rate * filter_size_ms) / 1000, out_ss->channels, out_ss->channels); + + if (!ec->params.speex.state) + goto fail; + + speex_echo_ctl(ec->params.speex.state, SPEEX_ECHO_SET_SAMPLING_RATE, &rate); + + if (!pa_speex_ec_preprocessor_init(ec, out_ss, *nframes, ma)) + goto fail; + + pa_modargs_free(ma); + return true; + +fail: + if (ma) + pa_modargs_free(ma); + if (ec->params.speex.pp_state) { + speex_preprocess_state_destroy(ec->params.speex.pp_state); + ec->params.speex.pp_state = NULL; + } + if (ec->params.speex.state) { + speex_echo_state_destroy(ec->params.speex.state); + ec->params.speex.state = NULL; + } + return false; +} + +void pa_speex_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) { + speex_echo_cancellation(ec->params.speex.state, (const spx_int16_t *) rec, (const spx_int16_t *) play, + (spx_int16_t *) out); + + /* preprecessor is run after AEC. This is not a mistake! */ + if (ec->params.speex.pp_state) + speex_preprocess_run(ec->params.speex.pp_state, (spx_int16_t *) out); +} + +void pa_speex_ec_done(pa_echo_canceller *ec) { + if (ec->params.speex.pp_state) { + speex_preprocess_state_destroy(ec->params.speex.pp_state); + ec->params.speex.pp_state = NULL; + } + + if (ec->params.speex.state) { + speex_echo_state_destroy(ec->params.speex.state); + ec->params.speex.state = NULL; + } +} diff --git a/src/modules/echo-cancel/webrtc.cc b/src/modules/echo-cancel/webrtc.cc new file mode 100644 index 0000000..ec3ba06 --- /dev/null +++ b/src/modules/echo-cancel/webrtc.cc @@ -0,0 +1,594 @@ +/*** + This file is part of PulseAudio. + + Copyright 2011 Collabora Ltd. 
+ 2015 Aldebaran SoftBank Group + + Contributor: Arun Raghavan <mail@arunraghavan.net> + + PulseAudio is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published + by the Free Software Foundation; either version 2.1 of the License, + or (at your option) any later version. + + PulseAudio is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with PulseAudio; if not, see <http://www.gnu.org/licenses/>. +***/ + +#ifdef HAVE_CONFIG_H +#include <config.h> +#endif + +#include <pulse/cdecl.h> + +PA_C_DECL_BEGIN +#include <pulsecore/core-util.h> +#include <pulsecore/modargs.h> + +#include <pulse/timeval.h> +#include "echo-cancel.h" +PA_C_DECL_END + +#include <webrtc/modules/audio_processing/include/audio_processing.h> +#include <webrtc/modules/interface/module_common_types.h> +#include <webrtc/system_wrappers/include/trace.h> + +#define BLOCK_SIZE_US 10000 + +#define DEFAULT_HIGH_PASS_FILTER true +#define DEFAULT_NOISE_SUPPRESSION true +#define DEFAULT_ANALOG_GAIN_CONTROL true +#define DEFAULT_DIGITAL_GAIN_CONTROL false +#define DEFAULT_MOBILE false +#define DEFAULT_ROUTING_MODE "speakerphone" +#define DEFAULT_COMFORT_NOISE true +#define DEFAULT_DRIFT_COMPENSATION false +#define DEFAULT_VAD true +#define DEFAULT_EXTENDED_FILTER false +#define DEFAULT_INTELLIGIBILITY_ENHANCER false +#define DEFAULT_EXPERIMENTAL_AGC false +#define DEFAULT_AGC_START_VOLUME 85 +#define DEFAULT_BEAMFORMING false +#define DEFAULT_TRACE false + +#define WEBRTC_AGC_MAX_VOLUME 255 + +static const char* const valid_modargs[] = { + "high_pass_filter", + "noise_suppression", + "analog_gain_control", + "digital_gain_control", + "mobile", + "routing_mode", + "comfort_noise", + "drift_compensation", + "voice_detection", + "extended_filter", + "intelligibility_enhancer", + "experimental_agc", + "agc_start_volume", + "beamforming", + "mic_geometry", /* documented in parse_mic_geometry() */ + "target_direction", /* documented in parse_mic_geometry() */ + "trace", + NULL +}; + +static int routing_mode_from_string(const char *rmode) { + if (pa_streq(rmode, "quiet-earpiece-or-headset")) + return webrtc::EchoControlMobile::kQuietEarpieceOrHeadset; + else if (pa_streq(rmode, "earpiece")) + return webrtc::EchoControlMobile::kEarpiece; + else if (pa_streq(rmode, "loud-earpiece")) + return webrtc::EchoControlMobile::kLoudEarpiece; + else if (pa_streq(rmode, "speakerphone")) + return webrtc::EchoControlMobile::kSpeakerphone; + else if (pa_streq(rmode, "loud-speakerphone")) + return webrtc::EchoControlMobile::kLoudSpeakerphone; + else + return -1; +} + +class PaWebrtcTraceCallback : public webrtc::TraceCallback { + void Print(webrtc::TraceLevel level, const char *message, int length) + { + if (level & webrtc::kTraceError || level & webrtc::kTraceCritical) + pa_log(message); + else if (level & webrtc::kTraceWarning) + pa_log_warn(message); + else if (level & webrtc::kTraceInfo) + pa_log_info(message); + else + pa_log_debug(message); + } +}; + +static int webrtc_volume_from_pa(pa_volume_t v) +{ + return (v * WEBRTC_AGC_MAX_VOLUME) / PA_VOLUME_NORM; +} + +static pa_volume_t webrtc_volume_to_pa(int v) +{ + return (v * PA_VOLUME_NORM) / WEBRTC_AGC_MAX_VOLUME; +} + +static void webrtc_ec_fixate_spec(pa_sample_spec *rec_ss, 
pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + bool beamforming) +{ + rec_ss->format = PA_SAMPLE_FLOAT32NE; + play_ss->format = PA_SAMPLE_FLOAT32NE; + + /* AudioProcessing expects one of the following rates */ + if (rec_ss->rate >= 48000) + rec_ss->rate = 48000; + else if (rec_ss->rate >= 32000) + rec_ss->rate = 32000; + else if (rec_ss->rate >= 16000) + rec_ss->rate = 16000; + else + rec_ss->rate = 8000; + + *out_ss = *rec_ss; + *out_map = *rec_map; + + if (beamforming) { + /* The beamformer gives us a single channel */ + out_ss->channels = 1; + pa_channel_map_init_mono(out_map); + } + + /* Playback stream rate needs to be the same as capture */ + play_ss->rate = rec_ss->rate; +} + +static bool parse_point(const char **point, float (&f)[3]) { + int ret, length; + + ret = sscanf(*point, "%g,%g,%g%n", &f[0], &f[1], &f[2], &length); + if (ret != 3) + return false; + + /* Consume the bytes we've read so far */ + *point += length; + + return true; +} + +static bool parse_mic_geometry(const char **mic_geometry, std::vector<webrtc::Point>& geometry) { + /* The microphone geometry is expressed as cartesian point form: + * x1,y1,z1,x2,y2,z2,... + * + * Where x1,y1,z1 is the position of the first microphone with regards to + * the array's "center", x2,y2,z2 the position of the second, and so on. + * + * 'x' is the horizontal coordinate, with positive values being to the + * right from the mic array's perspective. + * + * 'y' is the depth coordinate, with positive values being in front of the + * array. + * + * 'z' is the vertical coordinate, with positive values being above the + * array. + * + * All distances are in meters. + */ + + /* The target direction is expected to be in spherical point form: + * a,e,r + * + * Where 'a' is the azimuth of the target point relative to the center of + * the array, 'e' its elevation, and 'r' the radius. + * + * 0 radians azimuth is to the right of the array, and positive angles + * move in a counter-clockwise direction. + * + * 0 radians elevation is horizontal w.r.t. the array, and positive + * angles go upwards. + * + * radius is distance from the array center in meters. 
+ */ + + long unsigned int i; + float f[3]; + + for (i = 0; i < geometry.size(); i++) { + if (!parse_point(mic_geometry, f)) { + pa_log("Failed to parse channel %lu in mic_geometry", i); + return false; + } + + /* Except for the last point, we should have a trailing comma */ + if (i != geometry.size() - 1) { + if (**mic_geometry != ',') { + pa_log("Failed to parse channel %lu in mic_geometry", i); + return false; + } + + (*mic_geometry)++; + } + + pa_log_debug("Got mic #%lu position: (%g, %g, %g)", i, f[0], f[1], f[2]); + + geometry[i].c[0] = f[0]; + geometry[i].c[1] = f[1]; + geometry[i].c[2] = f[2]; + } + + if (**mic_geometry != '\0') { + pa_log("Failed to parse mic_geometry value: more parameters than expected"); + return false; + } + + return true; +} + +bool pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec, + pa_sample_spec *rec_ss, pa_channel_map *rec_map, + pa_sample_spec *play_ss, pa_channel_map *play_map, + pa_sample_spec *out_ss, pa_channel_map *out_map, + uint32_t *nframes, const char *args) { + webrtc::AudioProcessing *apm = NULL; + webrtc::ProcessingConfig pconfig; + webrtc::Config config; + bool hpf, ns, agc, dgc, mobile, cn, vad, ext_filter, intelligibility, experimental_agc, beamforming; + int rm = -1, i; + uint32_t agc_start_volume; + pa_modargs *ma; + bool trace = false; + + if (!(ma = pa_modargs_new(args, valid_modargs))) { + pa_log("Failed to parse submodule arguments."); + goto fail; + } + + hpf = DEFAULT_HIGH_PASS_FILTER; + if (pa_modargs_get_value_boolean(ma, "high_pass_filter", &hpf) < 0) { + pa_log("Failed to parse high_pass_filter value"); + goto fail; + } + + ns = DEFAULT_NOISE_SUPPRESSION; + if (pa_modargs_get_value_boolean(ma, "noise_suppression", &ns) < 0) { + pa_log("Failed to parse noise_suppression value"); + goto fail; + } + + agc = DEFAULT_ANALOG_GAIN_CONTROL; + if (pa_modargs_get_value_boolean(ma, "analog_gain_control", &agc) < 0) { + pa_log("Failed to parse analog_gain_control value"); + goto fail; + } + + dgc = agc ? 
false : DEFAULT_DIGITAL_GAIN_CONTROL; + if (pa_modargs_get_value_boolean(ma, "digital_gain_control", &dgc) < 0) { + pa_log("Failed to parse digital_gain_control value"); + goto fail; + } + + if (agc && dgc) { + pa_log("You must pick only one between analog and digital gain control"); + goto fail; + } + + mobile = DEFAULT_MOBILE; + if (pa_modargs_get_value_boolean(ma, "mobile", &mobile) < 0) { + pa_log("Failed to parse mobile value"); + goto fail; + } + + ec->params.drift_compensation = DEFAULT_DRIFT_COMPENSATION; + if (pa_modargs_get_value_boolean(ma, "drift_compensation", &ec->params.drift_compensation) < 0) { + pa_log("Failed to parse drift_compensation value"); + goto fail; + } + + if (mobile) { + if (ec->params.drift_compensation) { + pa_log("Can't use drift_compensation in mobile mode"); + goto fail; + } + + if ((rm = routing_mode_from_string(pa_modargs_get_value(ma, "routing_mode", DEFAULT_ROUTING_MODE))) < 0) { + pa_log("Failed to parse routing_mode value"); + goto fail; + } + + cn = DEFAULT_COMFORT_NOISE; + if (pa_modargs_get_value_boolean(ma, "comfort_noise", &cn) < 0) { + pa_log("Failed to parse cn value"); + goto fail; + } + } else { + if (pa_modargs_get_value(ma, "comfort_noise", NULL) || pa_modargs_get_value(ma, "routing_mode", NULL)) { + pa_log("The routing_mode and comfort_noise options are only valid with mobile=true"); + goto fail; + } + } + + vad = DEFAULT_VAD; + if (pa_modargs_get_value_boolean(ma, "voice_detection", &vad) < 0) { + pa_log("Failed to parse voice_detection value"); + goto fail; + } + + ext_filter = DEFAULT_EXTENDED_FILTER; + if (pa_modargs_get_value_boolean(ma, "extended_filter", &ext_filter) < 0) { + pa_log("Failed to parse extended_filter value"); + goto fail; + } + + intelligibility = DEFAULT_INTELLIGIBILITY_ENHANCER; + if (pa_modargs_get_value_boolean(ma, "intelligibility_enhancer", &intelligibility) < 0) { + pa_log("Failed to parse intelligibility_enhancer value"); + goto fail; + } + + experimental_agc = DEFAULT_EXPERIMENTAL_AGC; + if (pa_modargs_get_value_boolean(ma, "experimental_agc", &experimental_agc) < 0) { + pa_log("Failed to parse experimental_agc value"); + goto fail; + } + + agc_start_volume = DEFAULT_AGC_START_VOLUME; + if (pa_modargs_get_value_u32(ma, "agc_start_volume", &agc_start_volume) < 0) { + pa_log("Failed to parse agc_start_volume value"); + goto fail; + } + if (agc_start_volume > WEBRTC_AGC_MAX_VOLUME) { + pa_log("AGC start volume must not exceed %u", WEBRTC_AGC_MAX_VOLUME); + goto fail; + } + ec->params.webrtc.agc_start_volume = agc_start_volume; + + beamforming = DEFAULT_BEAMFORMING; + if (pa_modargs_get_value_boolean(ma, "beamforming", &beamforming) < 0) { + pa_log("Failed to parse beamforming value"); + goto fail; + } + + if (ext_filter) + config.Set<webrtc::ExtendedFilter>(new webrtc::ExtendedFilter(true)); + if (intelligibility) + pa_log_warn("The intelligibility enhancer is not currently supported"); + if (experimental_agc) + config.Set<webrtc::ExperimentalAgc>(new webrtc::ExperimentalAgc(true, ec->params.webrtc.agc_start_volume)); + + trace = DEFAULT_TRACE; + if (pa_modargs_get_value_boolean(ma, "trace", &trace) < 0) { + pa_log("Failed to parse trace value"); + goto fail; + } + + if (trace) { + webrtc::Trace::CreateTrace(); + webrtc::Trace::set_level_filter(webrtc::kTraceAll); + ec->params.webrtc.trace_callback = new PaWebrtcTraceCallback(); + webrtc::Trace::SetTraceCallback((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback); + } + + webrtc_ec_fixate_spec(rec_ss, rec_map, play_ss, play_map, out_ss, out_map, 
beamforming); + + /* We do this after fixate because we need the capture channel count */ + if (beamforming) { + std::vector<webrtc::Point> geometry(rec_ss->channels); + webrtc::SphericalPointf direction(0.0f, 0.0f, 0.0f); + const char *mic_geometry, *target_direction; + + if (!(mic_geometry = pa_modargs_get_value(ma, "mic_geometry", NULL))) { + pa_log("mic_geometry must be set if beamforming is enabled"); + goto fail; + } + + if (!parse_mic_geometry(&mic_geometry, geometry)) { + pa_log("Failed to parse mic_geometry value"); + goto fail; + } + + if ((target_direction = pa_modargs_get_value(ma, "target_direction", NULL))) { + float f[3]; + + if (!parse_point(&target_direction, f)) { + pa_log("Failed to parse target_direction value"); + goto fail; + } + + if (*target_direction != '\0') { + pa_log("Failed to parse target_direction value: more parameters than expected"); + goto fail; + } + +#define IS_ZERO(f) ((f) < 0.000001 && (f) > -0.000001) + + if (!IS_ZERO(f[1]) || !IS_ZERO(f[2])) { + pa_log("The beamformer currently only supports targeting along the azimuth"); + goto fail; + } + + direction.s[0] = f[0]; + direction.s[1] = f[1]; + direction.s[2] = f[2]; + } + + if (!target_direction) + config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry)); + else + config.Set<webrtc::Beamforming>(new webrtc::Beamforming(true, geometry, direction)); + } + + apm = webrtc::AudioProcessing::Create(config); + + pconfig = { + webrtc::StreamConfig(rec_ss->rate, rec_ss->channels, false), /* input stream */ + webrtc::StreamConfig(out_ss->rate, out_ss->channels, false), /* output stream */ + webrtc::StreamConfig(play_ss->rate, play_ss->channels, false), /* reverse input stream */ + webrtc::StreamConfig(play_ss->rate, play_ss->channels, false), /* reverse output stream */ + }; + if (apm->Initialize(pconfig) != webrtc::AudioProcessing::kNoError) { + pa_log("Error initialising audio processing module"); + goto fail; + } + + if (hpf) + apm->high_pass_filter()->Enable(true); + + if (!mobile) { + apm->echo_cancellation()->enable_drift_compensation(ec->params.drift_compensation); + apm->echo_cancellation()->Enable(true); + } else { + apm->echo_control_mobile()->set_routing_mode(static_cast<webrtc::EchoControlMobile::RoutingMode>(rm)); + apm->echo_control_mobile()->enable_comfort_noise(cn); + apm->echo_control_mobile()->Enable(true); + } + + if (ns) { + apm->noise_suppression()->set_level(webrtc::NoiseSuppression::kHigh); + apm->noise_suppression()->Enable(true); + } + + if (agc || dgc) { + if (mobile && rm <= webrtc::EchoControlMobile::kEarpiece) { + /* Maybe this should be a knob, but we've got a lot of knobs already */ + apm->gain_control()->set_mode(webrtc::GainControl::kFixedDigital); + ec->params.webrtc.agc = false; + } else if (dgc) { + apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital); + ec->params.webrtc.agc = false; + } else { + apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveAnalog); + if (apm->gain_control()->set_analog_level_limits(0, WEBRTC_AGC_MAX_VOLUME) != + webrtc::AudioProcessing::kNoError) { + pa_log("Failed to initialise AGC"); + goto fail; + } + ec->params.webrtc.agc = true; + } + + apm->gain_control()->Enable(true); + } + + if (vad) + apm->voice_detection()->Enable(true); + + ec->params.webrtc.apm = apm; + ec->params.webrtc.rec_ss = *rec_ss; + ec->params.webrtc.play_ss = *play_ss; + ec->params.webrtc.out_ss = *out_ss; + ec->params.webrtc.blocksize = (uint64_t) out_ss->rate * BLOCK_SIZE_US / PA_USEC_PER_SEC; + *nframes = ec->params.webrtc.blocksize; + 
ec->params.webrtc.first = true; + + for (i = 0; i < rec_ss->channels; i++) + ec->params.webrtc.rec_buffer[i] = pa_xnew(float, *nframes); + for (i = 0; i < play_ss->channels; i++) + ec->params.webrtc.play_buffer[i] = pa_xnew(float, *nframes); + + pa_modargs_free(ma); + return true; + +fail: + if (ma) + pa_modargs_free(ma); + if (ec->params.webrtc.trace_callback) { + webrtc::Trace::ReturnTrace(); + delete ((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback); + } if (apm) + delete apm; + + return false; +} + +void pa_webrtc_ec_play(pa_echo_canceller *ec, const uint8_t *play) { + webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm; + const pa_sample_spec *ss = &ec->params.webrtc.play_ss; + int n = ec->params.webrtc.blocksize; + float **buf = ec->params.webrtc.play_buffer; + webrtc::StreamConfig config(ss->rate, ss->channels, false); + + pa_deinterleave(play, (void **) buf, ss->channels, pa_sample_size(ss), n); + + pa_assert_se(apm->ProcessReverseStream(buf, config, config, buf) == webrtc::AudioProcessing::kNoError); + + /* FIXME: If ProcessReverseStream() makes any changes to the audio, such as + * applying intelligibility enhancement, those changes don't have any + * effect. This function is called at the source side, but the processing + * would have to be done in the sink to be able to feed the processed audio + * to speakers. */ +} + +void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out) { + webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm; + const pa_sample_spec *rec_ss = &ec->params.webrtc.rec_ss; + const pa_sample_spec *out_ss = &ec->params.webrtc.out_ss; + float **buf = ec->params.webrtc.rec_buffer; + int n = ec->params.webrtc.blocksize; + int old_volume, new_volume; + webrtc::StreamConfig rec_config(rec_ss->rate, rec_ss->channels, false); + webrtc::StreamConfig out_config(out_ss->rate, out_ss->channels, false); + + pa_deinterleave(rec, (void **) buf, rec_ss->channels, pa_sample_size(rec_ss), n); + + if (ec->params.webrtc.agc) { + pa_volume_t v = pa_echo_canceller_get_capture_volume(ec); + old_volume = webrtc_volume_from_pa(v); + apm->gain_control()->set_stream_analog_level(old_volume); + } + + apm->set_stream_delay_ms(0); + pa_assert_se(apm->ProcessStream(buf, rec_config, out_config, buf) == webrtc::AudioProcessing::kNoError); + + if (ec->params.webrtc.agc) { + if (PA_UNLIKELY(ec->params.webrtc.first)) { + /* We start at a sane default volume (taken from the Chromium + * condition on the experimental AGC in audio_processing.h). 
This is + * needed to make sure that there's enough energy in the capture + * signal for the AGC to work */ + ec->params.webrtc.first = false; + new_volume = ec->params.webrtc.agc_start_volume; + } else { + new_volume = apm->gain_control()->stream_analog_level(); + } + + if (old_volume != new_volume) + pa_echo_canceller_set_capture_volume(ec, webrtc_volume_to_pa(new_volume)); + } + + pa_interleave((const void **) buf, out_ss->channels, out, pa_sample_size(out_ss), n); +} + +void pa_webrtc_ec_set_drift(pa_echo_canceller *ec, float drift) { + webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.webrtc.apm; + + apm->echo_cancellation()->set_stream_drift_samples(drift * ec->params.webrtc.blocksize); +} + +void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) { + pa_webrtc_ec_play(ec, play); + pa_webrtc_ec_record(ec, rec, out); +} + +void pa_webrtc_ec_done(pa_echo_canceller *ec) { + int i; + + if (ec->params.webrtc.trace_callback) { + webrtc::Trace::ReturnTrace(); + delete ((PaWebrtcTraceCallback *) ec->params.webrtc.trace_callback); + } + + if (ec->params.webrtc.apm) { + delete (webrtc::AudioProcessing*)ec->params.webrtc.apm; + ec->params.webrtc.apm = NULL; + } + + for (i = 0; i < ec->params.webrtc.rec_ss.channels; i++) + pa_xfree(ec->params.webrtc.rec_buffer[i]); + for (i = 0; i < ec->params.webrtc.play_ss.channels; i++) + pa_xfree(ec->params.webrtc.play_buffer[i]); +} |
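For readers working out the mic_geometry/target_direction syntax documented in the parse_mic_geometry() comment above, the following standalone sketch mirrors the sscanf()-based triple parsing of parse_point(). It is purely illustrative and not part of the diff: the function name parse_point_sketch, the main() driver and the two-microphone coordinates (6.4 cm spacing along the x axis) are invented for the example.

    /* Illustration of the "x,y,z[,x,y,z...]" format consumed by parse_point()
     * and parse_mic_geometry() above. Compiles standalone with any C compiler. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool parse_point_sketch(const char **point, float f[3]) {
        int length;

        /* %n records how many characters were consumed, so the caller can
         * resume scanning the next triple after the ',' separator. */
        if (sscanf(*point, "%g,%g,%g%n", &f[0], &f[1], &f[2], &length) != 3)
            return false;

        *point += length;
        return true;
    }

    int main(void) {
        /* Hypothetical two-microphone array, 6.4 cm apart along the x axis. */
        const char *geometry = "-0.032,0,0,0.032,0,0";
        float f[3];
        int i;

        for (i = 0; i < 2; i++) {
            if (!parse_point_sketch(&geometry, f))
                return 1;
            printf("mic #%d: (%g, %g, %g)\n", i, f[0], f[1], f[2]);
            if (*geometry == ',')
                geometry++;
        }
        return 0;
    }

In a running system such strings would typically reach this code through module-echo-cancel's aec_args option, e.g. aec_args="beamforming=1 mic_geometry=-0.032,0,0,0.032,0,0 target_direction=1.57,0,0" (elevation and radius must be zero, since the code above only accepts targeting along the azimuth). That wrapper lives in module-echo-cancel.c rather than in the files shown here, so treat the exact option spelling as an assumption.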