summaryrefslogtreecommitdiffstats
path: root/third_party/libwebrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/signal_processing.py
blob: 95e801903db88749b302eaf0e5d3679898fed17f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS.  All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Signal processing utility module.
"""

import array
import logging
import os
import sys
import enum

try:
    import numpy as np
except ImportError:
    logging.critical('Cannot import the third-party Python package numpy')
    sys.exit(1)

try:
    import pydub
    import pydub.generators
except ImportError:
    logging.critical('Cannot import the third-party Python package pydub')
    sys.exit(1)

try:
    import scipy.signal
    import scipy.fftpack
except ImportError:
    logging.critical('Cannot import the third-party Python package scipy')
    sys.exit(1)

from . import exceptions


class SignalProcessingUtils(object):
    """Collection of signal processing utilities."""

    @enum.unique
    class MixPadding(enum.Enum):
        # Padding strategy used by MixSignals() when `signal` is longer than
        # `noise`.
        NO_PADDING = 0
        ZERO_PADDING = 1
        LOOP = 2

    def __init__(self):
        pass

    @classmethod
    def LoadWav(cls, filepath, channels=1):
        """Loads wav file.

        Args:
          filepath: path to the wav audio track file to load.
          channels: number of channels (downmixing to mono by default).

        Returns:
          AudioSegment instance.

        Raises:
          exceptions.FileNotFoundError: if `filepath` does not exist.
        """
        if not os.path.exists(filepath):
            logging.error('cannot find the <%s> audio track file', filepath)
            raise exceptions.FileNotFoundError()
        return pydub.AudioSegment.from_file(filepath,
                                            format='wav',
                                            channels=channels)

    @classmethod
    def SaveWav(cls, output_filepath, signal):
        """Saves wav file.

        Args:
          output_filepath: path to the wav audio track file to save.
          signal: AudioSegment instance.
        """
        return signal.export(output_filepath, format='wav')

    @classmethod
    def CountSamples(cls, signal):
        """Number of samples per channel.

        Args:
          signal: AudioSegment instance.

        Returns:
          An integer.
        """
        number_of_samples = len(signal.get_array_of_samples())
        assert signal.channels > 0
        assert number_of_samples % signal.channels == 0
        # Floor division so that an int (not a float) is returned, as
        # documented, under Python 3.
        return number_of_samples // signal.channels

    @classmethod
    def GenerateSilence(cls, duration=1000, sample_rate=48000):
        """Generates silence.

        This method can also be used to create a template AudioSegment
        instance. A template can then be used with other Generate*() methods
        accepting an AudioSegment instance as argument.

        Args:
          duration: duration in ms.
          sample_rate: sample rate.

        Returns:
          AudioSegment instance.
        """
        return pydub.AudioSegment.silent(duration, sample_rate)

    @classmethod
    def GeneratePureTone(cls, template, frequency=440.0):
        """Generates a pure tone.

        The pure tone is generated with the same duration and in the same
        format of the given template signal.

        Args:
          template: AudioSegment instance.
          frequency: Frequency of the pure tone in Hz.

        Returns:
          AudioSegment instance.

        Raises:
          exceptions.SignalProcessingException: if `frequency` is above the
            Nyquist frequency of the template.
        """
        # Reject frequencies above Nyquist (frame_rate / 2).
        if frequency > template.frame_rate >> 1:
            raise exceptions.SignalProcessingException('Invalid frequency')

        generator = pydub.generators.Sine(sample_rate=template.frame_rate,
                                          bit_depth=template.sample_width * 8,
                                          freq=frequency)

        # volume=0.0 means 0 dBFS, i.e., full scale.
        return generator.to_audio_segment(duration=len(template), volume=0.0)

    @classmethod
    def GenerateWhiteNoise(cls, template):
        """Generates white noise.

        The white noise is generated with the same duration and in the same
        format of the given template signal.

        Args:
          template: AudioSegment instance.

        Returns:
          AudioSegment instance.
        """
        generator = pydub.generators.WhiteNoise(
            sample_rate=template.frame_rate,
            bit_depth=template.sample_width * 8)
        # volume=0.0 means 0 dBFS, i.e., full scale.
        return generator.to_audio_segment(duration=len(template), volume=0.0)

    @classmethod
    def AudioSegmentToRawData(cls, signal):
        """Returns the samples of `signal` as a numpy int16 array.

        Args:
          signal: AudioSegment instance with 16 bit samples.

        Returns:
          numpy array of np.int16 samples.

        Raises:
          exceptions.SignalProcessingException: if the samples are not 16 bit.
        """
        samples = signal.get_array_of_samples()
        if samples.typecode != 'h':
            raise exceptions.SignalProcessingException(
                'Unsupported samples type')
        # Reuse `samples` instead of extracting the sample array a second
        # time.
        return np.array(samples, np.int16)

    @classmethod
    def Fft(cls, signal, normalize=True):
        """Computes the FFT of a mono signal.

        Args:
          signal: single-channel AudioSegment instance.
          normalize: when True, scales the samples into [-1.0, 1.0] before
            the transform.

        Returns:
          numpy array with the first half of the (complex) spectrum.
        """
        if signal.channels != 1:
            raise NotImplementedError('multiple-channel FFT not implemented')
        x = cls.AudioSegmentToRawData(signal).astype(np.float32)
        if normalize:
            x /= max(abs(np.max(x)), 1.0)
        y = scipy.fftpack.fft(x)
        # Keep the first half of the spectrum; floor division is required
        # since Python 3 slice indices must be integers.
        return y[:len(y) // 2]

    @classmethod
    def DetectHardClipping(cls, signal, threshold=2):
        """Detects hard clipping.

        Hard clipping is simply detected by counting samples that touch either
        the lower or upper bound too many times in a row (according to
        `threshold`). The presence of a single sequence of samples meeting
        such property is enough to label the signal as hard clipped.

        Args:
          signal: AudioSegment instance.
          threshold: minimum number of samples at full-scale in a row.

        Returns:
          True if hard clipping is detected, False otherwise.

        Raises:
          exceptions.SignalProcessingException: if the samples are not 16 bit.
        """
        if signal.channels != 1:
            raise NotImplementedError(
                'multiple-channel clipping not implemented')
        if signal.sample_width != 2:  # Note that signal.sample_width is in bytes.
            raise exceptions.SignalProcessingException(
                'hard-clipping detection only supported for 16 bit samples')
        samples = cls.AudioSegmentToRawData(signal)

        # Detect adjacent clipped samples.
        samples_type_info = np.iinfo(samples.dtype)
        mask_min = samples == samples_type_info.min
        mask_max = samples == samples_type_info.max

        def HasLongSequence(vector, min_length=threshold):
            """Returns True if there are one or more long sequences of True flags."""
            seq_length = 0
            for b in vector:
                seq_length = seq_length + 1 if b else 0
                if seq_length >= min_length:
                    return True
            return False

        return HasLongSequence(mask_min) or HasLongSequence(mask_max)

    @classmethod
    def ApplyImpulseResponse(cls, signal, impulse_response):
        """Applies an impulse response to a signal.

        Args:
          signal: AudioSegment instance.
          impulse_response: list or numpy vector of float values.

        Returns:
          AudioSegment instance.
        """
        # Get samples.
        assert signal.channels == 1, (
            'multiple-channel recordings not supported')
        samples = signal.get_array_of_samples()

        # Convolve.
        logging.info(
            'applying %d order impulse response to a signal lasting %d ms',
            len(impulse_response), len(signal))
        convolved_samples = scipy.signal.fftconvolve(in1=samples,
                                                     in2=impulse_response,
                                                     mode='full').astype(
                                                         np.int16)
        logging.info('convolution computed')

        # Cast back to the sample array type expected by pydub.
        convolved_samples = array.array(signal.array_type, convolved_samples)

        # Verify: a 'full' convolution is longer than its input.
        logging.debug('signal length: %d samples', len(samples))
        logging.debug('convolved signal length: %d samples',
                      len(convolved_samples))
        assert len(convolved_samples) > len(samples)

        # Generate convolved signal AudioSegment instance.
        convolved_signal = pydub.AudioSegment(data=convolved_samples,
                                              metadata={
                                                  'sample_width':
                                                  signal.sample_width,
                                                  'frame_rate':
                                                  signal.frame_rate,
                                                  'frame_width':
                                                  signal.frame_width,
                                                  'channels': signal.channels,
                                              })
        assert len(convolved_signal) > len(signal)

        return convolved_signal

    @classmethod
    def Normalize(cls, signal):
        """Normalizes a signal so that its peak reaches 0 dBFS.

        Args:
          signal: AudioSegment instance.

        Returns:
          An AudioSegment instance.
        """
        return signal.apply_gain(-signal.max_dBFS)

    @classmethod
    def Copy(cls, signal):
        """Makes a copy of a signal.

        Args:
          signal: AudioSegment instance.

        Returns:
          An AudioSegment instance.
        """
        return pydub.AudioSegment(data=signal.get_array_of_samples(),
                                  metadata={
                                      'sample_width': signal.sample_width,
                                      'frame_rate': signal.frame_rate,
                                      'frame_width': signal.frame_width,
                                      'channels': signal.channels,
                                  })

    @classmethod
    def MixSignals(cls,
                   signal,
                   noise,
                   target_snr=0.0,
                   pad_noise=MixPadding.NO_PADDING):
        """Mixes `signal` and `noise` with a target SNR.

        Mix `signal` and `noise` with a desired SNR by scaling `noise`.
        If the target SNR is +/- infinite, a copy of signal/noise is returned.
        If `signal` is shorter than `noise`, the length of the mix equals that
        of `signal`. Otherwise, the mix length depends on whether padding is
        applied. When padding is not applied, that is `pad_noise` is set to
        NO_PADDING (default), the mix length equals that of `noise` - i.e.,
        `signal` is truncated. Otherwise, `noise` is extended and the
        resulting mix has the same length of `signal`.

        Args:
          signal: AudioSegment instance (signal).
          noise: AudioSegment instance (noise).
          target_snr: float, numpy.inf or -numpy.inf (dB).
          pad_noise: SignalProcessingUtils.MixPadding, default: NO_PADDING.

        Returns:
          An AudioSegment instance.

        Raises:
          exceptions.SignalProcessingException: if `signal` or `noise` has
            -Inf power, or if `pad_noise` is not a valid MixPadding value.
        """
        # Handle infinite target SNR. Note: `np.inf` (not `np.Inf`, which was
        # removed in NumPy 2.0).
        if target_snr == -np.inf:
            # Return a copy of noise.
            logging.warning('SNR = -Inf, returning noise')
            return cls.Copy(noise)
        elif target_snr == np.inf:
            # Return a copy of signal.
            logging.warning('SNR = +Inf, returning signal')
            return cls.Copy(signal)

        # Check signal and noise power.
        signal_power = float(signal.dBFS)
        noise_power = float(noise.dBFS)
        if signal_power == -np.inf:
            logging.error('signal has -Inf power, cannot mix')
            raise exceptions.SignalProcessingException(
                'cannot mix a signal with -Inf power')
        if noise_power == -np.inf:
            logging.error('noise has -Inf power, cannot mix')
            raise exceptions.SignalProcessingException(
                'cannot mix a signal with -Inf power')

        # Mix. The noise gain achieves the target SNR given the measured
        # powers.
        gain_db = signal_power - noise_power - target_snr
        signal_duration = len(signal)
        noise_duration = len(noise)
        if signal_duration <= noise_duration:
            # Ignore `pad_noise`, `noise` is truncated if longer that
            # `signal`, the mix will have the same length of `signal`.
            return signal.overlay(noise.apply_gain(gain_db))
        elif pad_noise == cls.MixPadding.NO_PADDING:
            # `signal` is longer than `noise`, but no padding is applied to
            # `noise`. Truncate `signal`.
            return noise.overlay(signal, gain_during_overlay=gain_db)
        elif pad_noise == cls.MixPadding.ZERO_PADDING:
            # TODO(alessiob): Check that this works as expected.
            return signal.overlay(noise.apply_gain(gain_db))
        elif pad_noise == cls.MixPadding.LOOP:
            # `signal` is longer than `noise`, extend `noise` by looping.
            return signal.overlay(noise.apply_gain(gain_db), loop=True)
        else:
            raise exceptions.SignalProcessingException('invalid padding type')