summaryrefslogtreecommitdiffstats
path: root/dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html
blob: bede517b2e7410d34722ecaac2646db2cb1a2ef9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
<!DOCTYPE html>
<title>Test up-mixing in ConvolverNode after ChannelInterpretation change</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
// This test is not in wpt because it requires that multiple changes to the
// nodes in an AudioContext during a single event will be processed by the
// audio thread in a single transaction.  Gecko provides that, but this is not
// currently required by the Web Audio API.

// Allowed rounding error: one ULP at unity (2^-23, single-precision).
const EPSILON = 2 ** -23;
// A power-of-two sample rate keeps the delay times exactly representable
// in base-2 floating point arithmetic.
const SAMPLE_RATE = 32768;
// Frame count of the initial mono signal, when the test starts with one.
// More than one render block (128 frames), so at least one full block is
// guaranteed to be mono even if DelayNode interpolation lets stereo appear
// one block sooner than a non-interpolating delay would.
const MONO_FRAMES = 256;
// Frame count of the impulse response.  Longer than 1 so the convolver
// still emits stereo for at least one block after its stereo input is
// disconnected.
const RESPONSE_FRAMES = 2;

/**
 * Checks that a ConvolverNode up-mixes its internal channel state correctly
 * when channelInterpretation is changed at the same time as its input
 * becomes mono, by comparing against a reference path where up-mixing is
 * performed by a GainNode before convolution.
 *
 * @param {Test} t - testharness.js async test object.
 * @param {string} initialInterpretation - "speakers" or "discrete"; the
 *   test switches to the other interpretation mid-stream.
 * @param {number} initialMonoFrames - frames of initial mono input to the
 *   convolver (0 means the convolver's first input is already stereo).
 */
function test_interpretation_change(t, initialInterpretation, initialMonoFrames)
{
  const context = new AudioContext({sampleRate: SAMPLE_RATE});

  // Three independent signals.  These are constant so that results are
  // independent of the timing of the `ended` event.
  const monoOffset = 0.25;
  const monoSource = new ConstantSourceNode(context, {offset: monoOffset});
  const leftOffset = 0.125;
  const rightOffset = 0.5;
  const leftSource = new ConstantSourceNode(context, {offset: leftOffset});
  const rightSource = new ConstantSourceNode(context, {offset: rightOffset});
  monoSource.start();
  leftSource.start();
  rightSource.start();

  const stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
  leftSource.connect(stereoMerger, 0, 0);
  rightSource.connect(stereoMerger, 0, 1);

  // The DelayNode initially has a single channel of silence, and so the
  // output of the delay node is first mono silence (if there is a non-zero
  // initialMonoFrames), then stereo.  In Gecko, this triggers a convolver
  // configuration that is different for different channelInterpretations.
  const delay =
      new DelayNode(context,
                    {maxDelayTime: MONO_FRAMES / context.sampleRate,
                     delayTime: initialMonoFrames / context.sampleRate});
  stereoMerger.connect(delay);

  // Two convolvers with the same impulse response.  The test convolver will
  // process a mix of stereo and mono signals.  The reference convolver will
  // always process stereo, including the up-mixed mono signal.
  const response = new AudioBuffer({numberOfChannels: 1,
                                    length: RESPONSE_FRAMES,
                                    sampleRate: context.sampleRate});
  // Single unit impulse at the last frame: the convolver output is the
  // input delayed by RESPONSE_FRAMES - 1 frames.
  response.getChannelData(0)[response.length - 1] = 1;

  const testConvolver = new ConvolverNode(context,
                                          {disableNormalization: true,
                                           buffer: response});
  testConvolver.channelInterpretation = initialInterpretation;
  const referenceConvolver = new ConvolverNode(context,
                                               {disableNormalization: true,
                                                buffer: response});
  // No need to set referenceConvolver.channelInterpretation because
  // input is always stereo, due to up-mixing at gain node.
  const referenceMixer = new GainNode(context);
  referenceMixer.channelCount = 2;
  referenceMixer.channelCountMode = "explicit";
  referenceMixer.channelInterpretation = initialInterpretation;
  referenceMixer.connect(referenceConvolver);

  delay.connect(testConvolver);
  delay.connect(referenceMixer);

  monoSource.connect(testConvolver);
  monoSource.connect(referenceMixer);

  // A timer sends 'ended' when the convolvers are known to be processing
  // stereo.
  const timer = new ConstantSourceNode(context);
  timer.start();
  timer.stop((initialMonoFrames + 1) / context.sampleRate);

  timer.onended = t.step_func(() => {
    const changedInterpretation =
        initialInterpretation === "speakers" ? "discrete" : "speakers";

    // Switch channelInterpretation in test and reference paths.
    testConvolver.channelInterpretation = changedInterpretation;
    referenceMixer.channelInterpretation = changedInterpretation;

    // Disconnect the stereo input from both test and reference convolvers.
    // The disconnected convolvers will continue to output stereo for at least
    // one frame.  The test convolver will up-mix its mono input into its two
    // buffers.
    delay.disconnect();

    // Capture the outputs in a script processor.
    //
    // The first two channels contain signal where some up-mixing occurs
    // internally to the test convolver.
    //
    // The last two channels are expected to contain the same signal, but
    // up-mixing was performed at a GainNode prior to convolution.
    //
    // Two stereo splitters will collect test and reference outputs.
    const testSplitter =
        new ChannelSplitterNode(context, {numberOfOutputs: 2});
    const referenceSplitter =
        new ChannelSplitterNode(context, {numberOfOutputs: 2});
    testConvolver.connect(testSplitter);
    referenceConvolver.connect(referenceSplitter);

    const outputMerger = new ChannelMergerNode(context, {numberOfInputs: 4});
    testSplitter.connect(outputMerger, 0, 0);
    testSplitter.connect(outputMerger, 1, 1);
    referenceSplitter.connect(outputMerger, 0, 2);
    referenceSplitter.connect(outputMerger, 1, 3);

    const processor = context.createScriptProcessor(256, 4, 0);
    outputMerger.connect(processor);

    processor.onaudioprocess = t.step_func_done((e) => {
      // Only one buffer of output is examined; detach immediately.
      e.target.onaudioprocess = null;
      outputMerger.disconnect();

      // The test convolver output is stereo for the first block.
      const length = 128;

      // Find the frame/channel with the largest test-vs-reference
      // difference so the assertion failure message pinpoints it.
      const buffer = e.inputBuffer;
      let maxDiff = -1.0;
      let frameIndex = 0;
      let channelIndex = 0;
      for (let c = 0; c < 2; ++c) {
        const testOutput = buffer.getChannelData(0 + c);
        const referenceOutput = buffer.getChannelData(2 + c);
        for (let i = 0; i < length; ++i) {
          const diff = Math.abs(testOutput[i] - referenceOutput[i]);
          if (diff > maxDiff) {
            maxDiff = diff;
            frameIndex = i;
            channelIndex = c;
          }
        }
      }
      assert_approx_equals(buffer.getChannelData(0 + channelIndex)[frameIndex],
                           buffer.getChannelData(2 + channelIndex)[frameIndex],
                           EPSILON,
                           `output at ${frameIndex} ` +
                             `in channel ${channelIndex}` );
    });
  });
}

// Each case: [initial channelInterpretation, initial mono frames, title].
const interpretationTestCases = [
  ["speakers", MONO_FRAMES, "speakers to discrete, initially mono"],
  ["discrete", MONO_FRAMES, "discrete to speakers"],
  // Gecko uses a separate path for "speakers" initial up-mixing when the
  // convolver's first input is stereo, so test that separately.
  ["speakers", 0, "speakers to discrete, initially stereo"],
];
for (const [interpretation, monoFrames, title] of interpretationTestCases) {
  async_test((t) => test_interpretation_change(t, interpretation, monoFrames),
             title);
}
</script>