summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html
blob: 4e0de21e96fa8470f5305d9405bdccf8f937e660 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
<!DOCTYPE html>
<!--
Test AudioBufferSourceNode supports 5.1 channel.
-->
<html>
  <head>
    <title>
      audiobuffersource-multi-channels.html
    </title>
    <script src="/resources/testharness.js"></script>
    <script src="/resources/testharnessreport.js"></script>
    <script src="/webaudio/resources/audit-util.js"></script>
    <script src="/webaudio/resources/audit.js"></script>
    <script src="/webaudio/resources/mix-testing.js"></script>
  </head>
  <body>
    <script id="layout-test-code">
      // Task runner that serializes the 'initialize' and 'test' tasks
      // defined below.
      let audit = Audit.createTaskRunner();
      // OfflineAudioContext created in 'initialize' and shared with 'test'.
      let context;
      // Reference AudioBuffer decoded from the expected .wav file; compared
      // channel-by-channel against the rendered output in 'test'.
      let expectedAudio;

      audit.define('initialize', (task, should) => {
        // Create the offline context shared with the 'test' task: 6 channels
        // so a 5.1 buffer plays through without down-mixing, rendered at
        // 44.1 kHz for toneLengthSeconds.
        let sampleRate = 44100.0;
        should(() => {
          context = new OfflineAudioContext(
              6, sampleRate * toneLengthSeconds, sampleRate);
        }, 'Creating context for testing').notThrow();
        // Fetch and decode the reference rendering. The decode promise is
        // returned from the |then| callback so the promise passed to
        // |should| only resolves once |expectedAudio| is populated; without
        // the |return|, 'Fetching expected audio' would settle before
        // decoding finished (a floating promise), racing with task.done().
        should(
          Audit
            .loadFileFromUrl('resources/audiobuffersource-multi-channels-expected.wav')
            .then(arrayBuffer => {
              return context.decodeAudioData(arrayBuffer).then(audioBuffer => {
                expectedAudio = audioBuffer;
                task.done();
              }).catch(error => {
                assert_unreached("Could not decode audio data due to " + error.message);
              });
            })
          , 'Fetching expected audio').beResolved();
      });

      audit.define(
          {label: 'test', description: 'AudioBufferSource with 5.1 buffer'},
          (task, should) => {
            // Play a 6-channel (5.1) tone buffer through an
            // AudioBufferSourceNode into the 6-channel destination, then
            // compare every rendered channel against the decoded reference.
            const buffer6ch =
                createToneBuffer(context, 440, toneLengthSeconds, 6);

            const bufferSource = context.createBufferSource();
            bufferSource.buffer = buffer6ch;
            bufferSource.connect(context.destination);
            bufferSource.start(0);

            context.startRendering()
                .then(renderedAudio => {
                  // The reference file is assumed to be a 16-bit wav, so
                  // samples are quantized in steps of 1/32768.  Allow at
                  // most |kMaxUlp| ULP of error (experimentally determined).
                  const kMaxUlp = 1;
                  const kThreshold = kMaxUlp / 32768;
                  for (let channel = 0;
                       channel < renderedAudio.numberOfChannels;
                       ++channel) {
                    should(
                        renderedAudio.getChannelData(channel),
                        'Rendered audio for channel ' + channel)
                        .beCloseToArray(
                            expectedAudio.getChannelData(channel),
                            {absoluteThreshold: kThreshold});
                  }
                })
                .then(() => task.done());
          });

      // Execute the tasks in definition order: 'initialize', then 'test'.
      audit.run();
    </script>
  </body>
</html>