path: root/dom/media/webrtc/tests/mochitests/test_getUserMedia_audioCapture.html
<!DOCTYPE HTML>
<html>
<head>
  <title>Test AudioCapture</title>
  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
</head>
<body>
<pre id="test">
<script>

(async () => {
  await createHTML({
    bug: "1156472",
    title: "Test AudioCapture with regular HTMLMediaElement, AudioContext, " +
           "and HTMLMediaElement playing a MediaStream",
    visible: true
  });

  await runTestWhenReady(async () => {
    // Reduce noise for when --use-test-media-devices is not in use.
    await SpecialPowers.pushPrefEnv({ set: [[ "media.volume_scale", "0.0" ]] });
    /**
     * Set up several audio sources playing in the document:
     * - An HTMLMediaElement playing a decoded sine tone (a flac file), and a
     *   muted copy of it playing another tone.
     * - An HTMLMediaElement playing the stream of a
     *   MediaStreamAudioDestinationNode fed by an OscillatorNode, and a muted
     *   copy with its own oscillator.
     * - An OscillatorNode playing directly through the AudioContext's
     *   AudioDestinationNode.
     * - An HTMLMediaElement playing the microphone's MediaStream (a fake
     *   device producing a tone).
     *
     * Capture the output of the document, feed that back into the AudioContext
     * through an AnalyserNode, and check the frequency content to make sure the
     * audible sources have been recorded and that the muted elements stay out
     * of the capture (the muted MediaStream case is currently a todo,
     * bug 1864067).
     *
     * The sine tones have frequencies far apart from each other, so that we can
     * check that the spectrum of the captured stream contains a distinct
     * high-magnitude component for each audible source.
     */
    const decodedTone = createMediaElement("audio", "decodedTone");
    decodedTone.muted = false;
    const decodedToneMuted = createMediaElement("audio", "decodedToneMuted");
    const acTone = createMediaElement("audio", "audioContextTone");
    acTone.muted = false;
    const acToneMuted = createMediaElement("audio", "audioContextToneMuted");
    const micTone = createMediaElement("audio", "micTone");
    micTone.muted = false;
    const ac = new AudioContext();
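    // "audioCapture" is a Mozilla-specific mediaSource value: it captures the
    // audio output of this document instead of a microphone.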
    const constraints = {audio: {mediaSource: "audioCapture"}};
    const stream = await getUserMedia(constraints);
    const analyser = new AudioStreamAnalyser(ac, stream);
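    // Each source below is tuned to a distinct analyser bin;
    // frequencyForBinIndex() converts a bin index into the corresponding
    // frequency in Hz (roughly binIndex * sampleRate / fftSize).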

    const osc200 = ac.createOscillator();
    osc200.frequency.value = analyser.frequencyForBinIndex(200);
    const osc250 = ac.createOscillator();
    osc250.frequency.value = analyser.frequencyForBinIndex(250);
    const oscThroughAudioDestinationNode = ac.createOscillator();
    oscThroughAudioDestinationNode.frequency.value =
      analyser.frequencyForBinIndex(100);
    const msDest200 = ac.createMediaStreamDestination();
    const msDest250 = ac.createMediaStreamDestination();

    osc200.connect(msDest200);
    osc250.connect(msDest250);
    oscThroughAudioDestinationNode.connect(ac.destination);

    acTone.srcObject = msDest200.stream;
    acToneMuted.srcObject = msDest250.stream;
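    // acTone plays osc200's tone audibly; acToneMuted is kept muted, so its
    // bin-250 tone should not end up in the capture (see the todo() for
    // bug 1864067 at the end of the test).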

    // Set the frequency of the tone generated by the fake microphone device.
    // The fake microphone uses signed 16-bit audio data:
    await SpecialPowers.pushPrefEnv({ set: [
      [
        "media.navigator.audio.fake_frequency",
        analyser.frequencyForBinIndex(300)
      ]
    ]});
    const micStream = await navigator.mediaDevices.getUserMedia({
      'audio': true, 'fake': true
    });
    micTone.srcObject = micStream;
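    // Playing the microphone stream through micTone puts the fake microphone's
    // tone into the document's audio output, so it should show up in the
    // capture as well.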

    decodedTone.src = "/tests/dom/media/test/sin-441-1s-44100.flac";
    decodedToneMuted.src = "/tests/dom/media/test/sin-441-1s-44100.flac";
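    // The decoded file is a 441Hz sine tone; with preservesPitch disabled,
    // scaling playbackRate by targetFrequency / 441 shifts the tone to the
    // desired analyser bin (20 for the audible element, 40 for the muted one).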
    decodedTone.preservesPitch = false;
    decodedToneMuted.preservesPitch = false;
    decodedTone.playbackRate = analyser.frequencyForBinIndex(20) / 441;
    ok(decodedTone.playbackRate >= 1/16, "within firefox min playback rate");
    decodedToneMuted.playbackRate = analyser.frequencyForBinIndex(40) / 441;
    ok(decodedToneMuted.playbackRate <= 16, "within firefox max playback rate");
    osc200.start();
    osc250.start();
    oscThroughAudioDestinationNode.start();
    decodedTone.loop = true;
    decodedToneMuted.loop = true;
    decodedTone.play();
    decodedToneMuted.play();
    acTone.play();
    acToneMuted.play();

    let frequencyBytes;
    try {
      analyser.enableDebugCanvas();
      await analyser.waitForAnalysisSuccess(array => {
        // We want to find a strong component for each audible source here.
        // Magnitudes are on a logarithmic (dB) scale. Also make sure we have
        // low energy in between, not just flat white noise.
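        // Expected bins: 20 = decodedTone, 40 = decodedToneMuted (muted, so it
        // should be silent), 100 = oscillator through ac.destination,
        // 200 = acTone, 300 = fake microphone. Bins 10, 70 and 150 sit between
        // the tones and must stay empty.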
        if (array[10]  == 0 &&
            array[20]  == 255 &&
            array[40]  == 0 &&
            array[70]  == 0 &&
            array[100] == 255 &&
            array[150] == 0 &&
            array[200] == 255 &&
            array[300] >  240) {
          frequencyBytes = new Uint8Array(array);
          return true;
        }
        return false;
      });
    } finally {
      await ac.close();
      for (let t of [...stream.getTracks(), ...micStream.getTracks()]) {
        t.stop();
      }
    }
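    // Bin 250 carries acToneMuted's tone; a muted element playing a MediaStream
    // srcObject should ideally not be captured, but currently is (bug 1864067).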
    todo(frequencyBytes[250] < 50, "muted MediaStream srcObject - bug 1864067");
  });
})();
</script>
</pre>
</body>
</html>