path: root/testing/web-platform/tests/mediacapture-streams/MediaStreamTrack-MediaElement-disabled-audio-is-silence.https.html
<!doctype html>
<html>
<head>
<title>A disabled audio track is rendered as silence</title>
<link rel="author" title="Dominique Hazael-Massieux" href="mailto:dom@w3.org"/>
<link rel="help" href="http://dev.w3.org/2011/webrtc/editor/getusermedia.html#introduction">
<link rel="help" href="http://dev.w3.org/2011/webrtc/editor/getusermedia.html#mediastreams-as-media-elements">
</head>
<body>
<p class="instructions">When prompted, accept to share your audio stream.</p>
<h1 class="instructions">Description</h1>
<p class="instructions">This test checks that a disabled audio track in a
MediaStream is rendered as silence. It relies on the
<a href="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html">
Web Audio API</a>.</p>

<div id='log'></div>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src=/resources/testdriver.js></script>
<script src=/resources/testdriver-vendor.js></script>
<script src=permission-helper.js></script>
<script>
promise_test(async t => {
  await setMediaPermission("granted", ["microphone"]);
  const stream = await navigator.mediaDevices.getUserMedia({audio: true});
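  // Feed the captured stream into a Web Audio graph so its raw samples can be inspected.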
  var ctx = new AudioContext();
  var streamSource = ctx.createMediaStreamSource(stream);
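  // A ScriptProcessorNode exposes each rendered audio buffer to script via onaudioprocess.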
  var silenceDetector = ctx.createScriptProcessor(1024);
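  // Check ten consecutive buffers before concluding the track renders as silence.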
  var count = 10;
  let resolveAudioProcessPromise;
  const audioProcessed = new Promise(res => resolveAudioProcessPromise = res);

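  // While the track is disabled, every sample of every processed buffer must be exactly zero.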
  // step_func routes assertion failures inside the event handler into this test's result.
  silenceDetector.onaudioprocess = t.step_func(function (e) {
    var buffer1 = e.inputBuffer.getChannelData(0);
    var buffer2 = e.inputBuffer.getChannelData(1);
    // Copy the input through to the output buffer so downstream nodes receive the same data.
    e.outputBuffer.getChannelData(0).set(buffer1);
    for (var i = 0; i < buffer1.length; i++) {
      assert_equals(buffer1[i], 0, "Audio buffer entry #" + i + " in channel 0 is silent");
    }
    for (var i = 0; i < buffer2.length; i++) {
      assert_equals(buffer2[i], 0, "Audio buffer entry #" + i + " in channel 1 is silent");
    }
    count--;
    if (count === 0) {
      silenceDetector.onaudioprocess = null;
      resolveAudioProcessPromise();
    }
  });
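  // Per the Media Capture and Streams spec, a disabled audio track must render as silence.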
  stream.getAudioTracks()[0].enabled = false;

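  // Connect source -> processor -> destination so the graph renders and onaudioprocess fires.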
  streamSource.connect(silenceDetector);
  silenceDetector.connect(ctx.destination);
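  // Keep the test alive until all ten buffers have been checked.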
}, "Tests that a disabled audio track in a MediaStream is rendered as silence");
</script>
</body>
</html>