path: root/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html
<!DOCTYPE html>

<html class="a">
  <head>
    <title>MediaStreamAudioSourceNode</title>
    <script src="/resources/testharness.js"></script>
    <script src="/resources/testharnessreport.js"></script>
  </head>
  <body class="a">
    <div id="log"></div>
    <script>
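      // Map a tone frequency (Hz) to the index of the AnalyserNode FFT bin
      // that is checked for that tone's energy (bin k covers roughly
      // k * sampleRate / fftSize Hz).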
      function binIndexForFrequency(frequency, analyser) {
        return (
          1 +
          Math.round(
            (frequency * analyser.fftSize) / analyser.context.sampleRate
          )
        );
      }

      const t = async_test(
        "MediaStreamAudioSourceNode captures the right track."
      );
      t.step(function() {
          const ac = new AudioContext();
          // Test that the right track is captured. Set up a MediaStream that has two
          // tracks, one with a tone at 100Hz and one with a tone at 1000Hz.
          const dest0 = ac.createMediaStreamDestination();
          const dest1 = ac.createMediaStreamDestination();
          const osc0 = ac.createOscillator();
          const osc1 = ac.createOscillator();
          osc0.frequency.value = 100;
          osc1.frequency.value = 1000;
          osc0.connect(dest0);
          osc1.connect(dest1);
          osc0.start(0);
          osc1.start(0);
          const track0 = dest0.stream.getAudioTracks()[0];
          const track0id = track0.id;
          const track1 = dest1.stream.getAudioTracks()[0];
          const track1id = track1.id;

          let ids = [track0id, track1id];
          ids.sort();
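          // Per the Web Audio spec, when a MediaStream carries several audio
          // tracks, the MediaStreamAudioSourceNode outputs the track whose id
          // sorts first, so that track's tone is the one expected in the
          // analyser.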
          let targetFrequency;
          let otherFrequency;
          if (ids[0] == track0id) {
              targetFrequency = 100;
              otherFrequency = 1000;
          } else {
              targetFrequency = 1000;
              otherFrequency = 100;
          }

          let twoTrackMediaStream = new MediaStream();
          twoTrackMediaStream.addTrack(track0);
          twoTrackMediaStream.addTrack(track1);

          const twoTrackSource = ac.createMediaStreamSource(twoTrackMediaStream);
          const analyser = ac.createAnalyser();
          // Don't do smoothing so that the frequency data changes quickly
          analyser.smoothingTimeConstant = 0;

          twoTrackSource.connect(analyser);

          const indexToCheckForHighEnergy = binIndexForFrequency(
              targetFrequency,
              analyser
          );
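          // Bin for the tone of the track that should not be captured.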
          const indexToCheckForLowEnergy = binIndexForFrequency(
              otherFrequency,
              analyser
          );
          let frequencyData = new Float32Array(analyser.frequencyBinCount);
          let checkCount = 0;
          let numberOfRemovals = 0;
          let stopped = false;
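          // Poll the analyser every 100ms. The phases are driven by checkCount:
          // while the target bin is hot, the right track is being routed; after
          // a few successful checks, remove that track from the MediaStream
          // (the routing must not change); later, stop the track and expect the
          // energy in that bin to drop.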
          function analyse() {
              analyser.getFloatFrequencyData(frequencyData);
              // There should be high energy in the right bin: higher than -40 dBFS,
              // because the captured track carries a sine wave at 0 dBFS.
              if (frequencyData[indexToCheckForHighEnergy] > -40 && !stopped) {
                  assert_true(true, "Correct track routed to the AudioContext.");
                  checkCount++;
              }
              if (stopped && frequencyData[indexToCheckForHighEnergy] < -40) {
                  assert_true(
                      true,
                      "After stopping the track, low energy is found in the same bin"
                  );
                  checkCount++;
              }
              if (checkCount > 5 && checkCount < 20) {
                  twoTrackMediaStream.getAudioTracks().forEach(track => {
                      if (track.id == ids[0]) {
                          numberOfRemovals++;
                          window.removedTrack = track;
                          twoTrackMediaStream.removeTrack(track);
                      }
                  });
                  assert_true(
                      numberOfRemovals == 1,
                      "The MediaStreamTrack can only be removed once from the MediaStream"
                  );
              } else if (checkCount >= 20 && checkCount < 30) {
                  window.removedTrack.stop();
                  stopped = true;
              } else if (checkCount >= 30) {
                  assert_true(
                      numberOfRemovals == 1,
                      "After removing the track from the MediaStream, it's still routed to the graph."
                  );
                  // After some time, consider that it worked.
                  t.done();
                  return;
              }

              t.step_timeout(analyse, 100);
          }
          t.step_timeout(analyse, 100);
      });
    </script>
  </body>
</html>