summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/mediacapture-insertable-streams
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /testing/web-platform/tests/mediacapture-insertable-streams
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/mediacapture-insertable-streams')
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/META.yml6
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-backpressure.worker.js56
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-maxBufferSize.worker.js82
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-with-window-tracks.https.html77
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor.worker.js47
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator-with-window-tracks.https.html280
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator.worker.js233
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/idlharness.any.js14
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-audio.https.html97
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-service-worker.https.html24
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-shared-worker.https.html22
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-worker.https.html39
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-pipes-data-in-worker.https.html41
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-video.https.html285
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-backpressure.https.html69
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-video.https.html97
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-worker.js17
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/dedicated-worker.js11
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/service-worker.js8
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/legacy/shared-worker.js11
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-audio.https.html54
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-worker.js17
-rw-r--r--testing/web-platform/tests/mediacapture-insertable-streams/tentative/VideoTrackGenerator.https.html327
23 files changed, 1914 insertions, 0 deletions
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/META.yml b/testing/web-platform/tests/mediacapture-insertable-streams/META.yml
new file mode 100644
index 0000000000..0b7ae4d815
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/META.yml
@@ -0,0 +1,6 @@
+spec: https://w3c.github.io/mediacapture-transform/
+suggested_reviewers:
+ - alvestrand
+ - guidou
+ - youennf
+ - jan-ivar
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-backpressure.worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-backpressure.worker.js
new file mode 100644
index 0000000000..9e03e650e0
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-backpressure.worker.js
@@ -0,0 +1,56 @@
+// META: title=MediaStreamTrackProcessor backpressure tests.
+
+importScripts("/resources/testharness.js");
+
+const height = 240;
+const width = 320;
+
+const inputCanvas = new OffscreenCanvas(width, height);
+const inputCtx = inputCanvas.getContext('2d', {alpha: false});
+inputCtx.fillStyle = 'black';
+inputCtx.fillRect(0, 0, width, height);
+
+const frameDuration = 40;
+
+function makeUniformVideoFrame(timestamp) {
+ return new VideoFrame(inputCanvas, {timestamp, alpha: 'discard'});
+}
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(
+ t.step_func(async () => {
+ if (generator.readyState === 'live') {
+ timestamp++;
+ await writer.write(makeUniformVideoFrame(timestamp));
+ }
+ }),
+ frameDuration);
+ t.add_cleanup(() => clearInterval(intervalId));
+ t.step_timeout(function() {
+ clearInterval(intervalId);
+ generator.track.stop();
+ }, 2000);
+ const processor = new MediaStreamTrackProcessor(generator);
+ let ts = 1;
+ await processor.readable.pipeTo(new WritableStream({
+ async write(frame) {
+ if (ts === 1) {
+ assert_equals(frame.timestamp, ts, "Timestamp mismatch");
+ } else {
+ assert_greater_than_equal(frame.timestamp, ts, "Backpressure should have resulted in skipping at least 3 frames");
+ }
+ frame.close();
+ ts+=3;
+ // Wait the equivalent of 3 frames
+ return new Promise((res) => t.step_timeout(res, 3*frameDuration));
+ }
+ }));
+}, "Tests that backpressure forces MediaStreamTrackProcess to skip frames");
+
+done();
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-maxBufferSize.worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-maxBufferSize.worker.js
new file mode 100644
index 0000000000..3c64e590c4
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-maxBufferSize.worker.js
@@ -0,0 +1,82 @@
+// META: title=MediaStreamTrackProcessor maxBufferSize
+importScripts("/resources/testharness.js");
+
+function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(100, 100);
+ const ctx = canvas.getContext('2d');
+ return new VideoFrame(canvas, {timestamp});
+}
+
+promise_test(async t => {
+ // The generator will be used as the source for the processor to
+ // produce frames in a controlled manner.
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+ // Use a larger maxBufferSize than the default to ensure no frames
+ // will be dropped.
+ const processor = new MediaStreamTrackProcessor({track: generator.track, maxBufferSize:10});
+ const reader = processor.readable.getReader();
+ const writer = generator.writable.getWriter();
+
+ let numReads = 0;
+ let resolve = null;
+ const promise = new Promise(r => resolve = r);
+
+ const numOperations = 4;
+ // Issue reads without waiting for the frames to arrive.
+ for (let i = 0; i < numOperations; i++) {
+ reader.read().then(dv=> {
+ dv.value.close();
+ if (++numReads == numOperations)
+ resolve();
+ });
+ }
+
+ // Write video frames in different tasks to "slowly" settle the pending read
+ // requests.
+ for (let i = 0; i<numOperations; i++) {
+ await writer.write(makeVideoFrame(i));
+ await new Promise(r=>t.step_timeout(r, 0));
+ }
+
+ return promise;
+
+}, "Tests that multiple read requests are eventually settled");
+
+promise_test(async t => {
+ // The generator will be used as the source for the processor to
+ // produce frames in a controlled manner.
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+ // Use a larger maxBufferSize than the default to ensure no frames
+ // will be dropped.
+ const processor = new MediaStreamTrackProcessor({track: generator.track, maxBufferSize:10});
+ const reader = processor.readable.getReader();
+ const writer = generator.writable.getWriter();
+
+ const numOperations = 4;
+ // Write video frames as fast as we can with "slower" reads.
+ // requests.
+ for (let i = 0; i<numOperations; i++) {
+ await writer.write(makeVideoFrame(i));
+ }
+
+ let numReads = 0;
+ let resolve = null;
+ const promise = new Promise(r => resolve = r);
+
+ // Issue reads without waiting for the frames to arrive.
+ for (let i = 0; i < numOperations; i++) {
+ await new Promise(r=>t.step_timeout(r, 50));
+ reader.read().then(dv=> {
+ dv.value.close();
+ if (++numReads == numOperations)
+ resolve();
+ });
+ }
+
+ return promise;
+
+}, "Tests that multiple write requests are buffered");
+
+done();
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-with-window-tracks.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-with-window-tracks.https.html
new file mode 100644
index 0000000000..7d6b2c243a
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor-with-window-tracks.https.html
@@ -0,0 +1,77 @@
+<!doctype html>
+<html>
+<head>
+ <title>MediaStreamTrackProcessor</title>
+ <link rel="help" href="https://w3c.github.io/mediacapture-insertable-streams">
+</head>
+<body>
+<p class="instructions">When prompted, use the accept button to give permission to use your audio and video devices.</p>
+<h1 class="instructions">Description</h1>
+<p class="instructions">This test checks that MediaStreamTrackProcessor works as expected on video MediaStreamTracks.</p>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script src=/resources/testdriver.js></script>
+<script src=/resources/testdriver-vendor.js></script>
+<script src='/mediacapture-streams/permission-helper.js'></script>
+<script>
+async function createWorker(script) {
+ script = script + "self.postMessage('ready');";
+ const blob = new Blob([script], { type: 'text/javascript' });
+ const url = URL.createObjectURL(blob);
+ const worker = new Worker(url);
+ await new Promise(resolve => worker.onmessage = () => {
+ resolve();
+ });
+ URL.revokeObjectURL(url);
+ return worker;
+}
+
+promise_test(async t => {
+ const stream = await navigator.mediaDevices.getUserMedia({video: true});
+ const track = stream.getTracks()[0];
+ const worker = await createWorker(`
+ let track;
+ onmessage = async msg => {
+ if (msg.data.type === "stop") {
+ track.stop();
+ return;
+ }
+ track = msg.data.track;
+ const processor = new MediaStreamTrackProcessor({track});
+ const reader = processor.readable.getReader();
+ let readResult = await reader.read();
+ postMessage(readResult.value);
+ readResult.value.close();
+ // Continue reading until the stream is done due to a track.stop()
+ while (true) {
+ readResult = await reader.read();
+ if (readResult.done) {
+ break;
+ } else {
+ readResult.value.close();
+ }
+ }
+ await reader.closed;
+ postMessage('closed');
+ }
+ `);
+
+ worker.postMessage({ type: "start", track }, [track]);
+
+ return new Promise(resolve => {
+ worker.onmessage = t.step_func(msg => {
+ if (msg.data instanceof VideoFrame) {
+ msg.data.close();
+ worker.postMessage({ type: "stop" });
+ } else if (msg.data == 'closed') {
+ resolve();
+ } else {
+ assert_unreached();
+ }
+ })
+ });
+}, "Tests that the reader of a video MediaStreamTrackProcessor produces VideoFrame objects and is closed on track stop while running on a worker");
+
+</script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor.worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor.worker.js
new file mode 100644
index 0000000000..000078c1d3
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/MediaStreamTrackProcessor.worker.js
@@ -0,0 +1,47 @@
+// META: title=MediaStreamTrackProcessor tests.
+
+importScripts("/resources/testharness.js");
+
+function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(100, 100);
+ const ctx = canvas.getContext('2d');
+ return new VideoFrame(canvas, {timestamp});
+}
+
+promise_test(async t => {
+ // The generator will be used as the source for the processor to
+ // produce frames in a controlled manner.
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+ // Use a larger maxBufferSize than the default to ensure no frames
+ // will be dropped.
+ const processor = new MediaStreamTrackProcessor({track: generator.track, maxBufferSize:10});
+ const reader = processor.readable.getReader();
+ const writer = generator.writable.getWriter();
+
+ let numReads = 0;
+ let resolve = null;
+ const promise = new Promise(r => resolve = r);
+
+ const numOperations = 4;
+ // Issue reads without waiting for the frames to arrive.
+ for (let i = 0; i < numOperations; i++) {
+ reader.read().then(dv=> {
+ dv.value.close();
+ if (++numReads == numOperations)
+ resolve();
+ });
+ }
+
+ // Write video frames in different tasks to "slowly" settle the pending read
+ // requests.
+ for (let i = 0; i<numOperations; i++) {
+ await writer.write(makeVideoFrame(i));
+ await new Promise(r=>t.step_timeout(r,0));
+ }
+
+ return promise;
+
+}, "Tests that multiple read requests are eventually settled");
+
+done();
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator-with-window-tracks.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator-with-window-tracks.https.html
new file mode 100644
index 0000000000..36fc4135e2
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator-with-window-tracks.https.html
@@ -0,0 +1,280 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>MediaStream Insertable Streams - VideoTrackGenerator</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/webrtc/RTCPeerConnection-helper.js"></script>
+</head>
+<body>
+ <h1 class="instructions">Description</h1>
+ <p class="instructions">This test checks that generating video MediaStreamTracks from VideoTrackGenerator works as expected.</p>
+ <script id="scriptRoutines">
+ const pixelColour = [50, 100, 150, 255];
+ const height = 240;
+ const width = 320;
+ function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(width, height);
+
+ const ctx = canvas.getContext('2d', {alpha: false});
+ ctx.fillStyle = `rgba(${pixelColour.join()})`;
+ ctx.fillRect(0, 0, width, height);
+
+ return new VideoFrame(canvas, {timestamp, alpha: 'discard'});
+ }
+
+ async function getVideoFrame() {
+ const stream = await getNoiseStream({video: true});
+ const input_track = stream.getTracks()[0];
+ const processor = new MediaStreamTrackProcessor(input_track);
+ const reader = processor.readable.getReader();
+ const result = await reader.read();
+ input_track.stop();
+ return result.value;
+ }
+
+ function assertPixel(t, bytes, expected, epsilon = 5) {
+ for (let i = 0; i < bytes.length; i++) {
+ t.step(() => {
+ assert_less_than(Math.abs(bytes[i] - expected[i]), epsilon, "Mismatched pixel");
+ });
+ }
+ }
+
+ async function initiateSingleTrackCall(t, track, output) {
+ const caller = new RTCPeerConnection();
+ t.add_cleanup(() => caller.close());
+ const callee = new RTCPeerConnection();
+ t.add_cleanup(() => callee.close());
+ caller.addTrack(track);
+ t.add_cleanup(() => track.stop());
+
+ exchangeIceCandidates(caller, callee);
+ // Wait for the first track.
+ const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
+ output.srcObject = new MediaStream([e.track]);
+ // Exchange answer.
+ await exchangeAnswer(caller, callee);
+ await waitForConnectionStateChange(callee, ['connected']);
+ }
+ </script>
+ <script>
+ async function createWorker(script) {
+ script = scriptRoutines.text + script + "self.postMessage('ready');";
+ const blob = new Blob([script], { type: 'text/javascript' });
+ const url = URL.createObjectURL(blob);
+ const worker = new Worker(url);
+ await new Promise(resolve => worker.onmessage = () => {
+ resolve();
+ });
+ URL.revokeObjectURL(url);
+ return worker;
+ }
+
+ promise_test(async t => {
+ const worker = await createWorker(`
+ const generator = new VideoTrackGenerator();
+ const videoFrame = makeVideoFrame(1);
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ self.onmessage = async (event) => {
+ if (event.data == "transfer") {
+ self.postMessage({ track: generator.track, originalWidth, originalHeight }, [generator.track]);
+ return;
+ }
+ if (event.data == "write frame") {
+ generator.writable.getWriter().write(videoFrame)
+ return;
+ }
+ if (event.data == "cleanup") {
+ videoFrame.close();
+ return;
+ }
+ }
+ `);
+
+ t.add_cleanup(() => worker.postMessage("cleanup"));
+
+ worker.postMessage("transfer");
+ const { track, originalWidth, originalHeight } = await new Promise(resolve => worker.onmessage = e => resolve(e.data));
+ t.add_cleanup(() => track.stop());
+
+ const video = document.createElement("video");
+ video.autoplay = true;
+ video.width = 320;
+ video.height = 240;
+ video.srcObject = new MediaStream([track]);
+ video.play();
+
+ // Wait for the video element to be connected to the generator and
+ // generate the frame.
+ video.onloadstart = () => worker.postMessage("write frame");
+
+ return new Promise((resolve)=> {
+ video.ontimeupdate = t.step_func(() => {
+ const canvas = document.createElement("canvas");
+ canvas.width = originalWidth;
+ canvas.height = originalHeight;
+ const context = canvas.getContext('2d');
+ context.drawImage(video, 0, 0);
+ // Pick a pixel in the centre of the video and check that it has the colour of the frame provided.
+ const pixel = context.getImageData(originalWidth/2, originalHeight/2, 1, 1);
+ assertPixel(t, pixel.data, pixelColour);
+ resolve();
+ });
+ });
+ }, 'Tests that frames are actually rendered correctly in a stream used for a video element.');
+
+ promise_test(async t => {
+ const worker = await createWorker(`
+ const generator = new VideoTrackGenerator();
+ const videoFrame = makeVideoFrame(1);
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ let intervalId;
+ self.onmessage = async (event) => {
+ if (event.data == "transfer") {
+ self.postMessage({ track: generator.track}, [generator.track]);
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ intervalId = setInterval(async () => {
+ timestamp++;
+ await writer.write(makeVideoFrame(timestamp));
+ }, 40);
+ return;
+ }
+ }
+ `);
+
+ worker.postMessage("transfer");
+ const { track } = await new Promise(resolve => worker.onmessage = e => resolve(e.data));
+ t.add_cleanup(() => track.stop());
+
+ const video = document.createElement('video');
+ video.autoplay = true;
+ video.width = width;
+ video.height = height;
+ video.muted = true;
+
+ await initiateSingleTrackCall(t, track, video);
+ return new Promise(resolve => {
+ video.requestVideoFrameCallback(t.step_func(() => {
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const context = canvas.getContext('2d');
+ context.drawImage(video, 0, 0);
+ // Pick a pixel in the centre of the video and check that it has the
+ // colour of the frame provided.
+ const pixel = context.getImageData(width / 2, height / 2, 1, 1);
+ // Encoding/decoding can add noise, so increase the threshhold to 8.
+ assertPixel(t, pixel.data, pixelColour, 8);
+ resolve();
+ }));
+ });
+ }, 'Tests that frames are actually rendered correctly in a stream sent over a peer connection.');
+
+ promise_test(async t => {
+ const colorUL = [255, 0, 0, 255];
+ const colorUR = [255, 255, 0, 255];
+ const colorLL = [0, 255, 0, 255];
+ const colorLR = [0, 255, 255, 255];
+ const worker = await createWorker(`
+ const generator = new VideoTrackGenerator();
+ const videoFrame = makeVideoFrame(1);
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ let intervalId;
+ self.onmessage = async (event) => {
+ if (event.data == "transfer") {
+ self.postMessage({ track: generator.track}, [generator.track]);
+ const inputCanvas = new OffscreenCanvas(width, height);
+ const inputContext = inputCanvas.getContext('2d', {alpha: false});
+ // draw four quadrants
+ inputContext.fillStyle = \`rgba(${colorUL.join()})\`;
+ inputContext.fillRect(0, 0, width / 2, height / 2);
+ inputContext.fillStyle = \`rgba(${colorUR.join()})\`;
+ inputContext.fillRect(width / 2, 0, width / 2, height / 2);
+ inputContext.fillStyle = \`rgba(${colorLL.join()})\`;
+ inputContext.fillRect(0, height / 2, width / 2, height / 2);
+ inputContext.fillStyle = \`rgba(${colorLR.join()})\`;
+ inputContext.fillRect(width / 2, height / 2, width / 2, height / 2);
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(async () => {
+ timestamp++;
+ await writer.write(new VideoFrame(inputCanvas, {timestamp: timestamp, alpha: 'discard'}));
+ }, 40);
+ return;
+ }
+ if (event.data.type === "getVideoFrame") {
+ const processor = new MediaStreamTrackProcessor({ track: event.data.track });
+ const reader = processor.readable.getReader();
+ const frame = (await reader.read()).value;
+ self.postMessage({frame}, [frame])
+ event.data.track.stop();
+ }
+ }
+ `);
+
+ worker.postMessage("transfer");
+ const { track } = await new Promise(resolve => worker.onmessage = e => resolve(e.data));
+ t.add_cleanup(() => track.stop());
+
+ const caller = new RTCPeerConnection();
+ t.add_cleanup(() => caller.close());
+ const callee = new RTCPeerConnection();
+ t.add_cleanup(() => callee.close());
+ const sender = caller.addTrack(track);
+
+ exchangeIceCandidates(caller, callee);
+ // Wait for the first track.
+ const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
+
+ // Exchange answer.
+ await exchangeAnswer(caller, callee);
+ await waitForConnectionStateChange(callee, ['connected']);
+ const params = sender.getParameters();
+ params.encodings.forEach(e => e.scaleResolutionDownBy = 2);
+ sender.setParameters(params);
+
+ // The first frame may not have had scaleResolutionDownBy applied
+ const numTries = 5;
+ for (let i = 1; i <= numTries; i++) {
+ worker.postMessage({type:"getVideoFrame", track: e.track}, [e.track]);
+ const {frame: outputFrame} = await new Promise(resolve => worker.onmessage = e => resolve(e.data));
+ if (outputFrame.displayWidth !== width / 2) {
+ assert_less_than(i, numTries, `First ${numTries} frames were the wrong size.`);
+ outputFrame.close();
+ continue;
+ }
+
+ assert_equals(outputFrame.displayWidth, width / 2);
+ assert_equals(outputFrame.displayHeight, height / 2);
+
+ const outputCanvas = new OffscreenCanvas(width / 2, height / 2);
+ const outputContext = outputCanvas.getContext('2d', {alpha: false});
+ outputContext.drawImage(outputFrame, 0, 0);
+ outputFrame.close();
+ // Check the four quadrants
+ const pixelUL = outputContext.getImageData(width / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUL.data, colorUL);
+ const pixelUR =
+ outputContext.getImageData(width * 3 / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUR.data, colorUR);
+ const pixelLL =
+ outputContext.getImageData(width / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLL.data, colorLL);
+ const pixelLR =
+ outputContext.getImageData(width * 3 / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLR.data, colorLR);
+ break;
+ }
+ }, 'Tests that frames are sent correctly with RTCRtpEncodingParameters.scaleResolutionDownBy.');
+
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator.worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator.worker.js
new file mode 100644
index 0000000000..37caf1a37f
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/VideoTrackGenerator.worker.js
@@ -0,0 +1,233 @@
+// META: title=VideoTrackGenerator tests.
+
+importScripts("/resources/testharness.js");
+
+function make_audio_data(timestamp, channels, sampleRate, frames) {
+ let data = new Float32Array(frames*channels);
+
+ // This generates samples in a planar format.
+ for (var channel = 0; channel < channels; channel++) {
+ let hz = 100 + channel * 50; // sound frequency
+ let base_index = channel * frames;
+ for (var i = 0; i < frames; i++) {
+ let t = (i / sampleRate) * hz * (Math.PI * 2);
+ data[base_index + i] = Math.sin(t);
+ }
+ }
+
+ return new AudioData({
+ timestamp: timestamp,
+ data: data,
+ numberOfChannels: channels,
+ numberOfFrames: frames,
+ sampleRate: sampleRate,
+ format: "f32-planar",
+ });
+}
+
+const pixelColour = [50, 100, 150, 255];
+const height = 240;
+const width = 320;
+function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(width, height);
+
+ const ctx = canvas.getContext('2d', {alpha: false});
+ ctx.fillStyle = `rgba(${pixelColour.join()})`;
+ ctx.fillRect(0, 0, width, height);
+
+ return new VideoFrame(canvas, {timestamp, alpha: 'discard'});
+}
+
+promise_test(async t => {
+ const videoFrame = makeVideoFrame(1);
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ const originalTimestamp = videoFrame.timestamp;
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
+ // that |processor| actually forwards the frames written to its writable
+ // field.
+ const processor = new MediaStreamTrackProcessor(generator);
+ const reader = processor.readable.getReader();
+ const readerPromise = new Promise(async resolve => {
+ const result = await reader.read();
+ t.add_cleanup(() => result.value.close());
+ t.step_func(() => {
+ assert_equals(result.value.displayWidth, originalWidth);
+ assert_equals(result.value.displayHeight, originalHeight);
+ assert_equals(result.value.timestamp, originalTimestamp);
+ })();
+ resolve();
+ });
+
+ generator.writable.getWriter().write(videoFrame);
+ return readerPromise;
+}, 'Tests that VideoTrackGenerator forwards frames to sink');
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_equals(generator.track.kind, "video");
+ assert_equals(generator.track.readyState, "live");
+}, "Tests that creating a VideoTrackGenerator works as expected");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_throws_dom("InvalidStateError", () => frame.clone(), "VideoFrame wasn't destroyed on write.");
+}, "Tests that VideoFrames are destroyed on write");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+
+ if (!self.AudioData)
+ return;
+
+ const defaultInit = {
+ timestamp: 1234,
+ channels: 2,
+ sampleRate: 8000,
+ frames: 100,
+ };
+ const audioData = make_audio_data(defaultInit.timestamp, defaultInit.channels, defaultInit.sampleRate,
+ defaultInit.frames);
+
+ await promise_rejects_js(t, TypeError, writer.write("test"));
+}, "Generator writer rejects on mismatched media input");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ await promise_rejects_js(t, TypeError, writer.write("potatoe"));
+}, "Generator writer rejects on non media input");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+
+ const writer = generator.writable.getWriter();
+ const frame1 = makeVideoFrame(1);
+ t.add_cleanup(() => frame1.close());
+ await writer.write(frame1);
+ assert_equals(frame1.codedWidth, 0);
+
+ generator.track.stop();
+
+ await writer.closed;
+
+ const frame2 = makeVideoFrame(1);
+ t.add_cleanup(() => frame2.close());
+ await promise_rejects_js(t, TypeError, writer.write(frame2));
+
+ assert_equals(frame2.codedWidth, 320);
+}, "A writer rejects when generator's track is stopped");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ generator.muted = true;
+
+ const writer = generator.writable.getWriter();
+ const frame1 = makeVideoFrame(1);
+ t.add_cleanup(() => frame1.close());
+ await writer.write(frame1);
+ assert_equals(frame1.codedWidth, 0);
+
+ generator.track.stop();
+
+ await writer.closed;
+
+ const frame2 = makeVideoFrame(1);
+ t.add_cleanup(() => frame2.close());
+ await promise_rejects_js(t, TypeError, writer.write(frame2));
+
+ assert_equals(frame2.codedWidth, 320);
+}, "A muted writer rejects when generator's track is stopped");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+
+ const writer = generator.writable.getWriter();
+ const frame1 = makeVideoFrame(1);
+ t.add_cleanup(() => frame1.close());
+ await writer.write(frame1);
+ assert_equals(frame1.codedWidth, 0);
+
+ const clonedTrack = generator.track.clone();
+ generator.track.stop();
+
+ await new Promise(resolve => t.step_timeout(resolve, 100));
+
+ const frame2 = makeVideoFrame(1);
+ t.add_cleanup(() => frame2.close());
+ await writer.write(frame2);
+ assert_equals(frame2.codedWidth, 0);
+
+ clonedTrack.stop();
+
+ await writer.closed;
+
+ const frame3 = makeVideoFrame(1);
+ t.add_cleanup(() => frame3.close());
+ await promise_rejects_js(t, TypeError, writer.write(frame3));
+
+ assert_equals(frame3.codedWidth, 320);
+}, "A writer rejects when generator's track and clones are stopped");
+
+promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
+ // that |processor| actually forwards the frames written to its writable
+ // field.
+ const processor = new MediaStreamTrackProcessor(generator);
+ const reader = processor.readable.getReader();
+ const videoFrame = makeVideoFrame(1);
+
+ const writer = generator.writable.getWriter();
+ const videoFrame1 = makeVideoFrame(1);
+ writer.write(videoFrame1);
+ const result1 = await reader.read();
+ t.add_cleanup(() => result1.value.close());
+ assert_equals(result1.value.timestamp, 1);
+ generator.muted = true;
+
+ // This frame is expected to be discarded.
+ const videoFrame2 = makeVideoFrame(2);
+ writer.write(videoFrame2);
+ generator.muted = false;
+
+ const videoFrame3 = makeVideoFrame(3);
+ writer.write(videoFrame3);
+ const result3 = await reader.read();
+ t.add_cleanup(() => result3.value.close());
+ assert_equals(result3.value.timestamp, 3);
+
+ // Set up a read ahead of time, then mute, enqueue and unmute.
+ const promise5 = reader.read();
+ generator.muted = true;
+ writer.write(makeVideoFrame(4)); // Expected to be discarded.
+ generator.muted = false;
+ writer.write(makeVideoFrame(5));
+ const result5 = await promise5;
+ t.add_cleanup(() => result5.value.close());
+ assert_equals(result5.value.timestamp, 5);
+}, 'Tests that VideoTrackGenerator forwards frames only when unmuted');
+
+done();
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/idlharness.any.js b/testing/web-platform/tests/mediacapture-insertable-streams/idlharness.any.js
new file mode 100644
index 0000000000..594753cc1a
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/idlharness.any.js
@@ -0,0 +1,14 @@
+// META: global=dedicatedworker
+// META: script=/resources/WebIDLParser.js
+// META: script=/resources/idlharness.js
+
+idl_test(
+ ['mediacapture-transform'],
+ ['dom', 'html'],
+ idl_array => {
+ idl_array.add_objects({
+    MediaStreamTrackProcessor: ['new MediaStreamTrackProcessor({ track: (new VideoTrackGenerator()).track })'],
+ VideoTrackGenerator: ['new VideoTrackGenerator()'],
+ });
+ }
+);
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-audio.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-audio.https.html
new file mode 100644
index 0000000000..83a4f21b4a
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-audio.https.html
@@ -0,0 +1,97 @@
+<!doctype html>
+<html>
+
+<head>
+ <title>MediaStreamTrackGenerator</title>
+ <link rel="help" href="https://w3c.github.io/mediacapture-insertable-streams">
+</head>
+
+<body>
+ <p class="instructions">When prompted, use the accept button to give permission to use your audio and video devices.</p>
+ <h1 class="instructions">Description</h1>
+ <p class="instructions">This test checks that generating audio MediaStreamTracks works as expected.</p>
+ <audio id="audioElement" autoplay=true></audio>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src=/resources/testdriver.js></script>
+ <script src=/resources/testdriver-vendor.js></script>
+ <script src='/mediacapture-streams/permission-helper.js'></script>
+ <script>
+
+ function makeAudioData(timestamp) {
+ const sampleRate = 30000;
+
+ let frames = sampleRate / 10;
+ let channels = 1;
+
+ // Generate a simple sin wave, so we have something.
+ let data = new Float32Array(frames*channels);
+ const hz = 100; // sound frequency
+ for (let i = 0; i < data.length; i++) {
+ const t = (i / sampleRate) * hz * (Math.PI * 2);
+ data[i] = Math.sin(t);
+ }
+
+ return new AudioData({
+ timestamp: timestamp,
+ numberOfFrames: frames,
+ numberOfChannels: channels,
+ sampleRate: sampleRate,
+ data: data,
+ format: "f32",
+ });
+ }
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator("audio");
+
+ const writer = generator.writable.getWriter();
+ await writer.write(makeAudioData(1));
+
+ assert_equals(generator.kind, "audio");
+ assert_equals(generator.readyState, "live");
+
+ t.add_cleanup(() => generator.stop());
+ }, "Tests that creating a Audio MediaStreamTrackGenerator works as expected");
+
+ promise_test(async t => {
+ assert_throws_js(TypeError, () => { new MediaStreamTrackGenerator({ kind: "invalid kind" }) });
+ }, "Creating Generator with an invalid kind throws");
+
+ promise_test(async t => {
+ await setMediaPermission();
+ const capturedStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+ assert_equals(capturedStream.getAudioTracks().length, 1);
+ const upstreamTrack = capturedStream.getAudioTracks()[0];
+ t.add_cleanup(() => upstreamTrack.stop());
+
+ assert_throws_js(TypeError, () => { new MediaStreamTrackGenerator() });
+ }, "Creating Generator with a missing kind throws");
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator({ kind: "video" });
+ t.add_cleanup(() => generator.stop());
+
+ const writer = generator.writable.getWriter();
+ const data = makeAudioData(1);
+
+    await promise_rejects_js(t, TypeError, writer.write(data), "write rejects with a TypeError");
+ }, "Mismatched data and generator kind throws on write.");
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator("audio");
+ t.add_cleanup(() => generator.stop());
+
+ const audioElement = document.getElementById("audioElement");
+ audioElement.srcObject = new MediaStream([generator]);
+ await audioElement.play();
+
+ const writer = generator.writable.getWriter();
+ await writer.write(makeAudioData(1));
+
+ // Wait for audio playout to actually happen.
+ await t.step_wait(() => audioElement.currentTime > 0, "audioElement played out generated track");
+ }, "Tests that audio actually flows to a connected audio element");
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-service-worker.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-service-worker.https.html
new file mode 100644
index 0000000000..389a30d0d9
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-service-worker.https.html
@@ -0,0 +1,24 @@
+<!doctype html>
+<title>Test initialize MediaStreamTrackGenerator in a service worker</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src='/service-workers/service-worker/resources/test-helpers.sub.js'></script>
+<script>
+'use strict';
+
+promise_test(async t => {
+ const registration = await navigator.serviceWorker.register('service-worker.js');
+ await wait_for_state(t, registration.installing, 'activated');
+ const result = new Promise((resolve, reject) => {
+ navigator.serviceWorker.addEventListener('message', (e) => {
+ if (e.data.result === 'Failure') {
+ reject('Failed with error ' + e.data.error);
+ } else {
+ resolve();
+ }
+ });
+ });
+ registration.active.postMessage('hello world');
+ return result;
+}, 'A service worker is able to initialize a MediaStreamTrackGenerator without crashing');
+</script> \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-shared-worker.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-shared-worker.https.html
new file mode 100644
index 0000000000..deecfccad1
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-shared-worker.https.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<title>Test initialize MediaStreamTrackGenerator in a shared worker</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+'use strict';
+
+promise_test(async t => {
+ const worker = new SharedWorker('shared-worker.js');
+ const result = new Promise((resolve, reject) => {
+ worker.port.onmessage = (e) => {
+ if (e.data.result === 'Failure') {
+ reject('Failed with error ' + e.data.error);
+ } else {
+ resolve();
+ }
+ };
+ });
+ worker.port.postMessage('Hello world');
+ return result;
+}, 'A shared worker is able to initialize a MediaStreamTrackGenerator without crashing');
+</script> \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-worker.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-worker.https.html
new file mode 100644
index 0000000000..e0a8f2fc27
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-in-worker.https.html
@@ -0,0 +1,39 @@
+<!doctype html>
+<title>Test creation of MediaStreamTrackGenerator in a worker</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+'use strict';
+
+function initWorker(){
+ const worker = new Worker('dedicated-worker.js');
+ const result = new Promise((resolve, reject) => {
+ worker.onmessage = (e) => {
+ if (e.data.result === 'Failure') {
+ reject('Failed with error ' + e.data.error);
+ } else {
+ resolve();
+ }
+ };
+ });
+ return [worker,result];
+}
+
+promise_test(async t => {
+ const [worker,result] = initWorker();
+ worker.postMessage({msg: 'Hello there'});
+ return result;
+}, 'A worker is able to initialize a MediaStreamTrackGenerator without crashing');
+
+promise_test(async t => {
+ const [worker,result] = initWorker();
+ worker.postMessage({enable: true});
+ return result;
+}, 'A worker is able to enable a MediaStreamTrackGenerator without crashing');
+
+promise_test(async t => {
+ const [worker,result] = initWorker();
+ worker.postMessage({enable: false});
+ return result;
+}, 'A worker is able to disable a MediaStreamTrackGenerator without crashing');
+</script>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-pipes-data-in-worker.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-pipes-data-in-worker.https.html
new file mode 100644
index 0000000000..61a6f038c4
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-pipes-data-in-worker.https.html
@@ -0,0 +1,41 @@
+<!doctype html>
+<title>Test piping data through MediaStreamTrackGenerator in a worker</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script id="workerCode" type="javascript/worker">
+self.onmessage = (e) => {
+ try {
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ e.data.readable.pipeTo(generator.writable);
+ self.postMessage({result: 'Success'});
+ } catch (e) {
+ self.postMessage({result: 'Failure', error: e});
+ }
+}
+</script>
+<script>
+'use strict';
+
+promise_test(async t => {
+ const workerBlob = new Blob([document.querySelector('#workerCode').textContent],
+ { type: "text/javascript" });
+ const workerUrl = window.URL.createObjectURL(workerBlob);
+ const worker = new Worker(workerUrl);
+ window.URL.revokeObjectURL(workerUrl);
+ const result = new Promise((resolve, reject) => {
+ worker.onmessage = (e) => {
+ if (e.data.result === 'Failure') {
+ reject('Failed with error ' + e.data.error);
+ } else {
+ resolve();
+ }
+ };
+ });
+ const stream = await navigator.mediaDevices.getUserMedia({ video: true });
+ const track = stream.getVideoTracks()[0];
+ const processor = new MediaStreamTrackProcessor({ track: track });
+ worker.postMessage({ readable: processor.readable },
+ [processor.readable]);
+ return result;
+}, 'A worker is able to pipe data through a MediaStreamTrackGenerator without crashing');
+</script> \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-video.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-video.https.html
new file mode 100644
index 0000000000..a6f73f009c
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackGenerator-video.https.html
@@ -0,0 +1,285 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>MediaStream Insertable Streams - Video</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/webrtc/RTCPeerConnection-helper.js"></script>
+</head>
+<body>
+ <p class="instructions">When prompted, use the accept button to give permission to use your audio and video devices.</p>
+ <h1 class="instructions">Description</h1>
+ <p class="instructions">This test checks that generating video MediaStreamTracks works as expected.</p>
+ <script>
+
+ const pixelColour = [50, 100, 150, 255];
+ const height = 240;
+ const width = 320;
+ function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(width, height);
+
+ const ctx = canvas.getContext('2d', {alpha: false});
+ ctx.fillStyle = `rgba(${pixelColour.join()})`;
+ ctx.fillRect(0, 0, width, height);
+
+ return new VideoFrame(canvas, {timestamp, alpha: 'discard'});
+ }
+
+ async function getVideoFrame() {
+ const stream = await getNoiseStream({video: true});
+ const input_track = stream.getTracks()[0];
+ const processor = new MediaStreamTrackProcessor(input_track);
+ const reader = processor.readable.getReader();
+ const result = await reader.read();
+ input_track.stop();
+ return result.value;
+ }
+
+ function assertPixel(t, bytes, expected, epsilon = 5) {
+ for (let i = 0; i < bytes.length; i++) {
+ t.step(() => {
+ assert_less_than(Math.abs(bytes[i] - expected[i]), epsilon, "Mismatched pixel");
+ });
+ }
+ }
+
+ async function initiateSingleTrackCall(t, track, output) {
+ const caller = new RTCPeerConnection();
+ t.add_cleanup(() => caller.close());
+ const callee = new RTCPeerConnection();
+ t.add_cleanup(() => callee.close());
+ caller.addTrack(track);
+ t.add_cleanup(() => track.stop());
+
+ exchangeIceCandidates(caller, callee);
+ // Wait for the first track.
+ const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
+ output.srcObject = new MediaStream([e.track]);
+ // Exchange answer.
+ await exchangeAnswer(caller, callee);
+ await waitForConnectionStateChange(callee, ['connected']);
+ }
+
+ promise_test(async t => {
+ const videoFrame = await getVideoFrame();
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ const originalTimestamp = videoFrame.timestamp;
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ t.add_cleanup(() => generator.stop());
+
+ // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
+ // that |processor| actually forwards the frames written to its writable
+ // field.
+ const processor = new MediaStreamTrackProcessor(generator);
+ const reader = processor.readable.getReader();
+ const readerPromise = new Promise(async resolve => {
+ const result = await reader.read();
+ assert_equals(result.value.displayWidth, originalWidth);
+ assert_equals(result.value.displayHeight, originalHeight);
+ assert_equals(result.value.timestamp, originalTimestamp);
+ resolve();
+ });
+
+ generator.writable.getWriter().write(videoFrame);
+
+ return readerPromise;
+ }, 'Tests that MediaStreamTrackGenerator forwards frames to sink');
+
+ promise_test(async t => {
+ const videoFrame = makeVideoFrame(1);
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ const generator = new MediaStreamTrackGenerator({ kind: 'video' });
+ t.add_cleanup(() => generator.stop());
+
+ const video = document.createElement("video");
+ video.autoplay = true;
+ video.width = 320;
+ video.height = 240;
+ video.srcObject = new MediaStream([generator]);
+ video.play();
+
+ // Wait for the video element to be connected to the generator and
+ // generate the frame.
+ video.onloadstart = () => generator.writable.getWriter().write(videoFrame);
+
+ return new Promise((resolve)=> {
+ video.ontimeupdate = t.step_func(() => {
+ const canvas = document.createElement("canvas");
+ canvas.width = originalWidth;
+ canvas.height = originalHeight;
+ const context = canvas.getContext('2d');
+ context.drawImage(video, 0, 0);
+ // Pick a pixel in the centre of the video and check that it has the colour of the frame provided.
+ const pixel = context.getImageData(videoFrame.displayWidth/2, videoFrame.displayHeight/2, 1, 1);
+ assertPixel(t, pixel.data, pixelColour);
+ resolve();
+ });
+ });
+ }, 'Tests that frames are actually rendered correctly in a stream used for a video element.');
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ t.add_cleanup(() => generator.stop());
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(
+ t.step_func(async () => {
+ if (generator.readyState === 'live') {
+ timestamp++;
+ await writer.write(makeVideoFrame(timestamp));
+ }
+ }),
+ 40);
+ t.add_cleanup(() => clearInterval(intervalId));
+
+ const video = document.createElement('video');
+ video.autoplay = true;
+ video.width = width;
+ video.height = height;
+ video.muted = true;
+
+ await initiateSingleTrackCall(t, generator, video);
+
+ return new Promise(resolve => {
+ video.ontimeupdate = t.step_func(() => {
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const context = canvas.getContext('2d');
+ context.drawImage(video, 0, 0);
+ // Pick a pixel in the centre of the video and check that it has the
+ // colour of the frame provided.
+ const pixel = context.getImageData(width / 2, height / 2, 1, 1);
+ // Encoding/decoding can add noise, so increase the threshhold to 8.
+ assertPixel(t, pixel.data, pixelColour, 8);
+ resolve();
+ });
+ });
+ }, 'Tests that frames are actually rendered correctly in a stream sent over a peer connection.');
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ t.add_cleanup(() => generator.stop());
+
+ const inputCanvas = new OffscreenCanvas(width, height);
+
+ const inputContext = inputCanvas.getContext('2d', {alpha: false});
+ // draw four quadrants
+ const colorUL = [255, 0, 0, 255];
+ inputContext.fillStyle = `rgba(${colorUL.join()})`;
+ inputContext.fillRect(0, 0, width / 2, height / 2);
+ const colorUR = [255, 255, 0, 255];
+ inputContext.fillStyle = `rgba(${colorUR.join()})`;
+ inputContext.fillRect(width / 2, 0, width / 2, height / 2);
+ const colorLL = [0, 255, 0, 255];
+ inputContext.fillStyle = `rgba(${colorLL.join()})`;
+ inputContext.fillRect(0, height / 2, width / 2, height / 2);
+ const colorLR = [0, 255, 255, 255];
+ inputContext.fillStyle = `rgba(${colorLR.join()})`;
+ inputContext.fillRect(width / 2, height / 2, width / 2, height / 2);
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(
+ t.step_func(async () => {
+ if (generator.readyState === 'live') {
+ timestamp++;
+ await writer.write(new VideoFrame(
+ inputCanvas, {timestamp: timestamp, alpha: 'discard'}));
+ }
+ }),
+ 40);
+ t.add_cleanup(() => clearInterval(intervalId));
+
+ const caller = new RTCPeerConnection();
+ t.add_cleanup(() => caller.close());
+ const callee = new RTCPeerConnection();
+ t.add_cleanup(() => callee.close());
+ const sender = caller.addTrack(generator);
+
+ exchangeIceCandidates(caller, callee);
+ // Wait for the first track.
+ const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
+
+ // Exchange answer.
+ await exchangeAnswer(caller, callee);
+ await waitForConnectionStateChange(callee, ['connected']);
+ const params = sender.getParameters();
+ params.encodings.forEach(e => e.scaleResolutionDownBy = 2);
+ sender.setParameters(params);
+
+ const processor = new MediaStreamTrackProcessor(e.track);
+ const reader = processor.readable.getReader();
+
+ // The first frame may not have had scaleResolutionDownBy applied
+ const numTries = 5;
+ for (let i = 1; i <= numTries; i++) {
+ const {value: outputFrame} = await reader.read();
+ if (outputFrame.displayWidth !== width / 2) {
+ assert_less_than(i, numTries, `First ${numTries} frames were the wrong size.`);
+ outputFrame.close();
+ continue;
+ }
+
+ assert_equals(outputFrame.displayWidth, width / 2);
+ assert_equals(outputFrame.displayHeight, height / 2);
+
+ const outputCanvas = new OffscreenCanvas(width / 2, height / 2);
+ const outputContext = outputCanvas.getContext('2d', {alpha: false});
+ outputContext.drawImage(outputFrame, 0, 0);
+ outputFrame.close();
+ // Check the four quadrants
+ const pixelUL = outputContext.getImageData(width / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUL.data, colorUL);
+ const pixelUR =
+ outputContext.getImageData(width * 3 / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUR.data, colorUR);
+ const pixelLL =
+ outputContext.getImageData(width / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLL.data, colorLL);
+ const pixelLR =
+ outputContext.getImageData(width * 3 / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLR.data, colorLR);
+ break;
+ }
+ }, 'Tests that frames are sent correctly with RTCRtpEncodingParameters.scaleResolutionDownBy.');
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator("video");
+ t.add_cleanup(() => generator.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_equals(generator.kind, "video");
+ assert_equals(generator.readyState, "live");
+ }, "Tests that creating a Video MediaStreamTrackGenerator works as expected");
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator("video");
+ t.add_cleanup(() => generator.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_throws_dom("InvalidStateError", () => frame.clone(), "VideoFrame wasn't destroyed on write.");
+ }, "Tests that VideoFrames are destroyed on write.");
+
+ promise_test(async t => {
+ const generator = new MediaStreamTrackGenerator("audio");
+ t.add_cleanup(() => generator.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+    await promise_rejects_js(t, TypeError, writer.write(frame));
+ }, "Mismatched frame and generator kind throws on write.");
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-backpressure.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-backpressure.https.html
new file mode 100644
index 0000000000..7b4f88e944
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-backpressure.https.html
@@ -0,0 +1,69 @@
+<!doctype html>
+<html>
+<head>
+ <title>MediaStreamTrackProcessor backpressure</title>
+ <link rel="help" href="https://w3c.github.io/mediacapture-insertable-streams">
+</head>
+<body>
+ <h1 class="instructions">Description</h1>
+<p class="instructions">This test checks that MediaStreamTrackProcessor handles backpressure from a WHATWG stream pipeline.</p>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script src=/resources/testdriver.js></script>
+<script src=/resources/testdriver-vendor.js></script>
+<script>
+
+ const height = 240;
+ const width = 320;
+
+ const inputCanvas = new OffscreenCanvas(width, height);
+ const inputCtx = inputCanvas.getContext('2d', {alpha: false});
+ inputCtx.fillStyle = 'black';
+ inputCtx.fillRect(0, 0, width, height);
+
+ const frameDuration = 40;
+
+ function makeUniformVideoFrame(timestamp) {
+ return new VideoFrame(inputCanvas, {timestamp, alpha: 'discard'});
+ }
+
+ promise_test(async t => {
+ // TODO: use "new VideoTrackGenerator"
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ t.add_cleanup(() => generator.stop());
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(
+ t.step_func(async () => {
+ if (generator.readyState === 'live') {
+ timestamp++;
+ await writer.write(makeUniformVideoFrame(timestamp));
+ }
+ }),
+ frameDuration);
+ t.add_cleanup(() => clearInterval(intervalId));
+ t.step_timeout(function() {
+ clearInterval(intervalId);
+ generator.stop();
+ }, 2000);
+ const processor = new MediaStreamTrackProcessor({track: generator});
+ let ts = 1;
+ await processor.readable.pipeTo(new WritableStream({
+ async write(frame) {
+ if (ts === 1) {
+ assert_equals(frame.timestamp, ts, "Timestamp mismatch");
+ } else {
+ assert_greater_than_equal(frame.timestamp, ts, "Backpressure should have resulted in skipping at least 3 frames");
+ }
+ frame.close();
+ ts+=3;
+ // Wait the equivalent of 3 frames
+ return new Promise((res) => t.step_timeout(res, 3*frameDuration));
+ }
+ }));
+ }, "Tests that backpressure forces MediaStreamTrackProcess to skip frames");
+</script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-video.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-video.https.html
new file mode 100644
index 0000000000..9f6043ee48
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-video.https.html
@@ -0,0 +1,97 @@
+<!doctype html>
+<html>
+<head>
+ <title>MediaStreamTrackProcessor</title>
+ <link rel="help" href="https://w3c.github.io/mediacapture-insertable-streams">
+</head>
+<body>
+<p class="instructions">When prompted, use the accept button to give permission to use your audio and video devices.</p>
+<h1 class="instructions">Description</h1>
+<p class="instructions">This test checks that MediaStreamTrackProcessor works as expected on video MediaStreamTracks.</p>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script src=/resources/testdriver.js></script>
+<script src=/resources/testdriver-vendor.js></script>
+<script src='/mediacapture-streams/permission-helper.js'></script>
+<script>
+promise_test(async t => {
+ await setMediaPermission("granted", ["camera"]);
+ const stream = await navigator.mediaDevices.getUserMedia({video: true});
+ const track = stream.getTracks()[0];
+ const processor = new MediaStreamTrackProcessor({track: track});
+ const reader = processor.readable.getReader();
+ const readResult = await reader.read();
+ assert_false(readResult.done)
+ assert_true(readResult.value instanceof VideoFrame);
+ readResult.value.close();
+ track.stop();
+ return reader.closed;
+}, "Tests that the reader of a video MediaStreamTrackProcessor produces video frames and is closed on track stop");
+
+promise_test(async t => {
+ const stream = await navigator.mediaDevices.getUserMedia({video: true});
+ const track = stream.getTracks()[0];
+ const processor = new MediaStreamTrackProcessor({track: track});
+ const worker = new Worker('MediaStreamTrackProcessor-worker.js');
+ const promise = new Promise(resolve => {
+ worker.onmessage = t.step_func(msg => {
+ if (msg.data instanceof VideoFrame) {
+ msg.data.close();
+ track.stop();
+ } else if (msg.data == 'closed') {
+ resolve();
+ } else {
+ assert_unreached();
+ }
+ })
+ });
+ worker.postMessage({readable: processor.readable},
+ [processor.readable]);
+ return promise;
+}, "Tests that the reader of a video MediaStreamTrackProcessor produces VideoFrame objects and is closed on track stop while running on a worker");
+
+function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(100, 100);
+ const ctx = canvas.getContext('2d');
+ return new VideoFrame(canvas, {timestamp});
+}
+
+promise_test(async t => {
+ // The generator will be used as the source for the processor to
+ // produce frames in a controlled manner.
+ const generator = new MediaStreamTrackGenerator('video');
+ t.add_cleanup(() => generator.stop());
+ // Use a larger maxBufferSize than the default to ensure no frames
+ // will be dropped.
+ const processor = new MediaStreamTrackProcessor({track: generator, maxBufferSize:10});
+ const reader = processor.readable.getReader();
+ const writer = generator.writable.getWriter();
+
+ let numReads = 0;
+ let resolve = null;
+ const promise = new Promise(r => resolve = r);
+
+ const numOperations = 4;
+ // Issue reads without waiting for the frames to arrive.
+ for (let i = 0; i < numOperations; i++) {
+ reader.read().then(dv=> {
+ dv.value.close();
+ if (++numReads == numOperations)
+ resolve();
+ });
+ }
+
+ // Write video frames in different tasks to "slowly" settle the pending read
+ // requests.
+ for (let i = 0; i<numOperations; i++) {
+ await writer.write(makeVideoFrame(i));
+ await new Promise(r=>t.step_timeout(r,0));
+ }
+
+ return promise;
+
+}, "Tests that multiple read requests are eventually settled");
+
+</script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-worker.js
new file mode 100644
index 0000000000..51eaef80a9
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/MediaStreamTrackProcessor-worker.js
@@ -0,0 +1,17 @@
+onmessage = async msg => {
+ const reader = msg.data.readable.getReader();
+ let readResult = await reader.read();
+ postMessage(readResult.value);
+ readResult.value.close();
+ // Continue reading until the stream is done due to a track.stop()
+ while (true) {
+ readResult = await reader.read();
+ if (readResult.done) {
+ break;
+ } else {
+ readResult.value.close();
+ }
+ }
+ await reader.closed;
+ postMessage('closed');
+}
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/dedicated-worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/dedicated-worker.js
new file mode 100644
index 0000000000..0dbcc32d0b
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/dedicated-worker.js
@@ -0,0 +1,11 @@
+self.onmessage = (e) => {
+ try {
+ const mstg = new MediaStreamTrackGenerator({kind: 'video'});
+ if ('enable' in e.data) {
+ mstg.enabled = e.data.enable;
+ }
+ self.postMessage({result: 'Success'});
+ } catch (e) {
+ self.postMessage({result: 'Failure', error: e});
+ }
+} \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/service-worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/service-worker.js
new file mode 100644
index 0000000000..05a8b99ad8
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/service-worker.js
@@ -0,0 +1,8 @@
+self.addEventListener('message', (event) => {
+ try {
+ const mstg = new MediaStreamTrackGenerator({ kind: 'video' });
+ event.source.postMessage({ result: 'Success' });
+ } catch (e) {
+ event.source.postMessage({ result: 'Failure', error: e });
+ };
+}); \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/legacy/shared-worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/shared-worker.js
new file mode 100644
index 0000000000..61ff67bcff
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/legacy/shared-worker.js
@@ -0,0 +1,11 @@
+onconnect = (e) => {
+ const port = e.ports[0];
+ port.onmessage = (e) => {
+ try {
+ const generator = new MediaStreamTrackGenerator({kind: 'video'});
+ port.postMessage({result: 'Success'});
+ } catch (e) {
+ port.postMessage({result: 'Failure', error: e});
+ }
+ }
+} \ No newline at end of file
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-audio.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-audio.https.html
new file mode 100644
index 0000000000..7ab7f2a525
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-audio.https.html
@@ -0,0 +1,54 @@
<!doctype html>
<html>
<head>
  <title>MediaStreamTrackProcessor</title>
  <link rel="help" href="https://w3c.github.io/mediacapture-insertable-streams">
</head>
<body>
<p class="instructions">When prompted, use the accept button to give permission to use your audio and video devices.</p>
<h1 class="instructions">Description</h1>
<p class="instructions">This test checks that MediaStreamTrackProcessor works as expected on audio MediaStreamTracks.</p>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script src=/resources/testdriver.js></script>
<script src=/resources/testdriver-vendor.js></script>
<script src='/mediacapture-streams/permission-helper.js'></script>
<script>
// Reads a single chunk from a MediaStreamTrackProcessor wrapping a
// microphone track, checks it is an AudioData, then stops the track and
// waits for the processor's readable side to close.
promise_test(async t => {
  await setMediaPermission("granted", ["microphone"]);
  const stream = await navigator.mediaDevices.getUserMedia({audio: true});
  const track = stream.getTracks()[0];
  const processor = new MediaStreamTrackProcessor({track: track});
  const reader = processor.readable.getReader();
  const readResult = await reader.read();
  assert_false(readResult.done)
  assert_true(readResult.value instanceof AudioData);
  readResult.value.close();
  track.stop();
  // reader.closed resolves once the stream ends as a consequence of
  // track.stop(), which is the behavior under test.
  return reader.closed;
}, "Tests that the reader of an audio MediaStreamTrackProcessor produces AudioData objects and is closed on track stop");

// Same scenario, but the processor's readable end is transferred to a
// dedicated worker, which posts each chunk back. The main thread stops the
// track after the first AudioData and waits for the worker to report that
// the stream closed.
// NOTE(review): relies on the microphone permission granted by the previous
// test; no setMediaPermission call is made here.
promise_test(async t => {
  const stream = await navigator.mediaDevices.getUserMedia({audio: true});
  const track = stream.getTracks()[0];
  const processor = new MediaStreamTrackProcessor({track: track});
  const worker = new Worker('MediaStreamTrackProcessor-worker.js');
  const promise = new Promise(resolve => {
    worker.onmessage = t.step_func(msg => {
      if (msg.data instanceof AudioData) {
        // First chunk arrived; stopping the track should end the stream.
        msg.data.close();
        track.stop();
      } else if (msg.data == 'closed') {
        resolve();
      } else {
        assert_unreached();
      }
    })
  });
  // Transfer the readable stream to the worker.
  worker.postMessage({readable: processor.readable},
                     [processor.readable]);
  return promise;
}, "Tests that the reader of an audio MediaStreamTrackProcessor produces AudioData objects and is closed on track stop while running on a worker");
</script>
</body>
</html>
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-worker.js b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-worker.js
new file mode 100644
index 0000000000..51eaef80a9
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/MediaStreamTrackProcessor-worker.js
@@ -0,0 +1,17 @@
// Receives a transferred ReadableStream of media chunks. The first chunk is
// posted back to the owner and then closed; every later chunk is drained and
// closed until the stream ends (e.g. after track.stop()), at which point the
// string 'closed' is posted.
onmessage = async (event) => {
  const reader = event.data.readable.getReader();
  const first = await reader.read();
  postMessage(first.value);
  first.value.close();
  // Keep reading until the stream reports completion.
  for (;;) {
    const {done, value} = await reader.read();
    if (done) {
      break;
    }
    value.close();
  }
  await reader.closed;
  postMessage('closed');
};
diff --git a/testing/web-platform/tests/mediacapture-insertable-streams/tentative/VideoTrackGenerator.https.html b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/VideoTrackGenerator.https.html
new file mode 100644
index 0000000000..855ebb764a
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-insertable-streams/tentative/VideoTrackGenerator.https.html
@@ -0,0 +1,327 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>MediaStream Insertable Streams - VideoTrackGenerator</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/webrtc/RTCPeerConnection-helper.js"></script>
+</head>
+<body>
+ <p class="instructions">If prompted, use the accept button to give permission to use your audio and video devices.</p>
+ <h1 class="instructions">Description</h1>
+ <p class="instructions">This test checks that generating video MediaStreamTracks from VideoTrackGenerator works as expected.</p>
+ <script>
+
+ const pixelColour = [50, 100, 150, 255];
+ const height = 240;
+ const width = 320;
+ function makeVideoFrame(timestamp) {
+ const canvas = new OffscreenCanvas(width, height);
+
+ const ctx = canvas.getContext('2d', {alpha: false});
+ ctx.fillStyle = `rgba(${pixelColour.join()})`;
+ ctx.fillRect(0, 0, width, height);
+
+ return new VideoFrame(canvas, {timestamp, alpha: 'discard'});
+ }
+
+ async function getVideoFrame() {
+ const stream = await getNoiseStream({video: true});
+ const input_track = stream.getTracks()[0];
+ const processor = new MediaStreamTrackProcessor(input_track);
+ const reader = processor.readable.getReader();
+ const result = await reader.read();
+ input_track.stop();
+ return result.value;
+ }
+
+ function assertPixel(t, bytes, expected, epsilon = 5) {
+ for (let i = 0; i < bytes.length; i++) {
+ t.step(() => {
+ assert_less_than(Math.abs(bytes[i] - expected[i]), epsilon, "Mismatched pixel");
+ });
+ }
+ }
+
  // Establishes a one-way RTCPeerConnection call sending `track` from a
  // caller peer to a callee peer, attaching the received remote track to the
  // `output` media element. Both peer connections and the track are closed
  // via test cleanup handlers. Resolves once the callee reports 'connected'.
  // The offer/answer and ICE steps below are order-sensitive.
  async function initiateSingleTrackCall(t, track, output) {
    const caller = new RTCPeerConnection();
    t.add_cleanup(() => caller.close());
    const callee = new RTCPeerConnection();
    t.add_cleanup(() => callee.close());
    caller.addTrack(track);
    t.add_cleanup(() => track.stop());

    exchangeIceCandidates(caller, callee);
    // Wait for the first track.
    const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
    output.srcObject = new MediaStream([e.track]);
    // Exchange answer.
    await exchangeAnswer(caller, callee);
    await waitForConnectionStateChange(callee, ['connected']);
  }
+
+ promise_test(async t => {
+ const videoFrame = await getVideoFrame();
+ const originalWidth = videoFrame.displayWidth;
+ const originalHeight = videoFrame.displayHeight;
+ const originalTimestamp = videoFrame.timestamp;
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
+ // that |processor| actually forwards the frames written to its writable
+ // field.
+ const processor = new MediaStreamTrackProcessor(generator);
+ const reader = processor.readable.getReader();
+ const readerPromise = new Promise(async resolve => {
+ const result = await reader.read();
+ assert_equals(result.value.displayWidth, originalWidth);
+ assert_equals(result.value.displayHeight, originalHeight);
+ assert_equals(result.value.timestamp, originalTimestamp);
+ resolve();
+ });
+
+ generator.writable.getWriter().write(videoFrame);
+ return readerPromise;
+ }, 'Tests that VideoTrackGenerator forwards frames to sink');
+
  // Renders a single generated frame through an autoplaying <video> element
  // and verifies that a centre pixel of the rendered output matches the
  // colour painted by makeVideoFrame.
  promise_test(async t => {
    const videoFrame = makeVideoFrame(1);
    const originalWidth = videoFrame.displayWidth;
    const originalHeight = videoFrame.displayHeight;
    const generator = new VideoTrackGenerator();
    t.add_cleanup(() => generator.track.stop());

    const video = document.createElement("video");
    video.autoplay = true;
    video.width = 320;
    video.height = 240;
    video.srcObject = new MediaStream([generator.track]);
    video.play();

    // Wait for the video element to be connected to the generator and
    // generate the frame.
    video.onloadstart = () => generator.writable.getWriter().write(videoFrame);

    return new Promise((resolve)=> {
      video.ontimeupdate = t.step_func(() => {
        const canvas = document.createElement("canvas");
        canvas.width = originalWidth;
        canvas.height = originalHeight;
        const context = canvas.getContext('2d');
        context.drawImage(video, 0, 0);
        // Pick a pixel in the centre of the video and check that it has the colour of the frame provided.
        // NOTE(review): videoFrame may already have been closed by the
        // write() above when this handler runs — confirm displayWidth/
        // displayHeight still yield the intended centre coordinates here.
        const pixel = context.getImageData(videoFrame.displayWidth/2, videoFrame.displayHeight/2, 1, 1);
        assertPixel(t, pixel.data, pixelColour);
        resolve();
      });
    });
  }, 'Tests that frames are actually rendered correctly in a stream used for a video element.');
+
  // Continuously writes generated solid-colour frames while a peer
  // connection (set up by initiateSingleTrackCall) delivers the track to a
  // <video> element, then verifies that a centre pixel of the rendered
  // output matches the generated colour within a codec-noise tolerance.
  promise_test(async t => {
    const generator = new VideoTrackGenerator();
    t.add_cleanup(() => generator.track.stop());

    // Write frames for the duration of the test.
    const writer = generator.writable.getWriter();
    let timestamp = 0;
    const intervalId = setInterval(
      t.step_func(async () => {
        if (generator.track.readyState === 'live') {
          timestamp++;
          await writer.write(makeVideoFrame(timestamp));
        }
      }),
      40);
    t.add_cleanup(() => clearInterval(intervalId));

    const video = document.createElement('video');
    video.autoplay = true;
    video.width = width;
    video.height = height;
    video.muted = true;

    await initiateSingleTrackCall(t, generator.track, video);

    return new Promise(resolve => {
      video.ontimeupdate = t.step_func(() => {
        const canvas = document.createElement('canvas');
        canvas.width = width;
        canvas.height = height;
        const context = canvas.getContext('2d');
        context.drawImage(video, 0, 0);
        // Pick a pixel in the centre of the video and check that it has the
        // colour of the frame provided.
        const pixel = context.getImageData(width / 2, height / 2, 1, 1);
        // Encoding/decoding can add noise, so increase the threshold to 8.
        assertPixel(t, pixel.data, pixelColour, 8);
        resolve();
      });
    });
  }, 'Tests that frames are actually rendered correctly in a stream sent over a peer connection.');
+
+
+ promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const inputCanvas = new OffscreenCanvas(width, height);
+
+ const inputContext = inputCanvas.getContext('2d', {alpha: false});
+ // draw four quadrants
+ const colorUL = [255, 0, 0, 255];
+ inputContext.fillStyle = `rgba(${colorUL.join()})`;
+ inputContext.fillRect(0, 0, width / 2, height / 2);
+ const colorUR = [255, 255, 0, 255];
+ inputContext.fillStyle = `rgba(${colorUR.join()})`;
+ inputContext.fillRect(width / 2, 0, width / 2, height / 2);
+ const colorLL = [0, 255, 0, 255];
+ inputContext.fillStyle = `rgba(${colorLL.join()})`;
+ inputContext.fillRect(0, height / 2, width / 2, height / 2);
+ const colorLR = [0, 255, 255, 255];
+ inputContext.fillStyle = `rgba(${colorLR.join()})`;
+ inputContext.fillRect(width / 2, height / 2, width / 2, height / 2);
+
+ // Write frames for the duration of the test.
+ const writer = generator.writable.getWriter();
+ let timestamp = 0;
+ const intervalId = setInterval(
+ t.step_func(async () => {
+ if (generator.track.readyState === 'live') {
+ timestamp++;
+ await writer.write(new VideoFrame(
+ inputCanvas, {timestamp: timestamp, alpha: 'discard'}));
+ }
+ }),
+ 40);
+ t.add_cleanup(() => clearInterval(intervalId));
+
+ const caller = new RTCPeerConnection();
+ t.add_cleanup(() => caller.close());
+ const callee = new RTCPeerConnection();
+ t.add_cleanup(() => callee.close());
+ const sender = caller.addTrack(generator.track);
+
+ exchangeIceCandidates(caller, callee);
+ // Wait for the first track.
+ const e = await exchangeOfferAndListenToOntrack(t, caller, callee);
+
+ // Exchange answer.
+ await exchangeAnswer(caller, callee);
+ await waitForConnectionStateChange(callee, ['connected']);
+ const params = sender.getParameters();
+ params.encodings.forEach(e => e.scaleResolutionDownBy = 2);
+ sender.setParameters(params);
+
+ const processor = new MediaStreamTrackProcessor(e.track);
+ const reader = processor.readable.getReader();
+
+ // The first frame may not have had scaleResolutionDownBy applied
+ const numTries = 5;
+ for (let i = 1; i <= numTries; i++) {
+ const {value: outputFrame} = await reader.read();
+ if (outputFrame.displayWidth !== width / 2) {
+ assert_less_than(i, numTries, `First ${numTries} frames were the wrong size.`);
+ outputFrame.close();
+ continue;
+ }
+
+ assert_equals(outputFrame.displayWidth, width / 2);
+ assert_equals(outputFrame.displayHeight, height / 2);
+
+ const outputCanvas = new OffscreenCanvas(width / 2, height / 2);
+ const outputContext = outputCanvas.getContext('2d', {alpha: false});
+ outputContext.drawImage(outputFrame, 0, 0);
+ outputFrame.close();
+ // Check the four quadrants
+ const pixelUL = outputContext.getImageData(width / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUL.data, colorUL);
+ const pixelUR =
+ outputContext.getImageData(width * 3 / 8, height / 8, 1, 1);
+ assertPixel(t, pixelUR.data, colorUR);
+ const pixelLL =
+ outputContext.getImageData(width / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLL.data, colorLL);
+ const pixelLR =
+ outputContext.getImageData(width * 3 / 8, height * 3 / 8, 1, 1);
+ assertPixel(t, pixelLR.data, colorLR);
+ break;
+ }
+ }, 'Tests that frames are sent correctly with RTCRtpEncodingParameters.scaleResolutionDownBy.');
+
+ promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_equals(generator.track.kind, "video");
+ assert_equals(generator.track.readyState, "live");
+ }, "Tests that creating a VideoTrackGenerator works as expected");
+
+ promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ await writer.write(frame);
+
+ assert_throws_dom("InvalidStateError", () => frame.clone(), "VideoFrame wasn't destroyed on write.");
+ }, "Tests that VideoFrames are destroyed on write.");
+
+ promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ const writer = generator.writable.getWriter();
+ const frame = makeVideoFrame(1);
+ assert_throws_js(TypeError, writer.write(frame));
+ }, "Mismatched frame and generator kind throws on write.");
+
+ promise_test(async t => {
+ const generator = new VideoTrackGenerator();
+ t.add_cleanup(() => generator.track.stop());
+
+ // Use a MediaStreamTrackProcessor as a sink for |generator| to verify
+ // that |processor| actually forwards the frames written to its writable
+ // field.
+ const processor = new MediaStreamTrackProcessor(generator.track);
+ const reader = processor.readable.getReader();
+ const videoFrame = makeVideoFrame(1);
+
+ const writer = generator.writable.getWriter();
+ const videoFrame1 = makeVideoFrame(1);
+ writer.write(videoFrame1);
+ const result1 = await reader.read();
+ assert_equals(result1.value.timestamp, 1);
+ generator.muted = true;
+
+ // This frame is expected to be discarded.
+ const videoFrame2 = makeVideoFrame(2);
+ writer.write(videoFrame2);
+ generator.muted = false;
+
+ const videoFrame3 = makeVideoFrame(3);
+ writer.write(videoFrame3);
+ const result3 = await reader.read();
+ assert_equals(result3.value.timestamp, 3);
+
+ // Set up a read ahead of time, then mute, enqueue and unmute.
+ const promise5 = reader.read();
+ generator.muted = true;
+ writer.write(makeVideoFrame(4)); // Expected to be discarded.
+ generator.muted = false;
+ writer.write(makeVideoFrame(5));
+ const result5 = await promise5;
+ assert_equals(result5.value.timestamp, 5);
+ }, 'Tests that VideoTrackGenerator forwards frames only when unmuted');
+
+ // Note - tests for mute/unmute events will be added once
+ // https://github.com/w3c/mediacapture-transform/issues/81 is resolved
+
+ </script>
+</body>
+</html>