summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface')
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html61
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html205
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html96
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html85
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-denormals.https.window.js26
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html66
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html76
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers2
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html29
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-constructor.https.window.js33
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html36
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html39
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html62
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html44
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html59
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html73
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html77
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html53
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html149
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html100
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html58
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html80
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html77
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html59
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html56
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html36
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html44
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html30
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html16
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html23
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html87
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html61
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js54
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js34
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js94
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js19
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js16
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js15
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js16
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js16
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/denormal-test-processor.js12
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js12
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js18
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js22
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js40
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js38
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js22
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js27
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js47
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js49
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js19
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js30
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js34
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js44
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js55
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js18
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js40
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/register-processor-typeerrors.js39
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js35
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js25
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js42
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js78
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html90
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html51
64 files changed, 3149 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html
new file mode 100644
index 0000000000..dc324b22d6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test the invocation order of AudioWorklet.addModule() and BaseAudioContext
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let realtimeContext = new AudioContext();
+ let offlineContext = new OfflineAudioContext(1, sampleRate, sampleRate);
+
+ let filePath = 'processors/dummy-processor.js';
+
+ // Test if the browser does not crash upon addModule() call after the
+ // realtime context construction.
+ audit.define(
+ {label: 'module-loading-after-realtime-context-creation'},
+ (task, should) => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(realtimeContext, 'dummy');
+ dummyWorkletNode.connect(realtimeContext.destination);
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode ' +
+ 'from realtime context')
+ .beTrue();
+ task.done();
+ });
+
+ // Test if the browser does not crash upon addModule() call after the
+ // offline context construction.
+ audit.define(
+ {label: 'module-loading-after-offline-context-creation'},
+ (task, should) => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(offlineContext, 'dummy');
+ dummyWorkletNode.connect(offlineContext.destination);
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode ' +
+ 'from offline context')
+ .beTrue();
+ task.done();
+ });
+
+ Promise.all([
+ realtimeContext.audioWorklet.addModule(filePath),
+ offlineContext.audioWorklet.addModule(filePath)
+ ]).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html
new file mode 100644
index 0000000000..9e93f48ab8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html
@@ -0,0 +1,205 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>
+ Test get parameterDescriptor as various iterables
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ </head>
+
+ <body>
+ <script id="params">
+ // A series of AudioParamDescriptors, copied one by one into various iterable
+ // data structures. This is used by both the processor side and the main
+ // thread side, so is in a different script tag.
+ const PARAMS = [
+ {
+ name: "a control-rate parameter",
+ defaultValue: 0.5,
+ minValue: 0,
+ maxValue: 1,
+ automationRate: "a-rate",
+ },
+ {
+ name: "你好",
+ defaultValue: 2.5,
+ minValue: 0,
+ maxValue: 7,
+ automationRate: "a-rate",
+ },
+ {
+ name: "🎶",
+ defaultValue: 8.5,
+ minValue: 0,
+ maxValue: 11115,
+ automationRate: "k-rate",
+ },
+ ];
+ </script>
+ <script id="processors" type="worklet">
+ registerProcessor("set",
+ class SetParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ var s = new Set();
+ s.add(PARAMS[0]);
+ s.add(PARAMS[1]);
+ s.add(PARAMS[2]);
+ return s;
+ }
+ constructor() { super(); }
+ process() {
+ }
+ });
+
+ registerProcessor("array",
+ class ArrayParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return PARAMS;
+ }
+ constructor() { super(); }
+ process() { }
+ });
+
+ function* gen() {
+ yield PARAMS[0];
+ yield PARAMS[1];
+ yield PARAMS[2];
+ }
+ registerProcessor("generator",
+ class GeneratorParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return gen();
+ }
+ constructor() { super(); }
+ process() { }
+ });
+ // Test a processor that has a get parameterDescriptors, but it returns
+ // something that is not iterable.
+ try {
+ registerProcessor("invalid",
+ class InvalidParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return 4;
+ }
+ constructor() { super(); }
+ process() { }
+ });
+ throw "This should not have been reached.";
+ } catch (e) {
+ // unclear how to signal success here, but we can signal failure in the
+ // developer console
+ if (e.name != "TypeError") {
+ throw "This should be TypeError";
+ }
+ }
+      // Test a processor that has a get parameterDescriptors which returns
+      // two descriptors with the same (duplicate) param name.
+ try {
+ registerProcessor("duplicate-param-name",
+ class DuplicateParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ var p = {
+ name: "a",
+ defaultValue: 1,
+ minValue: 0,
+ maxValue: 1,
+ automationRate: "k-rate",
+ };
+ return [p,p];
+ }
+ constructor() { super(); }
+ process() { }
+ });
+ throw "This should not have been reached.";
+ } catch (e) {
+ // unclear how to signal success here, but we can signal failure in the
+ // developer console
+ if (e.name != "NotSupportedError") {
+ throw "This should be NotSupportedError";
+ }
+ }
+      // Test a processor that has no parameterDescriptors getter at all.
+ try {
+ registerProcessor("no-params",
+ class NoParamProcessor extends AudioWorkletProcessor {
+ constructor() { super(); }
+ process() { }
+ });
+ } catch (e) {
+ throw "Construction should have worked.";
+ }
+ </script>
+ <script>
+ setup({ explicit_done: true });
+      // Mangle the PARAMS array into a map with the same shape as an
+      // AudioWorkletNode's parameters property.
+ var PARAMS_MAP = new Map();
+ for (var param of PARAMS) {
+ var o = param;
+ var name = o.name;
+ delete o.name;
+ PARAMS_MAP.set(name, o);
+ }
+
+      // Compares `lhs` and `rhs`, two maplikes with the same shape
+      // as PARAMS_MAP.
+ function compare(testname, lhs, rhs) {
+ equals(lhs.size, rhs.size, "Map match in size for " + testname);
+ var i = 0;
+ for (var [k, v] of lhs) {
+ is_true(rhs.has(k), testname + ": " + k + " exists in both maps");
+ var vrhs = rhs.get(k);
+ ["defaultValue", "minValue", "maxValue", "automationRate"].forEach(
+ paramKey => {
+ equals(
+ v[paramKey],
+ vrhs[paramKey],
+ `Values for ${k}.${paramKey} match for ${testname}`
+ );
+ }
+ );
+ }
+ }
+ var ac = new AudioContext();
+ var url = URLFromScriptsElements(["params", "processors"]);
+ ac.audioWorklet
+ .addModule(url)
+ .then(() => {
+ ["set", "array", "generator"].forEach(iterable => {
+ test(() => {
+ var node = new AudioWorkletNode(ac, iterable);
+ compare(iterable, node.parameters, PARAMS_MAP);
+ }, `Creating an AudioWorkletNode with a ${iterable} for
+ parameter descriptor worked`);
+ });
+ })
+ .then(function() {
+ test(function() {
+ assert_throws_dom("InvalidStateError", function() {
+ new AudioWorkletNode(ac, "invalid");
+ });
+ }, `Attempting to create an AudioWorkletNode with an non
+ iterable for parameter descriptor should not work`);
+ })
+ .then(function() {
+ test(() => {
+ new AudioWorkletNode(ac, "no-params");
+ }, `Attempting to create an AudioWorkletNode from a processor
+ that does not have a parameterDescriptors getter should work`);
+ })
+ .then(function() {
+ test(function() {
+ assert_throws_dom("InvalidStateError", function() {
+ new AudioWorkletNode(ac, "duplicate-param-name");
+ });
+ }, `Attempting to create an AudioWorkletNode with two parameter
+ descriptor with the same name should not work`);
+ }).then(function() {
+ done();
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html
new file mode 100644
index 0000000000..9578b26881
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html
@@ -0,0 +1,96 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test AudioParam Array Size
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+ let filePath = 'processors/param-size-processor.js';
+ let context;
+
+ // Use a power of two so there's no roundoff computing times from frames.
+ let sampleRate = 16384;
+
+ // Sets up AudioWorklet and OfflineAudioContext.
+ audit.define('Initializing AudioWorklet and Context', (task, should) => {
+ should(() => {
+ context = new OfflineAudioContext(
+ 1, 10 * RENDER_QUANTUM_FRAMES, sampleRate);
+ }, 'Creating offline context for testing').notThrow();
+
+ should(
+ context.audioWorklet.addModule(filePath), 'Creating test worklet')
+ .beResolved()
+ .then(() => {
+ task.done();
+ });
+ });
+
+ audit.define('Verify Size of AudioParam Arrays', (task, should) => {
+ let node = new AudioWorkletNode(context, 'param-size');
+ let nodeParam = node.parameters.get('param');
+
+ node.connect(context.destination);
+
+ let renderQuantumDuration = RENDER_QUANTUM_FRAMES / context.sampleRate;
+
+ // Set up some automations, after one render quantum. We want the first
+ // render not to have any automations, just to be sure we handle that
+ // case correctly.
+ context.suspend(renderQuantumDuration)
+ .then(() => {
+ let now = context.currentTime;
+
+ // Establish the first automation event.
+ nodeParam.setValueAtTime(1, now);
+ // The second render should be constant
+ nodeParam.setValueAtTime(0, now + renderQuantumDuration);
+ // The third render and part of the fourth is a linear ramp
+ nodeParam.linearRampToValueAtTime(
+ 1, now + 2.5 * renderQuantumDuration);
+ // Everything afterwards should be constant.
+ })
+ .then(() => context.resume());
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let data = renderedBuffer.getChannelData(0);
+
+ // The very first render quantum should be constant, so the array
+ // has length 1.
+ should(
+ data.slice(0, RENDER_QUANTUM_FRAMES),
+ 'Render quantum 0: array size')
+ .beConstantValueOf(1);
+
+ should(
+ data.slice(RENDER_QUANTUM_FRAMES, 2 * RENDER_QUANTUM_FRAMES),
+ 'Render quantum 1: array size')
+ .beConstantValueOf(1);
+
+ should(
+ data.slice(
+ 2 * RENDER_QUANTUM_FRAMES, 4 * RENDER_QUANTUM_FRAMES),
+ 'Render quantum 2-3: array size')
+ .beConstantValueOf(RENDER_QUANTUM_FRAMES);
+
+ should(
+ data.slice(4 * RENDER_QUANTUM_FRAMES),
+ 'Remaining renders: array size')
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html
new file mode 100644
index 0000000000..8e51470f64
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html
@@ -0,0 +1,85 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's basic AudioParam features
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 48000;
+ let renderLength = 48000 * 0.6;
+ let context;
+
+ let filePath = 'processors/gain-processor.js';
+
+ // Sets up AudioWorklet and OfflineAudioContext.
+ audit.define('Initializing AudioWorklet and Context', (task, should) => {
+ context = new OfflineAudioContext(1, renderLength, sampleRate);
+ context.audioWorklet.addModule(filePath).then(() => {
+ task.done();
+ });
+ });
+
+ // Verifies the functionality of AudioParam in AudioWorkletNode by
+ // comparing (canceling out) values from GainNode and AudioWorkletNode
+ // with simple gain computation code by AudioParam.
+ audit.define(
+ 'Verifying AudioParam in AudioWorkletNode',
+ (task, should) => {
+ let constantSourceNode = new ConstantSourceNode(context);
+ let gainNode = new GainNode(context);
+ let inverterNode = new GainNode(context, {gain: -1});
+ let gainWorkletNode = new AudioWorkletNode(context, 'gain');
+ let gainWorkletParam = gainWorkletNode.parameters.get('gain');
+
+ // Test default value and setter/getter functionality.
+ should(gainWorkletParam.value,
+ 'Default gain value of gainWorkletNode')
+ .beEqualTo(Math.fround(0.707));
+ gainWorkletParam.value = 0.1;
+ should(gainWorkletParam.value,
+ 'Value of gainWorkletParam after setter = 0.1')
+ .beEqualTo(Math.fround(0.1));
+
+ constantSourceNode.connect(gainNode)
+ .connect(inverterNode)
+ .connect(context.destination);
+ constantSourceNode.connect(gainWorkletNode)
+ .connect(context.destination);
+
+ // With arbitrary times and values, test all possible AudioParam
+ // automations.
+ [gainNode.gain, gainWorkletParam].forEach((param) => {
+ param.setValueAtTime(0, 0);
+ param.linearRampToValueAtTime(1, 0.1);
+ param.exponentialRampToValueAtTime(0.5, 0.2);
+ param.setValueCurveAtTime([0, 2, 0.3], 0.2, 0.1);
+ param.setTargetAtTime(0.01, 0.4, 0.5);
+ });
+
+ // Test if the setter works correctly in the middle of rendering.
+ context.suspend(0.5).then(() => {
+ gainNode.gain.value = 1.5;
+ gainWorkletParam.value = 1.5;
+ context.resume();
+ });
+
+ constantSourceNode.start();
+ context.startRendering().then((renderedBuffer) => {
+ should(renderedBuffer.getChannelData(0),
+ 'The rendered buffer')
+ .beConstantValueOf(0);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-denormals.https.window.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-denormals.https.window.js
new file mode 100644
index 0000000000..39b9be56e6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-denormals.https.window.js
@@ -0,0 +1,26 @@
+'use strict';
+
+// Test if the JS code execution in AudioWorkletGlobalScope can handle the
+// denormals properly. For more details, see:
+// https://esdiscuss.org/topic/float-denormal-issue-in-javascript-processor-node-in-web-audio-api
+promise_test(async () => {
+ // In the main thread, the denormals should be non-zeros.
+ assert_not_equals(Number.MIN_VALUE, 0.0,
+ 'The denormals should be non-zeros.');
+
+ const context = new AudioContext();
+ await context.audioWorklet.addModule(
+ './processors/denormal-test-processor.js');
+
+ const denormalTestProcessor = new AudioWorkletNode(context, 'denormal-test');
+
+ return new Promise(resolve => {
+ denormalTestProcessor.port.onmessage = resolve;
+ denormalTestProcessor.connect(context.destination);
+ }).then(event => {
+ // In the AudioWorkletGlobalScope, the denormals should be non-zeros too.
+ assert_true(
+ event.data.result,
+ 'The denormals should be non-zeros in AudioWorkletGlobalScope.');
+ });
+}, 'Test denormal behavior in AudioWorkletGlobalScope');
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html
new file mode 100644
index 0000000000..546bd1d0d0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test MessagePort in AudioWorkletNode and AudioWorkletProcessor
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context = new AudioContext();
+
+ let filePath = 'processors/port-processor.js';
+
+ // Creates an AudioWorkletNode and sets an EventHandler on MessagePort
+ // object. The associated PortProcessor will post a message upon its
+ // construction. Test if the message is received correctly.
+ audit.define(
+ 'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
+ (task, should) => {
+ let porterWorkletNode =
+ new AudioWorkletNode(context, 'port-processor');
+
+ // Upon the creation of PortProcessor, it will post a message to the
+ // node with 'created' status.
+ porterWorkletNode.port.onmessage = (event) => {
+ should(event.data.state,
+ 'The initial message from PortProcessor')
+ .beEqualTo('created');
+ task.done();
+ };
+ });
+
+ // PortProcessor is supposed to echo the message back to the
+ // AudioWorkletNode.
+ audit.define(
+ 'Test postMessage from AudioWorkletNode to AudioWorkletProcessor',
+ (task, should) => {
+ let porterWorkletNode =
+ new AudioWorkletNode(context, 'port-processor');
+
+ porterWorkletNode.port.onmessage = (event) => {
+ // Ignore if the delivered message has |state|. This is already
+ // tested in the previous task.
+ if (event.data.state)
+ return;
+
+ should(event.data.message,
+ 'The response from PortProcessor')
+ .beEqualTo('hello');
+ task.done();
+ };
+
+ porterWorkletNode.port.postMessage('hello');
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
new file mode 100644
index 0000000000..a5dd004981
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
@@ -0,0 +1,76 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test passing SharedArrayBuffer to an AudioWorklet
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context = new AudioContext();
+
+ let filePath = 'processors/sharedarraybuffer-processor.js';
+
+ audit.define(
+ 'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
+ (task, should) => {
+ let workletNode =
+ new AudioWorkletNode(context, 'sharedarraybuffer-processor');
+
+ // After it is created, the worklet will send a new
+ // SharedArrayBuffer to the main thread.
+ //
+ // The worklet will then wait to receive a message from the main
+ // thread.
+ //
+ // When it receives the message, it will check whether it is a
+ // SharedArrayBuffer, and send this information back to the main
+ // thread.
+
+ workletNode.port.onmessage = (event) => {
+ let data = event.data;
+ switch (data.state) {
+ case 'created':
+ should(
+ data.sab instanceof SharedArrayBuffer,
+ 'event.data.sab from worklet is an instance of SharedArrayBuffer')
+ .beTrue();
+
+ // Send a SharedArrayBuffer back to the worklet.
+ let sab = new SharedArrayBuffer(8);
+ workletNode.port.postMessage(sab);
+ break;
+
+ case 'received message':
+ should(data.isSab, 'event.data from main thread is an instance of SharedArrayBuffer')
+ .beTrue();
+ task.done();
+ break;
+
+ default:
+ should(false,
+ `Got unexpected message from worklet: ${data.state}`)
+ .beTrue();
+ task.done();
+ break;
+ }
+ };
+
+ workletNode.port.onmessageerror = (event) => {
+ should(false, 'Got messageerror from worklet').beTrue();
+ task.done();
+ };
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ </script>
+ </body>
+</html>
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
new file mode 100644
index 0000000000..63b60e490f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
@@ -0,0 +1,2 @@
+Cross-Origin-Opener-Policy: same-origin
+Cross-Origin-Embedder-Policy: require-corp
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html
new file mode 100644
index 0000000000..718cadffc7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletGlobalScope's registerProcessor() called on globalThis
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const realtimeContext = new AudioContext();
+ const filePath = 'processors/dummy-processor-globalthis.js';
+
+ audit.define('registerprocessor-called-on-globalthis', (task, should) => {
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ const dummyWorkletNode = new AudioWorkletNode(realtimeContext, 'dummy-globalthis');
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode').beTrue();
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-constructor.https.window.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-constructor.https.window.js
new file mode 100644
index 0000000000..679480b480
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-constructor.https.window.js
@@ -0,0 +1,33 @@
+'use strict';
+
+// https://crbug.com/1078902: this test verifies two TypeError cases from
+// registerProcessor() method:
+// - When a given parameter is not a Function.
+// - When a given parameter is not a constructor.
+const TestDescriptions = [
+ 'The parameter should be of type "Function".',
+ 'The class definition of AudioWorkletProcessor should be a constructor.'
+];
+
+// See `register-processor-exception.js` file for the test details.
+promise_test(async () => {
+ const context = new AudioContext();
+ await context.audioWorklet.addModule(
+ './processors/register-processor-typeerrors.js');
+ const messenger = new AudioWorkletNode(context, 'messenger-processor');
+
+ return new Promise(resolve => {
+ let testIndex = 0;
+ messenger.port.onmessage = (event) => {
+ const exception = event.data;
+ assert_equals(exception.name, 'TypeError',
+ TestDescriptions[testIndex]);
+ if (++testIndex === TestDescriptions.length) {
+ resolve();
+ }
+ };
+
+ // Start the test on AudioWorkletGlobalScope.
+ messenger.port.postMessage({});
+ });
+}, 'Verifies two TypeError cases from registerProcessor() method.');
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html
new file mode 100644
index 0000000000..de31f71427
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test dynamic registerProcessor() calls in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ const t = async_test('Dynamic registration in AudioWorkletGlobalScope');
+
+ const realtimeContext = new AudioContext();
+ const filePath = 'processors/dynamic-register-processor.js';
+
+ // Test if registering an AudioWorkletProcessor dynamically (after the
+ // initial module script loading) works correctly. In the construction of
+ // nodeB (along with ProcessorB), it registers ProcessorA's definition.
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ const nodeB = new AudioWorkletNode(realtimeContext, 'ProcessorB');
+ assert_true(nodeB instanceof AudioWorkletNode,
+ 'nodeB should be instance of AudioWorkletNode');
+ nodeB.port.postMessage({});
+ nodeB.port.onmessage = () => {
+ const nodeA = new AudioWorkletNode(realtimeContext, 'ProcessorA');
+ t.step(() => {
+ assert_true(nodeA instanceof AudioWorkletNode,
+ 'nodeA should be instance of AudioWorkletNode');
+ });
+ t.done();
+ };
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
new file mode 100644
index 0000000000..685546aeb5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
@@ -0,0 +1,39 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test if activation of worklet thread does not resume context rendering.
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+ const filePath = 'processors/dummy-processor.js';
+
+ context.suspend();
+
+      // Suspends the context right away and then activates the worklet. The
+      // current time must not advance since the context is suspended.
+ audit.define(
+ {label: 'load-worklet-and-suspend'},
+ async (task, should) => {
+ await context.audioWorklet.addModule(filePath);
+ const suspendTime = context.currentTime;
+ const dummy = new AudioWorkletNode(context, 'dummy');
+ dummy.connect(context.destination);
+ return task.timeout(() => {
+ should(context.currentTime === suspendTime,
+ 'context.currentTime did not change after worklet started')
+ .beTrue();
+ should(context.state, 'context.state').beEqualTo('suspended');
+ }, 500);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html
new file mode 100644
index 0000000000..3a480464e9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html
@@ -0,0 +1,62 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>
+ Test the behaviour of AudioWorkletProcessor when an `onmessage` handler
+ throws.
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ </head>
+
+ <body>
+ <script id="processor" type="worklet">
+ registerProcessor("test-throw", class param extends AudioWorkletProcessor {
+ constructor() {
+ super()
+ this.i = 0;
+ this.port.onmessage = function(arg) {
+ throw "asdasd";
+ }
+ }
+ process(input, output, parameters) {
+ this.i++;
+ this.port.postMessage(this.i);
+ return true;
+ }
+ });
+ </script>
+ <script>
+ var latestIndexReceived = 0;
+ var node = null;
+ var ac = null;
+ promise_setup(function() {
+ ac = new AudioContext();
+ var url = URLFromScriptsElements(["processor"]);
+ return ac.audioWorklet.addModule(url).then(function() {
+ node = new AudioWorkletNode(ac, "test-throw");
+ node.port.onmessage = function(e) {
+ latestIndexReceived = parseInt(e.data);
+ };
+ });
+ });
+ promise_test(async t => {
+ var currentIndex = latestIndexReceived;
+ await t.step_wait(() => {
+ return latestIndexReceived > currentIndex;
+ }, "Process is still being called");
+
+ node.port.postMessage("asdasd"); // This throws on the processor side.
+ node.onprocessorerror = function() {
+ assert_true(false, "onprocessorerror must not be called.");
+ };
+ currentIndex = latestIndexReceived;
+ await t.step_wait(() => {
+ return latestIndexReceived > currentIndex + 2;
+ }, "Process is still being called");
+ }, `Throwing in an onmessage handler in the AudioWorkletGlobalScope shouldn't stop AudioWorkletProcessor`);
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
new file mode 100644
index 0000000000..84458d0aaa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test sampleRate in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let renderLength = 512;
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ let filePath = 'processors/one-pole-processor.js';
+
+ // Without rendering the context, attempt to access |sampleRate| in the
+ // global scope as soon as it is created.
+ audit.define(
+ 'Query |sampleRate| upon AudioWorkletGlobalScope construction',
+ (task, should) => {
+ let onePoleFilterNode =
+ new AudioWorkletNode(context, 'one-pole-filter');
+ let frequencyParam = onePoleFilterNode.parameters.get('frequency');
+
+ should(frequencyParam.maxValue,
+ 'frequencyParam.maxValue')
+ .beEqualTo(0.5 * context.sampleRate);
+
+ task.done();
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
new file mode 100644
index 0000000000..5f4bee7c53
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test currentTime and currentFrame in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let renderLength = 512;
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ let filePath = 'processors/timing-info-processor.js';
+
+ audit.define(
+ 'Check the timing information from AudioWorkletProcessor',
+ (task, should) => {
+ let portWorkletNode =
+ new AudioWorkletNode(context, 'timing-info-processor');
+ portWorkletNode.connect(context.destination);
+
+ // Suspend at render quantum boundary and check the timing
+ // information between the main thread and the rendering thread.
+ [0, 128, 256, 384].map((suspendFrame) => {
+ context.suspend(suspendFrame/sampleRate).then(() => {
+ portWorkletNode.port.onmessage = (event) => {
+ should(event.data.currentFrame,
+ 'currentFrame from the processor at ' + suspendFrame)
+ .beEqualTo(suspendFrame);
+ should(event.data.currentTime,
+ 'currentTime from the processor at '
+ + context.currentTime)
+ .beEqualTo(context.currentTime);
+ context.resume();
+ };
+
+ portWorkletNode.port.postMessage('query-timing-info');
+ });
+ });
+
+ context.startRendering().then(() => {
+ task.done();
+ });
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html
new file mode 100644
index 0000000000..330b359f7d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's automatic pull feature
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate. Anything should work.
+ const sampleRate = 48000;
+ const renderLength = RENDER_QUANTUM_FRAMES * 2;
+ const channelCount = 1;
+ const filePath = 'processors/zero-output-processor.js';
+
+ const sourceOffset = 0.5;
+
+ // Connect a constant source node to the zero-output AudioWorkletNode.
+ // Then verify if it captures the data correctly.
+ audit.define('setup-worklet', (task, should) => {
+ const context =
+ new OfflineAudioContext(channelCount, renderLength, sampleRate);
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let testSource =
+ new ConstantSourceNode(context, { offset: sourceOffset });
+ let zeroOutputWorkletNode =
+ new AudioWorkletNode(context, 'zero-output-processor', {
+ numberOfInputs: 1,
+ numberOfOutputs: 0,
+ processorOptions: {
+ bufferLength: renderLength,
+ channeCount: channelCount
+ }
+ });
+
+ // Start the source and stop at the first render quantum.
+ testSource.connect(zeroOutputWorkletNode);
+ testSource.start();
+ testSource.stop(RENDER_QUANTUM_FRAMES/sampleRate);
+
+ zeroOutputWorkletNode.port.onmessage = (event) => {
+ // The |capturedBuffer| can be multichannel. Iterate through it.
+ for (let i = 0; i < event.data.capturedBuffer.length; ++i) {
+ let buffer = event.data.capturedBuffer[i];
+ // Split the captured buffer in half for the easier test.
+ should(buffer.subarray(0, RENDER_QUANTUM_FRAMES),
+ 'The first half of the captured buffer')
+ .beConstantValueOf(sourceOffset);
+ should(buffer.subarray(RENDER_QUANTUM_FRAMES, renderLength),
+ 'The second half of the captured buffer')
+ .beConstantValueOf(0);
+ }
+ task.done();
+ };
+
+ // Starts the rendering, but we don't need the rendered buffer from
+ // the context.
+ context.startRendering();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html
new file mode 100644
index 0000000000..11c237f19d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's dynamic channel count feature
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary numbers used to align the test with render quantum boundary.
+ let sampleRate = RENDER_QUANTUM_FRAMES * 100;
+ let renderLength = RENDER_QUANTUM_FRAMES * 2;
+ let context;
+
+ let filePath = 'processors/gain-processor.js';
+
+ let testChannelValues = [1, 2, 3];
+
+ // Creates a 3-channel buffer and play with BufferSourceNode. The source
+ // goes through a bypass AudioWorkletNode (gain value of 1).
+ audit.define('setup-buffer-and-worklet', (task, should) => {
+ context = new OfflineAudioContext(testChannelValues.length,
+ renderLength,
+ sampleRate);
+
+      // Explicitly sets the destination channelCountMode and
+      // channelInterpretation to make sure no mixing occurs in the result.
+ context.channeCountMode = 'explicit';
+ context.channelInterpretation = 'discrete';
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let testBuffer = createConstantBuffer(context, 1, testChannelValues);
+ let sourceNode = new AudioBufferSourceNode(context);
+ let gainWorkletNode = new AudioWorkletNode(context, 'gain');
+
+ gainWorkletNode.parameters.get('gain').value = 1.0;
+ sourceNode.connect(gainWorkletNode).connect(context.destination);
+
+ // Suspend the context at 128 sample frames and play the source with
+ // the assigned buffer.
+ context.suspend(RENDER_QUANTUM_FRAMES/sampleRate).then(() => {
+ sourceNode.buffer = testBuffer;
+ sourceNode.loop = true;
+ sourceNode.start();
+ context.resume();
+ });
+ task.done();
+ });
+ });
+
+      // Verifies that the rendered buffer is all zeros for the first half
+      // (before 128 samples) and has the expected values for the second half.
+ audit.define('verify-rendered-buffer', (task, should) => {
+ context.startRendering().then(renderedBuffer => {
+ testChannelValues.forEach((value, index) => {
+ let channelData = renderedBuffer.getChannelData(index);
+ should(channelData.subarray(0, RENDER_QUANTUM_FRAMES),
+ 'First half of Channel #' + index)
+ .beConstantValueOf(0);
+ should(channelData.subarray(RENDER_QUANTUM_FRAMES, renderLength),
+ 'Second half of Channel #' + index)
+ .beConstantValueOf(value);
+ });
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html
new file mode 100644
index 0000000000..8b7704a781
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html
@@ -0,0 +1,53 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test the construction of AudioWorkletNode with real-time context
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let realtimeContext = new AudioContext();
+
+ let filePath = 'processors/dummy-processor.js';
+
+ // Test if an exception is thrown correctly when AWN constructor is
+ // invoked before resolving |.addModule()| promise.
+ audit.define(
+ {label: 'construction-before-module-loading'},
+ (task, should) => {
+ should(() => new AudioWorkletNode(realtimeContext, 'dummy'),
+ 'Creating a node before loading a module should throw.')
+ .throw(DOMException, 'InvalidStateError');
+
+ task.done();
+ });
+
+ // Test the construction of AudioWorkletNode after the resolution of
+ // |.addModule()|. Also the constructor must throw an exception when
+ // a unregistered node name was given.
+ audit.define(
+ {label: 'construction-after-module-loading'},
+ (task, should) => {
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(realtimeContext, 'dummy');
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode')
+ .beTrue();
+ should(() => new AudioWorkletNode(realtimeContext, 'foobar'),
+ 'Unregistered name "foobar" must throw an exception.')
+ .throw();
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html
new file mode 100644
index 0000000000..d3347d265e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html
@@ -0,0 +1,149 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test of AudioWorkletNodeOptions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const sampleRate = 48000;
+
+ const audit = Audit.createTaskRunner();
+ let context;
+
+ let filePath = 'processors/dummy-processor.js';
+
+      // Load the script file and create an OfflineAudioContext.
+ audit.define('setup', (task, should) => {
+ context = new OfflineAudioContext(1, 1, sampleRate);
+ context.audioWorklet.addModule(filePath).then(() => {
+ task.done();
+ });
+ });
+
+ // Test AudioWorkletNode construction without AudioWorkletNodeOptions.
+ audit.define('without-audio-node-options', (task, should) => {
+ let testNode;
+ should(
+ () => testNode = new AudioWorkletNode(context, 'dummy'),
+ 'Creating AudioWOrkletNode without options')
+ .notThrow();
+ should(testNode instanceof AudioWorkletNode,
+ 'testNode is instance of AudioWorkletNode').beEqualTo(true);
+ should(testNode.numberOfInputs,
+ 'testNode.numberOfInputs (default)').beEqualTo(1);
+ should(testNode.numberOfOutputs,
+ 'testNode.numberOfOutputs (default)').beEqualTo(1);
+ should(testNode.channelCount,
+ 'testNode.channelCount (default)').beEqualTo(2);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode (default)').beEqualTo('max');
+ should(testNode.channelInterpretation,
+ 'testNode.channelInterpretation (default)')
+ .beEqualTo('speakers');
+ task.done();
+ });
+
+ // Test AudioWorkletNode constructor with AudioNodeOptions.
+ audit.define('audio-node-options', (task, should) => {
+ const options = {
+ numberOfInputs: 7,
+ numberOfOutputs: 18,
+ channelCount: 4,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'discrete'
+ };
+ const optionsString = JSON.stringify(options);
+
+ let testNode;
+ should(
+ () => testNode = new AudioWorkletNode(context, 'dummy', options),
+ 'Creating AudioWOrkletNode with options: ' + optionsString)
+ .notThrow();
+ should(testNode.numberOfInputs,
+ 'testNode.numberOfInputs').beEqualTo(options.numberOfInputs);
+ should(testNode.numberOfOutputs,
+ 'testNode.numberOfOutputs').beEqualTo(options.numberOfOutputs);
+ should(testNode.channelCount,
+ 'testNode.channelCount').beEqualTo(options.channelCount);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode').beEqualTo(options.channelCountMode);
+ should(testNode.channelInterpretation,
+ 'testNode.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelCount.
+ audit.define('channel-count', (task, should) => {
+ const options1 = {channelCount: 17};
+ let testNode = new AudioWorkletNode(context, 'dummy', options1);
+ should(testNode.channelCount, 'testNode.channelCount')
+ .beEqualTo(options1.channelCount);
+
+ const options2 = {channelCount: 0};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options2),
+ 'Creating AudioWorkletNode with channelCount 0')
+ .throw(DOMException, 'NotSupportedError');
+
+ const options3 = {channelCount: 33};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options3),
+ 'Creating AudioWorkletNode with channelCount 33')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelCountMode.
+ audit.define('channel-count-mode', (task, should) => {
+ const channelCountModes = ['max', 'clamped-max', 'explicit'];
+ channelCountModes.forEach((mode) => {
+ const options = {channelCountMode: mode};
+ let testNode = new AudioWorkletNode(context, 'dummy', options);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode (set via options.' + mode + ')')
+ .beEqualTo(options.channelCountMode);
+ });
+
+ const options1 = {channelCountMode: 'foobar'};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options1),
+ 'Creating AudioWorkletNode with channelCountMode "foobar"')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelInterpretation.
+ audit.define('channel-interpretation', (task, should) => {
+ const channelInterpretations = ['speakers', 'discrete'];
+ channelInterpretations.forEach((interpretation) => {
+ const options = {channelInterpretation: interpretation};
+ let testNode = new AudioWorkletNode(context, 'dummy', options);
+ should(
+ testNode.channelInterpretation,
+ 'testNode.channelInterpretation (set via options.' +
+ interpretation + ')')
+ .beEqualTo(options.channelInterpretation);
+ });
+
+ const options1 = {channelInterpretation: 'foobar'};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options1),
+ 'Creating AudioWorkletNode with channelInterpretation "foobar"')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html
new file mode 100644
index 0000000000..c58502af01
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html
@@ -0,0 +1,100 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's Disconnected Input Array Length
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary numbers used to align the test with render quantum boundary.
+ // The sample rate is a power of two to eliminate roundoff in computing
+ // the suspend time needed for the test.
+ let sampleRate = 16384;
+ let renderLength = 8 * RENDER_QUANTUM_FRAMES;
+ let context;
+
+ let filePath = 'processors/input-length-processor.js';
+
+ let testChannelValues = [1, 2, 3];
+
+ // Creates a 3-channel buffer and play with BufferSourceNode. The source
+ // goes through a bypass AudioWorkletNode (gain value of 1).
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Input array length should be zero for disconnected input'
+ },
+ (task, should) => {
+ context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderLength,
+ sampleRate: sampleRate
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let sourceNode = new ConstantSourceNode(context);
+ let workletNode =
+ new AudioWorkletNode(context, 'input-length-processor');
+
+ workletNode.connect(context.destination);
+
+ // Connect the source now.
+ let connectFrame = RENDER_QUANTUM_FRAMES;
+
+ context.suspend(connectFrame / sampleRate)
+ .then(() => {
+ sourceNode.connect(workletNode);
+ })
+ .then(() => context.resume());
+ ;
+
+ // Then disconnect the source after a few renders
+ let disconnectFrame = 3 * RENDER_QUANTUM_FRAMES;
+ context.suspend(disconnectFrame / sampleRate)
+ .then(() => {
+ sourceNode.disconnect(workletNode);
+ })
+ .then(() => context.resume());
+
+ sourceNode.start();
+ context.startRendering()
+ .then(resultBuffer => {
+ let data = resultBuffer.getChannelData(0);
+
+ should(
+ data.slice(0, connectFrame),
+ 'Before connecting the source: Input array length')
+ .beConstantValueOf(0);
+
+ // Find where the output is no longer 0.
+ let nonZeroIndex = data.findIndex(x => x > 0);
+ should(nonZeroIndex, 'First non-zero output')
+ .beEqualTo(connectFrame);
+
+ should(
+ data.slice(
+ nonZeroIndex,
+ nonZeroIndex + (disconnectFrame - connectFrame)),
+ 'While source is connected: Input array length')
+ .beConstantValueOf(RENDER_QUANTUM_FRAMES);
+ should(
+ data.slice(disconnectFrame),
+ 'After disconnecting the source: Input array length')
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
new file mode 100644
index 0000000000..95126a8c86
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html>
+<title>Test onprocessorerror handler in AudioWorkletNode</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+let context = null;
+
+promise_setup(async () => {
+ const sampleRate = 48000;
+ const renderLength = sampleRate * 0.1;
+ context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ // Loads all processor definitions that are necessary for tests in this file.
+ await context.audioWorklet.addModule('./processors/error-processor.js');
+});
+
+promise_test(async () => {
+ const constructorErrorWorkletNode =
+ new AudioWorkletNode(context, 'constructor-error');
+ let error = await new Promise(resolve => {
+ constructorErrorWorkletNode.onprocessorerror = (e) => resolve(e);
+ });
+ assert_true(error instanceof ErrorEvent,
+ 'onprocessorerror argument should be an ErrorEvent when ' +
+ 'the constructor of AudioWorkletProcessor has an error.');
+}, 'Test if |onprocessorerror| is called for an exception thrown from the ' +
+ 'processor constructor.');
+
+promise_test(async () => {
+ // An arbitrary Blob for testing. This is not deserializable on
+ // AudioWorkletGlobalScope.
+ const blob = new Blob([JSON.stringify({ hello: "world"}, null, 2)], {
+ type: "application/json",
+ });
+ const emptyErrorWorkletNode =
+ new AudioWorkletNode(context, 'empty-error', {processorOptions: {blob}});
+ let error = await new Promise(resolve => {
+ emptyErrorWorkletNode.onprocessorerror = (e) => resolve(e);
+ });
+ assert_true(error instanceof ErrorEvent,
+ 'onprocessorerror argument should be an ErrorEvent when ' +
+ 'the constructor of AudioWorkletProcessor has an error.');
+}, 'Test if |onprocessorerror| is called for a transfered object that cannot ' +
+ 'be deserialized on the AudioWorkletGlobalScope.');
+
+promise_test(async () => {
+ const processErrorWorkletNode =
+ new AudioWorkletNode(context, 'process-error');
+ let error = await new Promise(resolve => {
+ processErrorWorkletNode.onprocessorerror = (e) => resolve(e);
+ // Need to start render to cause an exception in process().
+ context.startRendering();
+ });
+ assert_true(error instanceof ErrorEvent,
+ 'onprocessorerror argument should be an ErrorEvent when the ' +
+ 'process method of the AudioWorkletProcessor has an error.');
+}, 'Test if |onprocessorerror| is called upon failure of process() method.');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
new file mode 100644
index 0000000000..8dafa2f811
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
@@ -0,0 +1,80 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test the construction of AudioWorkletNode with real-time context
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+
+ setup(function () {
+ context.audioWorklet.addModule(
+ 'processors/channel-count-processor.js').then(() => audit.run());
+
+      // Test if the output channel count dynamically changes if the input
+      // and output is 1.
+ audit.define(
+ {label: 'Dynamically change the channel count to if unspecified.'},
+ (task, should) => {
+ // Use arbitrary parameters for the test.
+ const buffer = new AudioBuffer({
+ numberOfChannels: 17,
+ length: 1,
+ sampleRate: context.sampleRate,
+ });
+ const source = new AudioBufferSourceNode(context);
+ source.buffer = buffer;
+
+ const node = new AudioWorkletNode(context, 'channel-count', {
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ });
+
+ node.port.onmessage = (message) => {
+ const expected = message.data;
+ should(expected.outputChannel,
+ 'The expected output channel count').beEqualTo(17);
+ task.done();
+ };
+
+          // We need to make an actual connection because the channel count
+          // change happens when the rendering starts. It is to test if the
+          // channel count adapts to the upstream node correctly.
+ source.connect(node).connect(context.destination);
+ source.start();
+ });
+
+ // Test if outputChannelCount is honored as expected even if the input
+ // and output is 1.
+ audit.define(
+ {label: 'Givien outputChannelCount must be honored.'},
+ (task, should) => {
+ const node = new AudioWorkletNode(
+ context, 'channel-count', {
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ outputChannelCount: [2],
+ });
+
+ node.port.onmessage = (message) => {
+ const expected = message.data;
+ should(expected.outputChannel,
+ 'The expected output channel count').beEqualTo(2);
+ task.done();
+ };
+
+          // We need to make an actual connection because the channel count
+          // change might happen when the rendering starts. It is to test
+          // if the specified channel count is kept correctly.
+ node.connect(context.destination);
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html
new file mode 100644
index 0000000000..ea840ed11a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test cross-thread passing of AudioWorkletNodeOptions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+
+ let filePath = 'processors/option-test-processor.js';
+
+ // Create a OptionTestProcessor and feed |processorData| to it. The
+ // processor should echo the received data to the node's |onmessage|
+ // handler.
+ audit.define('valid-processor-data', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ let processorOptions = {
+ description: 'foo',
+ payload: [0, 1, 2, 3]
+ };
+
+ let optionTestNode =
+ new AudioWorkletNode(context, 'option-test-processor', {
+ processorOptions: processorOptions
+ });
+
+ optionTestNode.port.onmessage = (event) => {
+ should(event.data.processorOptions.description,
+ '|description| field in processorOptions from processor("' +
+ event.data.processorOptions.description + '")')
+ .beEqualTo(processorOptions.description,
+ 'the field in node constructor options ("' +
+ processorOptions.description + '")');
+ should(event.data.processorOptions.payload,
+ '|payload| array in processorOptions from processor([' +
+ event.data.processorOptions.payload + '])')
+ .beEqualToArray([0, 1, 2, 3],
+ 'the array in node constructor options ([' +
+ event.data.processorOptions.payload + '])');
+ task.done();
+ };
+ });
+ });
+
+
+ // Passing empty option dictionary should work without a problem.
+ audit.define('empty-option', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ let optionTestNode =
+ new AudioWorkletNode(context, 'option-test-processor');
+
+ optionTestNode.port.onmessage = (event) => {
+ should(Object.keys(event.data).length,
+ 'Number of properties in data from processor')
+ .beEqualTo(2);
+ should(event.data.numberOfInputs,
+ '|numberOfInputs| field in data from processor')
+ .beEqualTo(1);
+ should(event.data.numberOfOutputs,
+ '|numberOfOutputs| field in data from processor')
+ .beEqualToArray(1);
+ task.done();
+ };
+ });
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html
new file mode 100644
index 0000000000..e3fb6e533d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test AudioWorkletProcessor with an invalid parameters array getter
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrarily determined. Any numbers should work.
+ let sampleRate = 16000;
+ let renderLength = 1280;
+ let context;
+ let filePath = 'processors/invalid-param-array-processor.js';
+
+ audit.define('Initializing AudioWorklet and Context', async (task) => {
+ context = new OfflineAudioContext(1, renderLength, sampleRate);
+ await context.audioWorklet.addModule(filePath);
+ task.done();
+ });
+
+ audit.define('Verifying AudioParam in AudioWorkletNode',
+ async (task, should) => {
+ let buffer = context.createBuffer(1, 2, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+
+ let source = new AudioBufferSourceNode(context);
+ source.buffer = buffer;
+ source.loop = true;
+ source.start();
+
+ let workletNode1 =
+ new AudioWorkletNode(context, 'invalid-param-array-1');
+ let workletNode2 =
+ new AudioWorkletNode(context, 'invalid-param-array-2');
+ workletNode1.connect(workletNode2).connect(context.destination);
+
+ // Manually invoke the param getter.
+ source.connect(workletNode2.parameters.get('invalidParam'));
+
+ const renderedBuffer = await context.startRendering();
+
+ // |workletNode2| should be no-op after the parameter getter is
+ // invoked. Therefore, the rendered result should be silent.
+ should(renderedBuffer.getChannelData(0), 'The rendered buffer')
+ .beConstantValueOf(0);
+ task.done();
+ }
+ );
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html
new file mode 100644
index 0000000000..ce0cfa40b6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html
@@ -0,0 +1,56 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test given arrays within AudioWorkletProcessor.process() method
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+ const filePath = 'processors/array-check-processor.js';
+ const context = new AudioContext();
+
+ // Test if the incoming arrays are frozen as expected.
+ audit.define('check-frozen-array', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ const workletNode =
+ new AudioWorkletNode(context, 'array-frozen-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ should(actual.isInputFrozen, '|inputs| is frozen').beTrue();
+ should(actual.isOutputFrozen, '|outputs| is frozen').beTrue();
+ task.done();
+ };
+ });
+ });
+
+ // The incoming arrays should not be transferred, but the associated
+ // ArrayBuffers can be transferred. See the `array-transfer-processor`
+ // definition for the details.
+ audit.define('transfer-frozen-array', (task, should) => {
+ const sourceNode = new ConstantSourceNode(context);
+ const workletNode =
+ new AudioWorkletNode(context, 'array-transfer-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ if (actual.type === 'assertion')
+ should(actual.success, actual.message).beTrue();
+ if (actual.done)
+ task.done();
+ };
+ // To have valid ArrayBuffers for both input and output, we need
+ // both connections.
+ // See: https://github.com/WebAudio/web-audio-api/issues/2566
+ sourceNode.connect(workletNode).connect(context.destination);
+ sourceNode.start();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html
new file mode 100644
index 0000000000..e1c19f0d75
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html
@@ -0,0 +1,36 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test if |outputs| argument is all zero in AudioWorkletProcessor.process()
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+ const filePath = 'processors/zero-outputs-check-processor.js';
+ const context = new AudioContext();
+
+      // Test if the |outputs| argument is all zero as expected.
+ audit.define('check-zero-outputs', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ const workletNode =
+ new AudioWorkletNode(context, 'zero-outputs-check-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ if (actual.type === 'assertion') {
+ should(actual.success, actual.message).beTrue();
+ task.done();
+ }
+ };
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html
new file mode 100644
index 0000000000..079b57b959
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test micro task checkpoints in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <meta charset=utf-8>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ promise_test(async () => {
+ const context = new AudioContext();
+
+ let filePath = 'processors/promise-processor.js';
+
+ await context.audioWorklet.addModule(filePath);
+ await context.suspend();
+ let node1 = new AudioWorkletNode(context, 'promise-processor');
+ let node2 = new AudioWorkletNode(context, 'promise-processor');
+
+ // Connecting to the destination is not strictly necessary in theory,
+ // but see
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=1045926
+ // for why it is in practice.
+ node1.connect(node2).connect(context.destination);
+
+ await context.resume();
+
+ // The second node is the one that is going to receive the message,
+ // per spec: it is the second that will be processed, each time.
+ const e = await new Promise((resolve) => {
+ node2.port.onmessage = resolve;
+ });
+ context.close();
+ assert_equals(e.data, "ok",
+ `Microtask checkpoints are performed
+ in between render quantum`);
+ }, "test");
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html
new file mode 100644
index 0000000000..4281f56379
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Checking BaseAudioContext.audioWorklet
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let realtimeContext = new AudioContext();
+ let offlineContext = new OfflineAudioContext(1, 1, 44100);
+
+ // Test if AudioWorklet exists.
+ audit.define('Test if AudioWorklet exists', (task, should) => {
+ should(realtimeContext.audioWorklet instanceof AudioWorklet &&
+ offlineContext.audioWorklet instanceof AudioWorklet,
+ 'BaseAudioContext.audioWorklet is an instance of AudioWorklet')
+ .beTrue();
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html
new file mode 100644
index 0000000000..75f4aa4020
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html
@@ -0,0 +1,16 @@
+<!doctype html>
+<title>Test AudioWorkletNode subclass with parameters</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+class Extended extends AudioWorkletNode {}
+
+const modulePath = 'processors/gain-processor.js';
+
+promise_test(async () => {
+ const context = new AudioContext();
+ await context.audioWorklet.addModule(modulePath);
+ const node = new Extended(context, 'gain');
+ assert_equals(Object.getPrototypeOf(node), Extended.prototype);
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
new file mode 100644
index 0000000000..a4c59123a1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
@@ -0,0 +1,23 @@
+<!doctype html>
+<title>Test use of 'process' getter for AudioWorkletProcessor callback</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+const do_test = async (node_name) => {
+ const context = new AudioContext();
+ const filePath = `processors/${node_name}-processor.js`;
+ await context.audioWorklet.addModule(filePath);
+ const node = new AudioWorkletNode(context, node_name);
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ assert_equals(event.data.message, "done");
+};
+
+// Includes testing for https://github.com/WebAudio/web-audio-api/pull/2104
+promise_test(async () => do_test('process-getter-test-prototype'),
+ "'process' getter on prototype");
+
+promise_test(async () => do_test('process-getter-test-instance'),
+ "'process' getter on instance");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
new file mode 100644
index 0000000000..4c6a10dfab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
@@ -0,0 +1,87 @@
+<!doctype html>
+<title>Test parameters of process() AudioWorkletProcessor callback</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+var context;
+promise_setup(async (t) => {
+ context = new AudioContext();
+ const filePath = 'processors/process-parameter-test-processor.js';
+ await context.audioWorklet.addModule(filePath);
+});
+
+const get_parameters = async (node, options) => {
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ const inputs = event.data.inputs;
+ assert_equals(inputs.length, options.numberOfInputs, 'inputs length');
+ const outputs = event.data.outputs;
+ assert_equals(outputs.length, options.numberOfOutputs, 'outputs length');
+ for (let port = 0; port < inputs.length; ++port) {
+ for (let channel = 0; channel < inputs[port].length; ++channel) {
+ assert_equals(inputs[port][channel].length, 128,
+ `inputs[${port}][${channel}].length`);
+ }
+ }
+ for (let port = 0; port < outputs.length; ++port) {
+ for (let channel = 0; channel < outputs[port].length; ++channel) {
+ assert_equals(outputs[port][channel].length, 128,
+ `outputs[${port}][${channel}].length`);
+ }
+ }
+ return event.data;
+};
+
+promise_test(async (t) => {
+ const options = {
+ numberOfInputs: 3,
+ numberOfOutputs: 0
+ };
+ // Connect a source so that one channel of one input is active.
+ context.suspend();
+ const source = new ConstantSourceNode(context);
+ source.start();
+ const merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ const active_channel_index = merger.numberOfInputs - 1;
+ source.connect(merger, 0, active_channel_index);
+ const node = new AudioWorkletNode(context, 'process-parameter-test', options);
+ const active_port_index = options.numberOfInputs - 1;
+ merger.connect(node, 0, active_port_index);
+ context.resume();
+ const {inputs} = await get_parameters(node, options);
+ for (let port = 0; port < inputs.length - 1; ++port) {
+ if (port != active_port_index) {
+ assert_equals(inputs[port].length, 0, `inputs[${port}].length`);
+ }
+ }
+ const active_input = inputs[active_port_index];
+ assert_equals(active_input.length, merger.numberOfInputs,
+ 'active_input.length');
+ for (let channel = 0; channel < active_input.length; ++channel) {
+ let expected = channel == active_channel_index ? 1.0 : 0.0;
+ for (let sample = 0; sample < inputs.length; ++sample) {
+ assert_equals(active_input[channel][sample], expected,
+ `active_input[${channel}][${sample}]`);
+ }
+ }
+}, '3 inputs; 0 outputs');
+
+promise_test(async (t) => {
+ const options = {
+ numberOfInputs: 0,
+ numberOfOutputs: 3
+ };
+ const node = new AudioWorkletNode(context, 'process-parameter-test', options);
+ const {outputs} = await get_parameters(node, options);
+ for (let port = 0; port < outputs.length; ++port) {
+ assert_equals(outputs[port].length, 1, `outputs[${port}].length`);
+ for (let channel = 0; channel < outputs[port].length; ++channel) {
+ for (let sample = 0; sample < outputs.length; ++sample) {
+ assert_equals(outputs[port][channel][sample], 0.0,
+ `outputs[${port}][${channel}][${sample}]`);
+ }
+ }
+ }
+}, '0 inputs; 3 outputs');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
new file mode 100644
index 0000000000..6f1aa59225
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
@@ -0,0 +1,61 @@
+<!doctype html>
+<title>Test processor port assignment on processor callback function construction</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+// https://webaudio.github.io/web-audio-api/#AudioWorkletProcessor-instantiation
+
+const get_context_for_node_name = async (node_name) => {
+ const context = new AudioContext();
+ const filePath = `processors/construction-port-${node_name}.js`;
+ await context.audioWorklet.addModule(filePath);
+ return context;
+}
+
+const test_throws = async ({node_name, thrower} = {}) => {
+ const context = await get_context_for_node_name(node_name);
+ const node = new AudioWorkletNode(context, node_name);
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ assert_true(event.data.threw, `${thrower} should throw`);
+ assert_equals(event.data.errorName, "TypeError");
+ assert_true(event.data.isTypeError, "exception should be TypeError");
+};
+
+const throw_tests = [
+ {
+ test_name: 'super() after new AudioWorkletProcessor()',
+ node_name: 'super-after-new',
+ thrower: 'super()'
+ },
+ {
+ test_name: 'new AudioWorkletProcessor() after super()',
+ node_name: 'new-after-super',
+ thrower: 'new AudioWorkletProcessor()'
+ },
+ {
+ test_name: 'new AudioWorkletProcessor() after new AudioWorkletProcessor()',
+ node_name: 'new-after-new',
+ thrower: 'new AudioWorkletProcessor()'
+ }
+];
+for (const test_info of throw_tests) {
+ promise_test(async () => test_throws(test_info), test_info.test_name);
+}
+
+promise_test(async (t) => {
+ const node_name = 'singleton';
+ const context = await get_context_for_node_name(node_name);
+ const node1 = new AudioWorkletNode(context, node_name);
+ const node2 = new AudioWorkletNode(context, node_name);
+ node2.onmessage = t.unreached_func("node2 should not receive a message");
+ let count = 0;
+ await new Promise((resolve) => {
+ node1.port.onmessage = t.step_func((event) => {
+ assert_less_than(count, 2, "message count");
+ if (++count == 2) { resolve(); };
+ });
+ });
+}, 'Singleton AudioWorkletProcessor');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js
new file mode 100644
index 0000000000..ef497733ca
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js
@@ -0,0 +1,54 @@
+/**
+ * @class ActiveProcessingTester
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class sends a message to its AudioWorkletNode whenever the
+ * number of channels on the input changes. The message includes the actual
+ * number of channels, the context time at which this occurred, and whether
+ * we're done processing or not.
+ */
+class ActiveProcessingTester extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ this._lastChannelCount = 0;
+
+ // See if user specified a value for test duration.
+ if (options.hasOwnProperty('processorOptions') &&
+ options.processorOptions.hasOwnProperty('testDuration')) {
+ this._testDuration = options.processorOptions.testDuration;
+ } else {
+ this._testDuration = 5;
+ }
+
+ // Time at which we'll signal we're done, based on the requested
+ // |testDuration|
+ this._endTime = currentTime + this._testDuration;
+ }
+
+ process(inputs, outputs) {
+ const input = inputs[0];
+ const output = outputs[0];
+ const inputChannelCount = input.length;
+ const isFinished = currentTime > this._endTime;
+
+ // Send a message if we're done or the count changed.
+ if (isFinished || (inputChannelCount != this._lastChannelCount)) {
+ this.port.postMessage({
+ channelCount: inputChannelCount,
+ finished: isFinished,
+ time: currentTime
+ });
+ this._lastChannelCount = inputChannelCount;
+ }
+
+ // Just copy the input to the output for no particular reason.
+ for (let channel = 0; channel < input.length; ++channel) {
+ output[channel].set(input[channel]);
+ }
+
+ // When we're finished, this method no longer needs to be called.
+ return !isFinished;
+ }
+}
+
+registerProcessor('active-processing-tester', ActiveProcessingTester);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js
new file mode 100644
index 0000000000..d05056bd84
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js
@@ -0,0 +1,34 @@
+/*
+ * @class AddOffsetProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * Just adds a fixed value to the input
+ */
+class AddOffsetProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super();
+
+ this._offset = options.processorOptions.offset;
+ }
+
+ process(inputs, outputs) {
+ // This processor assumes the node has at least 1 input and 1 output.
+ let input = inputs[0];
+ let output = outputs[0];
+ let outputChannel = output[0];
+
+ if (input.length > 0) {
+ let inputChannel = input[0];
+ for (let k = 0; k < outputChannel.length; ++k)
+ outputChannel[k] = inputChannel[k] + this._offset;
+ } else {
+ // No input connected, so pretend it's silence and just fill the
+ // output with the offset value.
+ outputChannel.fill(this._offset);
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('add-offset-processor', AddOffsetProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js
new file mode 100644
index 0000000000..d6eeff3d15
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js
@@ -0,0 +1,94 @@
+/**
+ * @class ArrayFrozenProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ArrayFrozenProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this._messageSent = false;
+ }
+
+ process(inputs, outputs, parameters) {
+ const input = inputs[0];
+ const output = outputs[0];
+
+ if (!this._messageSent) {
+ this.port.postMessage({
+ inputLength: input.length,
+ isInputFrozen: Object.isFrozen(inputs) && Object.isFrozen(input),
+ outputLength: output.length,
+ isOutputFrozen: Object.isFrozen(outputs) && Object.isFrozen(output)
+ });
+ this._messageSent = true;
+ }
+
+ return false;
+ }
+}
+
+/**
+ * @class ArrayTransferProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ArrayTransferProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this._messageSent = false;
+ }
+
+ process(inputs, outputs, parameters) {
+ const input = inputs[0];
+ const output = outputs[0];
+
+ if (!this._messageSent) {
+ try {
+ // Transferring Array objects should NOT work.
+ this.port.postMessage({
+ inputs, input, inputChannel: input[0],
+ outputs, output, outputChannel: output[0]
+ }, [inputs, input, inputs[0], outputs, output, output[0]]);
+ // Hence, the following must NOT be reached.
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Transferring inputs/outputs, an individual input/output ' +
+ 'array, or a channel Float32Array MUST fail, but succeeded.'
+ });
+ } catch (error) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: 'Transferring inputs/outputs, an individual input/output ' +
+ 'array, or a channel Float32Array is not allowed as expected.'
+ });
+ }
+
+ try {
+ // Transferring ArrayBuffers should work.
+ this.port.postMessage(
+ {inputChannel: input[0], outputChannel: output[0]},
+ [input[0].buffer, output[0].buffer]);
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: 'Transferring ArrayBuffers was successful as expected.'
+ });
+ } catch (error) {
+ // This must NOT be reached.
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Transferring ArrayBuffers unexpectedly failed.'
+ });
+ }
+
+ this.port.postMessage({done: true});
+ this._messageSent = true;
+ }
+
+ return false;
+ }
+}
+
+registerProcessor('array-frozen-processor', ArrayFrozenProcessor);
+registerProcessor('array-transfer-processor', ArrayTransferProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js
new file mode 100644
index 0000000000..556459f46b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js
@@ -0,0 +1,19 @@
+/**
+ * @class ChannelCountProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ChannelCountProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ }
+
+ process(inputs, outputs) {
+ this.port.postMessage({
+ inputChannel: inputs[0].length,
+ outputChannel: outputs[0].length
+ });
+ return false;
+ }
+}
+
+registerProcessor('channel-count', ChannelCountProcessor); \ No newline at end of file
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
new file mode 100644
index 0000000000..d4c63f7775
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
@@ -0,0 +1,16 @@
+class NewAfterNew extends AudioWorkletProcessor {
+ constructor() {
+ const processor = new AudioWorkletProcessor()
+ let message = {threw: false};
+ try {
+ new AudioWorkletProcessor();
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ processor.port.postMessage(message);
+ return processor;
+ }
+}
+registerProcessor("new-after-new", NewAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
new file mode 100644
index 0000000000..a6d4f0e2e8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
@@ -0,0 +1,15 @@
+class NewAfterSuper extends AudioWorkletProcessor {
+ constructor() {
+ super()
+ let message = {threw: false};
+ try {
+ new AudioWorkletProcessor()
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ this.port.postMessage(message);
+ }
+}
+registerProcessor("new-after-super", NewAfterSuper);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
new file mode 100644
index 0000000000..c40b5a7179
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
@@ -0,0 +1,16 @@
+let singleton;
+class Singleton extends AudioWorkletProcessor {
+ constructor() {
+ if (!singleton) {
+ singleton = new AudioWorkletProcessor();
+ singleton.process = function() {
+ this.port.postMessage({message: "process called"});
+ // This function will be called at most once for each AudioWorkletNode
+ // if the node has no input connections.
+ return false;
+ }
+ }
+ return singleton;
+ }
+}
+registerProcessor("singleton", Singleton);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
new file mode 100644
index 0000000000..e447830c5f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
@@ -0,0 +1,16 @@
+class SuperAfterNew extends AudioWorkletProcessor {
+ constructor() {
+ const processor = new AudioWorkletProcessor()
+ let message = {threw: false};
+ try {
+ super();
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ processor.port.postMessage(message);
+ return processor;
+ }
+}
+registerProcessor("super-after-new", SuperAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/denormal-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/denormal-test-processor.js
new file mode 100644
index 0000000000..2b7929437d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/denormal-test-processor.js
@@ -0,0 +1,12 @@
+class DenormalTestProcessor extends AudioWorkletProcessor {
+ process() {
+ // The denormals should be non-zeros. Otherwise, it's a violation of
+ // ECMA specification: https://tc39.es/ecma262/#sec-number.min_value
+ this.port.postMessage({
+ result: Number.MIN_VALUE !== 0.0
+ });
+ return false;
+ }
+}
+
+registerProcessor('denormal-test', DenormalTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js
new file mode 100644
index 0000000000..d1b16cc9aa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js
@@ -0,0 +1,12 @@
+class DummyProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ // Doesn't do anything here.
+ return true;
+ }
+}
+
+globalThis.registerProcessor('dummy-globalthis', DummyProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js
new file mode 100644
index 0000000000..11155d508c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js
@@ -0,0 +1,18 @@
+/**
+ * @class DummyProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the bare-bone structure of the processor.
+ */
+class DummyProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ // Doesn't do anything here.
+ return true;
+ }
+}
+
+registerProcessor('dummy', DummyProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js
new file mode 100644
index 0000000000..5e825aebb4
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js
@@ -0,0 +1,22 @@
+class ProcessorA extends AudioWorkletProcessor {
+ process() {
+ return true;
+ }
+}
+
+// ProcessorB registers ProcessorA upon construction.
+class ProcessorB extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = () => {
+ registerProcessor('ProcessorA', ProcessorA);
+ this.port.postMessage({});
+ };
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('ProcessorB', ProcessorB);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js
new file mode 100644
index 0000000000..66ff5e2e25
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js
@@ -0,0 +1,40 @@
+/**
+ * @class ConstructorErrorProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ConstructorErrorProcessor extends AudioWorkletProcessor {
+ constructor() {
+ throw 'ConstructorErrorProcessor: an error thrown from constructor.';
+ }
+
+ process() {
+ return true;
+ }
+}
+
+
+/**
+ * @class ProcessErrorProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ProcessErrorProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process() {
+ throw 'ProcessErrorProcessor: an error throw from process method.';
+ return true;
+ }
+}
+
+
+/**
+ * @class EmptyErrorProcessor
+ * @extends AudioWorkletProcessor
+ */
+class EmptyErrorProcessor extends AudioWorkletProcessor { process() {} }
+
+registerProcessor('constructor-error', ConstructorErrorProcessor);
+registerProcessor('process-error', ProcessErrorProcessor);
+registerProcessor('empty-error', EmptyErrorProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js
new file mode 100644
index 0000000000..e9e130e374
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js
@@ -0,0 +1,38 @@
+/**
+ * @class GainProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the bare-bone structure of the processor.
+ */
+class GainProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return [
+ {name: 'gain', defaultValue: 0.707}
+ ];
+ }
+
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ let gain = parameters.gain;
+ for (let channel = 0; channel < input.length; ++channel) {
+ let inputChannel = input[channel];
+ let outputChannel = output[channel];
+ if (gain.length === 1) {
+ for (let i = 0; i < inputChannel.length; ++i)
+ outputChannel[i] = inputChannel[i] * gain[0];
+ } else {
+ for (let i = 0; i < inputChannel.length; ++i)
+ outputChannel[i] = inputChannel[i] * gain[i];
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('gain', GainProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js
new file mode 100644
index 0000000000..6d53ba84c7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js
@@ -0,0 +1,22 @@
+/**
+ * @class CountProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class just looks at the number of input channels on the first
+ * input and fills the first output channel with that value.
+ */
+class CountProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ output[0].fill(input.length);
+
+ return true;
+ }
+}
+
+registerProcessor('counter', CountProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js
new file mode 100644
index 0000000000..be485f03e8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js
@@ -0,0 +1,27 @@
+/**
+ * @class InputLengthProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class just sets the output to the length of the
+ * input array for verifying that the input length changes when the
+ * input is disconnected.
+ */
+class InputLengthProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+
+ // Set output channel to the length of the input channel array.
+ // If the input is unconnected, set the value to zero.
+ const fillValue = input.length > 0 ? input[0].length : 0;
+ output[0].fill(fillValue);
+
+ return true;
+ }
+}
+
+registerProcessor('input-length-processor', InputLengthProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js
new file mode 100644
index 0000000000..e4a5dc39ba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js
@@ -0,0 +1,47 @@
+/**
+ * @class InvalidParamArrayProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor intentionally returns an array with an invalid size when the
+ * processor's getter is queried.
+ */
+let singleton = undefined;
+let secondFetch = false;
+let useDescriptor = false;
+let processCounter = 0;
+
+class InvalidParamArrayProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ if (useDescriptor)
+ return [{name: 'invalidParam'}];
+ useDescriptor = true;
+ return [];
+ }
+
+ constructor() {
+ super();
+ if (singleton === undefined)
+ singleton = this;
+ return singleton;
+ }
+
+ process(inputs, outputs, parameters) {
+ const output = outputs[0];
+ for (let channel = 0; channel < output.length; ++channel)
+ output[channel].fill(1);
+ return false;
+ }
+}
+
+// This overridden getter is invoked under the hood before process() gets
+// called. After this gets called, process() method above will be invalidated,
+// and mark the worklet node non-functional. (i.e. in an error state)
+Object.defineProperty(Object.prototype, 'invalidParam', {'get': () => {
+ if (secondFetch)
+ return new Float32Array(256);
+ secondFetch = true;
+ return new Float32Array(128);
+}});
+
+registerProcessor('invalid-param-array-1', InvalidParamArrayProcessor);
+registerProcessor('invalid-param-array-2', InvalidParamArrayProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js
new file mode 100644
index 0000000000..0bcc43f6f0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js
@@ -0,0 +1,49 @@
+/**
+ * @class OnePoleFilter
+ * @extends AudioWorkletProcessor
+ *
+ * A simple One-pole filter.
+ */
+
+class OnePoleFilter extends AudioWorkletProcessor {
+
+ // This gets evaluated as soon as the global scope is created.
+ static get parameterDescriptors() {
+ return [{
+ name: 'frequency',
+ defaultValue: 250,
+ minValue: 0,
+ maxValue: 0.5 * sampleRate
+ }];
+ }
+
+ constructor() {
+ super();
+ this.updateCoefficientsWithFrequency_(250);
+ }
+
+ updateCoefficientsWithFrequency_(frequency) {
+ this.b1_ = Math.exp(-2 * Math.PI * frequency / sampleRate);
+ this.a0_ = 1.0 - this.b1_;
+ this.z1_ = 0;
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ let frequency = parameters.frequency;
+ for (let channel = 0; channel < output.length; ++channel) {
+ let inputChannel = input[channel];
+ let outputChannel = output[channel];
+ for (let i = 0; i < outputChannel.length; ++i) {
+ this.updateCoefficientsWithFrequency_(frequency[i]);
+ this.z1_ = inputChannel[i] * this.a0_ + this.z1_ * this.b1_;
+ outputChannel[i] = this.z1_;
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('one-pole-filter', OnePoleFilter);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js
new file mode 100644
index 0000000000..27e1da6325
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js
@@ -0,0 +1,19 @@
+/**
+ * @class OptionTestProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the option passing feature by echoing the
+ * received |nodeOptions| back to the node.
+ */
+class OptionTestProcessor extends AudioWorkletProcessor {
+ constructor(nodeOptions) {
+ super();
+ this.port.postMessage(nodeOptions);
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('option-test-processor', OptionTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js
new file mode 100644
index 0000000000..d7ce836500
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js
@@ -0,0 +1,30 @@
+/**
+ * @class ParamSizeProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor is a source node which basically outputs the size of the
+ * AudioParam array for each render quantum.
+ */
+
+class ParamSizeProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return [{name: 'param'}];
+ }
+
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let output = outputs[0];
+ let param = parameters.param;
+
+ for (let channel = 0; channel < output.length; ++channel) {
+ output[channel].fill(param.length);
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('param-size', ParamSizeProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js
new file mode 100644
index 0000000000..8def5a61d7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js
@@ -0,0 +1,34 @@
+/**
+ * @class PortProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the message port functionality.
+ */
+class PortProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.handleMessage.bind(this);
+ this.port.postMessage({
+ state: 'created',
+ timeStamp: currentTime,
+ currentFrame: currentFrame
+ });
+ this.processCallCount = 0;
+ }
+
+ handleMessage(event) {
+ this.port.postMessage({
+ message: event.data,
+ timeStamp: currentTime,
+ currentFrame: currentFrame,
+ processCallCount: this.processCallCount
+ });
+ }
+
+ process() {
+ ++this.processCallCount;
+ return true;
+ }
+}
+
+registerProcessor('port-processor', PortProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
new file mode 100644
index 0000000000..b1434f54ba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
@@ -0,0 +1,44 @@
+/**
+ * @class ProcessGetterTestInstanceProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on an
+ * AudioWorkletProcessorConstructor instance is called at the right times.
+ */
+
+class ProcessGetterTestInstanceProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.getterCallCount = 0;
+ this.totalProcessCallCount = 0;
+ Object.defineProperty(this, 'process', { get: function() {
+ if (!(this instanceof ProcessGetterTestInstanceProcessor)) {
+ throw new Error('`process` getter called with bad `this`.');
+ }
+ ++this.getterCallCount;
+ let functionCallCount = 0;
+ return () => {
+ if (++functionCallCount > 1) {
+ const message = 'Closure of function returned from `process` getter' +
+ ' should be used for only one call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ if (++this.totalProcessCallCount < 2) {
+ return true; // Expect another getter call.
+ }
+ if (this.totalProcessCallCount != this.getterCallCount) {
+ const message =
+ 'Getter should be called only once for each process() call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ this.port.postMessage({message: 'done'});
+ return false; // No more calls required.
+ };
+ }});
+ }
+}
+
+registerProcessor('process-getter-test-instance',
+ ProcessGetterTestInstanceProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
new file mode 100644
index 0000000000..cef5fa8b52
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
@@ -0,0 +1,55 @@
+/**
+ * @class ProcessGetterTestPrototypeProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on
+ * AudioWorkletProcessorConstructor is called at the right times.
+ */
+
+// Reporting errors during registerProcessor() is awkward.
+// The occurrence of an error is flagged, so that a trial registration can be
+// performed and registration against the expected AudioWorkletNode name is
+// performed only if no errors are flagged during the trial registration.
+let error_flag = false;
+
+class ProcessGetterTestPrototypeProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.getterCallCount = 0;
+ this.totalProcessCallCount = 0;
+ }
+ get process() {
+ if (!(this instanceof ProcessGetterTestPrototypeProcessor)) {
+ error_flag = true;
+ throw new Error('`process` getter called with bad `this`.');
+ }
+ ++this.getterCallCount;
+ let functionCallCount = 0;
+ return () => {
+ if (++functionCallCount > 1) {
+ const message = 'Closure of function returned from `process` getter' +
+ ' should be used for only one call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ if (++this.totalProcessCallCount < 2) {
+ return true; // Expect another getter call.
+ }
+ if (this.totalProcessCallCount != this.getterCallCount) {
+ const message =
+ 'Getter should be called only once for each process() call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ this.port.postMessage({message: 'done'});
+ return false; // No more calls required.
+ };
+ }
+}
+
+registerProcessor('trial-process-getter-test-prototype',
+ ProcessGetterTestPrototypeProcessor);
+if (!error_flag) {
+ registerProcessor('process-getter-test-prototype',
+ ProcessGetterTestPrototypeProcessor);
+}
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
new file mode 100644
index 0000000000..a300d3cdec
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
@@ -0,0 +1,18 @@
+/**
+ * @class ProcessParameterTestProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class forwards input and output parameters to its
+ * AudioWorkletNode.
+ */
+class ProcessParameterTestProcessor extends AudioWorkletProcessor {
+ process(inputs, outputs) {
+ this.port.postMessage({
+ inputs: inputs,
+ outputs: outputs
+ });
+ return false;
+ }
+}
+
+registerProcessor('process-parameter-test', ProcessParameterTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js
new file mode 100644
index 0000000000..6a8144b3cc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js
@@ -0,0 +1,40 @@
+/**
+ * @class PromiseProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor creates and resolves a promise in its `process` method. When
+ * the handler passed to `then()` is called, a counter that is global in the
+ * global scope is incremented. There are two copies of this
+ * AudioWorkletNode/Processor, so the counter should always be even in the
+ * process method of the AudioWorklet processing, since the Promise completion
+ * handlers are resolved in between render quanta.
+ *
+ * After a few iterations of the test, one of the worklets posts back the string
+ * "ok" to the main thread, and the test is considered a success.
+ */
+var idx = 0;
+
+class PromiseProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ }
+
+ process(inputs, outputs) {
+ if (idx % 2 != 0) {
+ this.port.postMessage("ko");
+ // Don't bother continuing calling process in this case, the test has
+ // already failed.
+ return false;
+ }
+ Promise.resolve().then(() => {
+ idx++;
+ if (idx == 100) {
+ this.port.postMessage("ok");
+ }
+ });
+ // Ensure process is called again.
+ return true;
+ }
+}
+
+registerProcessor('promise-processor', PromiseProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/register-processor-typeerrors.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/register-processor-typeerrors.js
new file mode 100644
index 0000000000..93894842fc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/register-processor-typeerrors.js
@@ -0,0 +1,39 @@
+// For cross-thread messaging.
+class MessengerProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.startTest.bind(this);
+ }
+
+ process() {}
+
+ startTest(message) {
+ runRegisterProcessorTest(this.port);
+ }
+}
+
+function runRegisterProcessorTest(messagePort) {
+ try {
+ // TypeError when a given parameter is not a Function.
+ const DummyObject = {};
+ registerProcessor('type-error-on-object', DummyObject);
+ } catch (exception) {
+ messagePort.postMessage({
+ name: exception.name,
+ message: exception.message
+ });
+ }
+
+ try {
+ // TypeError When a given parameter is a Function, but not a constructor.
+ const DummyFunction = () => {};
+ registerProcessor('type-error-on-function', DummyFunction);
+ } catch (exception) {
+ messagePort.postMessage({
+ name: exception.name,
+ message: exception.message
+ });
+ }
+}
+
+registerProcessor('messenger-processor', MessengerProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js
new file mode 100644
index 0000000000..2ccacccd4b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js
@@ -0,0 +1,35 @@
+/**
+ * @class SharedArrayBufferProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates passing SharedArrayBuffers to and from
+ * workers.
+ */
+class SharedArrayBufferProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.handleMessage.bind(this);
+ this.port.onmessageerror = this.handleMessageError.bind(this);
+ let sab = new SharedArrayBuffer(8);
+ this.port.postMessage({state: 'created', sab});
+ }
+
+ handleMessage(event) {
+ this.port.postMessage({
+ state: 'received message',
+ isSab: event.data instanceof SharedArrayBuffer
+ });
+ }
+
+ handleMessageError(event) {
+ this.port.postMessage({
+ state: 'received messageerror'
+ });
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('sharedarraybuffer-processor', SharedArrayBufferProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js
new file mode 100644
index 0000000000..714e32dbb5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js
@@ -0,0 +1,25 @@
+/**
+ * @class TimingInfoProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class is to test the timing information in AWGS.
+ */
+class TimingInfoProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.echoMessage.bind(this);
+ }
+
+ echoMessage(event) {
+ this.port.postMessage({
+ currentTime: currentTime,
+ currentFrame: currentFrame
+ });
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('timing-info-processor', TimingInfoProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js
new file mode 100644
index 0000000000..2d7399ca3b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js
@@ -0,0 +1,42 @@
+/**
+ * @class ZeroOutputProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor accumulates the incoming buffer and sends the buffered data
+ * to the main thread when it reaches the specified frame length. The processor
+ * only supports a single input.
+ */
+
+const kRenderQuantumFrames = 128;
+
+class ZeroOutputProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super();
+
+ this._framesRequested = options.processorOptions.bufferLength;
+ this._framesCaptured = 0;
+ this._buffer = [];
+ for (let i = 0; i < options.processorOptions.channeCount; ++i) {
+ this._buffer[i] = new Float32Array(this._framesRequested);
+ }
+ }
+
+ process(inputs) {
+ let input = inputs[0];
+ let startIndex = this._framesCaptured;
+ let endIndex = startIndex + kRenderQuantumFrames;
+ for (let i = 0; i < this._buffer.length; ++i) {
+ this._buffer[i].subarray(startIndex, endIndex).set(input[i]);
+ }
+ this._framesCaptured = endIndex;
+
+ if (this._framesCaptured >= this._framesRequested) {
+ this.port.postMessage({ capturedBuffer: this._buffer });
+ return false;
+ } else {
+ return true;
+ }
+ }
+}
+
+registerProcessor('zero-output-processor', ZeroOutputProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js
new file mode 100644
index 0000000000..f816e918a2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js
@@ -0,0 +1,78 @@
+/**
+ * Returns true if a given AudioPort is completely filled with zero samples.
+ * "AudioPort" is a short-hand for FrozenArray<FrozenArray<Float32Array>>.
+ *
+ * @param {FrozenArray<FrozenArray<Float32Array>>} audioPort
+ * @returns bool
+ */
+function IsAllZero(audioPort) {
+ for (let busIndex = 0; busIndex < audioPort.length; ++busIndex) {
+ const audioBus = audioPort[busIndex];
+ for (let channelIndex = 0; channelIndex < audioBus.length; ++channelIndex) {
+ const audioChannel = audioBus[channelIndex];
+ for (let sample = 0; sample < audioChannel.length; ++sample) {
+ if (audioChannel[sample] != 0)
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+const kRenderQuantumFrames = 128;
+const kTestLengthInSec = 1.0;
+const kPulseDuration = 100;
+
+/**
+ * Checks the |outputs| argument of AudioWorkletProcessor.process() and
+ * sends a message to an associated AudioWorkletNode. It needs to be all zero
+ * at all times.
+ *
+ * @class ZeroOutputsCheckProcessor
+ * @extends {AudioWorkletProcessor}
+ */
+class ZeroOutputsCheckProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.startTime = currentTime;
+ this.counter = 0;
+ }
+
+ process(inputs, outputs) {
+ if (!IsAllZero(outputs)) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Unexpected Non-zero sample found in |outputs|.'
+ });
+ return false;
+ }
+
+ if (currentTime - this.startTime >= kTestLengthInSec) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: `|outputs| has been all zeros for ${kTestLengthInSec} ` +
+ 'seconds as expected.'
+ });
+ return false;
+ }
+
+ // Every ~0.25 second (100 render quanta), switch between outputting white
+ // noise and just exiting without doing anything. (from crbug.com/1099756)
+ this.counter++;
+ if (Math.floor(this.counter / kPulseDuration) % 2 == 0)
+ return true;
+
+ let output = outputs[0];
+ for (let channel = 0; channel < output.length; ++channel) {
+ for (let sample = 0; sample < 128; sample++) {
+ output[channel][sample] = 0.1 * (Math.random() - 0.5);
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('zero-outputs-check-processor', ZeroOutputsCheckProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html
new file mode 100644
index 0000000000..7b9e7f0ac3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html
@@ -0,0 +1,90 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Test Simple AudioWorklet I/O</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Arbitrary sample rate
+ const sampleRate = 48000;
+
+ // The offset to be applied by the worklet to its inputs.
+ const offset = 1;
+
+ // Location of the worklet's code
+ const filePath = 'processors/add-offset.js';
+
+ let audit = Audit.createTaskRunner();
+
+ // Context to be used for the tests.
+ let context;
+
+ audit.define('Initialize worklet', (task, should) => {
+ // Two channels for testing. Channel 0 is the output of the
+ // AudioWorklet. Channel 1 is the oscillator so we can compare
+ // the outputs.
+ context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate});
+
+ // Load up the code for the worklet.
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Creation of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'test', description: 'Simple AudioWorklet I/O'},
+ (task, should) => {
+ let merger = new ChannelMergerNode(
+ context, {numberOfChannels: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ let worklet = new AudioWorkletNode(
+ context, 'add-offset-processor',
+ {processorOptions: {offset: offset}});
+
+ src.connect(worklet).connect(merger, 0, 0);
+ src.connect(merger, 0, 1);
+
+ // Start and stop the source. The stop time is fairly arbitrary,
+ // but use a render quantum boundary for simplicity.
+ const stopFrame = RENDER_QUANTUM_FRAMES;
+ src.start(0);
+ src.stop(stopFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(buffer => {
+ let ch0 = buffer.getChannelData(0);
+ let ch1 = buffer.getChannelData(1);
+
+ let shifted = ch1.slice(0, stopFrame).map(x => x + offset);
+
+ // The initial part of the output should be the oscillator
+ // shifted by |offset|.
+ should(
+ ch0.slice(0, stopFrame),
+ `AudioWorklet output[0:${stopFrame - 1}]`)
+ .beCloseToArray(shifted, {absoluteThreshold: 0});
+
+ // Output should be constant after the source has stopped.
+ should(
+ ch0.slice(stopFrame),
+ `AudioWorklet output[${stopFrame}:]`)
+ .beConstantValueOf(offset);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html
new file mode 100644
index 0000000000..f6fa6ddd98
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<title>Test MessagePort while AudioContext is not running</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+const get_node_and_message = (context) => {  // create a 'port-processor' node; resolve with its first port message
+  const node = new AudioWorkletNode(context, 'port-processor');
+  return new Promise((resolve) => {
+    node.port.onmessage = (event) => resolve({node: node, event: event});  // resolve on the first message only
+  });
+};
+const ping_for_message = (node) => {  // post 'ping' to the processor; resolve with the reply event
+  return new Promise((resolve) => {
+    node.port.onmessage = resolve;  // replaces any handler set by get_node_and_message
+    node.port.postMessage('ping');
+  });
+};
+const modulePath = 'processors/port-processor.js';
+
+promise_test(async () => {
+  const realtime = new AudioContext();
+  await realtime.audioWorklet.addModule(modulePath);
+  await realtime.suspend();  // freeze currentTime before creating the node
+  const currentTime = realtime.currentTime;
+  let {node, event} = await get_node_and_message(realtime);
+  assert_equals(event.data.timeStamp, currentTime, 'created message time');  // message carries the suspended clock
+  event = await ping_for_message(node);
+  assert_equals(event.data.timeStamp, currentTime, 'pong time');  // clock must not advance while suspended
+}, 'realtime suspended');
+
+let offline;  // shared with the following 'offline on complete' test
+promise_test(async () => {
+  offline = new OfflineAudioContext({length: 128 + 1, sampleRate: 16384});  // length > 128 forces two render quanta
+  await offline.audioWorklet.addModule(modulePath);
+  assert_equals(offline.currentTime, 0, 'time before start');  // rendering has not begun
+  let {node, event} = await get_node_and_message(offline);
+  assert_equals(event.data.timeStamp, 0, 'created time before start');
+  event = await ping_for_message(node);
+  assert_equals(event.data.timeStamp, 0, 'pong time before start');
+}, 'offline before start');
+
+promise_test(async () => {
+  await offline.startRendering();
+  const expected = 2 * 128 / offline.sampleRate;  // 129 frames round up to two 128-frame quanta
+  assert_equals(offline.currentTime, expected, 'time on complete');
+  let {node, event} = await get_node_and_message(offline);
+  assert_equals(event.data.timeStamp, expected, "created time on complete");  // clock stays at final render time
+  event = await ping_for_message(node);
+  assert_equals(event.data.timeStamp, expected, "pong time on complete");
+}, 'offline on complete');
+</script>