summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface
diff options
context:
space:
mode:
Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface')
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html144
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html855
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html161
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html103
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html240
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html63
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html73
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html60
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html143
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html497
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html61
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html57
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html426
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html71
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html120
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js155
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html167
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html155
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html411
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html164
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html77
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html79
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html456
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html111
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html139
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html176
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html156
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html49
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html145
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html47
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html578
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html88
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html238
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html178
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html48
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html92
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html70
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html70
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html80
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html74
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html67
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js29
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html93
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html54
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html48
45 files changed, 7368 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html
new file mode 100644
index 0000000000..ab527b6695
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html
@@ -0,0 +1,144 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Adding Events</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary power of two to eliminate round-off in computing time from
+ // frame.
+ const sampleRate = 8192;
+
+ audit.define(
+ {
+ label: 'linearRamp',
+ description: 'Insert linearRamp after running for some time'
+ },
+ (task, should) => {
+ testInsertion(should, {
+ method: 'linearRampToValueAtTime',
+ prefix: 'linearRamp'
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'expoRamp',
+ description: 'Insert expoRamp after running for some time'
+ },
+ (task, should) => {
+ testInsertion(should, {
+ method: 'exponentialRampToValueAtTime',
+ prefix: 'expoRamp'
+ }).then(() => task.done());
+ });
+
+ // Test insertion of an event in the middle of rendering.
+ //
+ // options dictionary:
+ // method - automation method to test
+ // prefix - string to use for prefixing messages
+ function testInsertion(should, options) {
+ let {method, prefix} = options;
+
+ // Channel 0 is the output for the test, and channel 1 is the
+ // reference output.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate});
+ let merger = new ChannelMergerNode(
+ context, {numberOfChannels: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+ // Initial value and final values of the source node
+ let initialValue = 1;
+ let finalValue = 2;
+
+ // Set up the node for the automations under test
+ let src = new ConstantSourceNode(context, {offset: initialValue});
+ src.connect(merger, 0, 0);
+
+ // Set initial event to occur at this time. Keep it in the first
+ // render quantum.
+ const initialEventTime = 64 / context.sampleRate;
+ should(
+ () => src.offset.setValueAtTime(initialValue, initialEventTime),
+ `${prefix}: setValueAtTime(${initialValue}, ${initialEventTime})`)
+ .notThrow();
+
+ // Let time pass and then add a new event with time in the future.
+ let insertAtFrame = 512;
+ let insertTime = insertAtFrame / context.sampleRate;
+ let automationEndFrame = 1024 + 64;
+ let automationEndTime = automationEndFrame / context.sampleRate;
+ context.suspend(insertTime)
+ .then(() => {
+ should(
+ () => src.offset[method](finalValue, automationEndTime),
+ `${prefix}: At time ${insertTime} scheduling ${method}(${
+ finalValue}, ${automationEndTime})`)
+ .notThrow();
+ })
+ .then(() => context.resume());
+
+ // Set up graph for the reference result. Automate the source with
+ // the events scheduled from the beginning. Let the gain node
+ // simulate the insertion of the event above. This is done by
+ // setting the gain to 1 at the insertion time.
+ let srcRef = new ConstantSourceNode(context, {offset: 1});
+ let g = new GainNode(context, {gain: 0});
+ srcRef.connect(g).connect(merger, 0, 1);
+ srcRef.offset.setValueAtTime(initialValue, initialEventTime);
+ srcRef.offset[method](finalValue, automationEndTime);
+
+        // Allow everything through after |insertAtFrame| frames.
+ g.gain.setValueAtTime(1, insertTime);
+
+ // Go!
+ src.start();
+ srcRef.start();
+
+ return context.startRendering().then(audioBuffer => {
+ let actual = audioBuffer.getChannelData(0);
+ let expected = audioBuffer.getChannelData(1);
+
+ // Verify that the output is 1 until we reach
+ // insertAtFrame. Ignore the expected data because that always
+ // produces 1.
+ should(
+ actual.slice(0, insertAtFrame),
+ `${prefix}: output[0:${insertAtFrame - 1}]`)
+ .beConstantValueOf(initialValue);
+
+ // Verify ramp is correct by comparing it to the expected
+ // data.
+ should(
+ actual.slice(
+ insertAtFrame, automationEndFrame - insertAtFrame + 1),
+ `${prefix}: output[${insertAtFrame}:${
+ automationEndFrame - insertAtFrame}]`)
+ .beCloseToArray(
+ expected.slice(
+ insertAtFrame, automationEndFrame - insertAtFrame + 1),
+ {absoluteThreshold: 0, numberOfArrayElements: 0});
+
+ // Verify final output has the expected value
+ should(
+ actual.slice(automationEndFrame),
+ `${prefix}: output[${automationEndFrame}:]`)
+ .beConstantValueOf(finalValue);
+ })
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html
new file mode 100644
index 0000000000..0a8e7a7f2f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html
@@ -0,0 +1,855 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test CancelValuesAndHoldAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let renderDuration = 0.5;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'cancelTime', description: 'Test Invalid Values'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: 1,
+ sampleRate: 8000
+ });
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ should(
+ () => src.offset.cancelAndHoldAtTime(-1),
+ 'cancelAndHoldAtTime(-1)')
+ .throw(RangeError);
+
+ // These are TypeErrors because |cancelTime| is a
+ // double, not unrestricted double.
+ should(
+ () => src.offset.cancelAndHoldAtTime(NaN),
+ 'cancelAndHoldAtTime(NaN)')
+ .throw(TypeError);
+
+ should(
+ () => src.offset.cancelAndHoldAtTime(Infinity),
+ 'cancelAndHoldAtTime(Infinity)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // The first few tasks test the cancellation of each relevant automation
+ // function. For the test, a simple linear ramp from 0 to 1 is used to
+ // start things off. Then the automation to be tested is scheduled and
+ // cancelled.
+
+ audit.define(
+ {label: 'linear', description: 'Cancel linearRampToValueAtTime'},
+ function(task, should) {
+ cancelTest(should, linearRampTest('linearRampToValueAtTime'), {
+ valueThreshold: 8.3998e-5,
+ curveThreshold: 5.9605e-5
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'exponential', description: 'Cancel exponentialRampAtTime'},
+ function(task, should) {
+ // Cancel an exponential ramp. The thresholds are experimentally
+ // determined.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ // After the linear ramp, schedule an exponential ramp to the end.
+          // (This is the event that will be cancelled.)
+ let v1 = 0.001;
+ let t1 = renderDuration;
+
+ g[0].gain.exponentialRampToValueAtTime(v1, t1);
+ g[1].gain.exponentialRampToValueAtTime(v1, t1);
+
+ expectedConstant = Math.fround(
+ v0 * Math.pow(v1 / v0, (cancelTime - t0) / (t1 - t0)));
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'exponentialRampToValue(' + v1 + ', ' + t1 + ')',
+ summary: 'exponentialRampToValueAtTime',
+ };
+ }, {
+ valueThreshold: 1.8664e-6,
+ curveThreshold: 5.9605e-8
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'setTarget', description: 'Cancel setTargetAtTime'},
+ function(task, should) {
+ // Cancel a setTarget event.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ // At the end of the linear ramp, schedule a setTarget. (This is
+ // the event that will be cancelled.)
+ let v1 = 0;
+ let t1 = t0;
+ let timeConstant = 0.05;
+
+ g[0].gain.setTargetAtTime(v1, t1, timeConstant);
+ g[1].gain.setTargetAtTime(v1, t1, timeConstant);
+
+ expectedConstant = Math.fround(
+ v1 + (v0 - v1) * Math.exp(-(cancelTime - t0) / timeConstant));
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setTargetAtTime(' + v1 + ', ' + t1 + ', ' +
+ timeConstant + ')',
+ summary: 'setTargetAtTime',
+ };
+ }, {
+ valueThreshold: 4.5267e-7, // 1.1317e-7,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'setValueCurve', description: 'Cancel setValueCurveAtTime'},
+ function(task, should) {
+ // Cancel a setValueCurve event.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+          // After the linear ramp, schedule a setValueCurve. (This is the
+ // event that will be cancelled.)
+ let v1 = 0;
+ let duration = renderDuration - t0;
+
+ // For simplicity, a 2-point curve so we get a linear interpolated
+ // result.
+ let curve = Float32Array.from([v0, 0]);
+
+ g[0].gain.setValueCurveAtTime(curve, t0, duration);
+ g[1].gain.setValueCurveAtTime(curve, t0, duration);
+
+ let index =
+ Math.floor((curve.length - 1) / duration * (cancelTime - t0));
+
+ let curvePointsPerFrame =
+ (curve.length - 1) / duration / sampleRate;
+ let virtualIndex =
+ (cancelTime - t0) * sampleRate * curvePointsPerFrame;
+
+ let delta = virtualIndex - index;
+ expectedConstant = curve[0] + (curve[1] - curve[0]) * delta;
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setValueCurveAtTime([' + curve + '], ' + t0 +
+ ', ' + duration + ')',
+ summary: 'setValueCurveAtTime',
+ };
+ }, {
+ valueThreshold: 9.5368e-9,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'setValueCurve after end',
+ description: 'Cancel setValueCurveAtTime after the end'
+ },
+ function(task, should) {
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+          // After the linear ramp, schedule a setValueCurve. (This is the
+ // event that will be cancelled.) Make sure the curve ends before
+ // the cancellation time.
+ let v1 = 0;
+ let duration = cancelTime - t0 - 0.125;
+
+ // For simplicity, a 2-point curve so we get a linear interpolated
+ // result.
+ let curve = Float32Array.from([v0, 0]);
+
+ g[0].gain.setValueCurveAtTime(curve, t0, duration);
+ g[1].gain.setValueCurveAtTime(curve, t0, duration);
+
+ expectedConstant = curve[1];
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setValueCurveAtTime([' + curve + '], ' + t0 +
+ ', ' + duration + ')',
+ summary: 'setValueCurveAtTime',
+ };
+ }, {
+ valueThreshold: 0,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ // Special case where we schedule a setTarget and there is no earlier
+ // automation event. This tests that we pick up the starting point
+ // correctly from the last setting of the AudioParam value attribute.
+
+
+ audit.define(
+ {
+ label: 'initial setTarget',
+ description: 'Cancel with initial setTargetAtTime'
+ },
+ function(task, should) {
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ let v1 = 0;
+ let timeConstant = 0.1;
+ g[0].gain.value = 1;
+ g[0].gain.setTargetAtTime(v1, t0, timeConstant);
+ g[1].gain.value = 1;
+ g[1].gain.setTargetAtTime(v1, t0, timeConstant);
+
+ let expectedConstant = Math.fround(
+ v1 + (v0 - v1) * Math.exp(-(cancelTime - t0) / timeConstant));
+
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setTargetAtTime(' + v1 + ', ' + t0 + ', ' +
+ timeConstant + ')',
+ summary: 'Initial setTargetAtTime',
+ };
+ }, {
+ valueThreshold: 3.1210e-6,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ // Test automations scheduled after the call to cancelAndHoldAtTime.
+ // Very similar to the above tests, but we also schedule an event after
+ // cancelAndHoldAtTime and verify that curve after cancellation has
+ // the correct values.
+
+ audit.define(
+ {
+ label: 'post cancel: Linear',
+ description: 'LinearRamp after cancelling'
+ },
+ function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be
+ // cancelled. Then schedule another linear ramp after the
+ // cancellation.
+ cancelTest(
+ should,
+ linearRampTest('Post cancellation linearRampToValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+ // Schedule the linear ramp on g[0], and do the same for g[2],
+ // using the starting point given by expectedConstant.
+ let v2 = 2;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.linearRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.linearRampToValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post linearRamp(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'post cancel: Exponential',
+ description: 'ExponentialRamp after cancelling'
+ },
+ function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be
+ // cancelled. Then schedule an exponential ramp after the
+ // cancellation.
+ cancelTest(
+ should,
+ linearRampTest('Post cancel exponentialRampToValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+ // Schedule the exponential ramp on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 2;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.exponentialRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.exponentialRampToValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post exponentialRamp(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: ValueCurve', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+ // Then schedule a setValueCurve after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setValueCurveAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+              // Schedule the setValueCurve on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let t2 = cancelTime + 0.125;
+ let duration = 0.125;
+ let curve = Float32Array.from([.125, 2]);
+ g[0].gain.setValueCurveAtTime(curve, t2, duration);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setValueCurveAtTime(curve, t2, duration);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post setValueCurve([' + curve + '], ' + t2 + ', ' +
+ duration + ')',
+ errorThreshold: 8.3998e-5
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: setTarget', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+ // Then schedule a setTarget after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setTargetAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+              // Schedule the setTarget on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 0.125;
+ let t2 = cancelTime + 0.125;
+ let timeConstant = 0.1;
+ g[0].gain.setTargetAtTime(v2, t2, timeConstant);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setTargetAtTime(v2, t2, timeConstant);
+ return {
+ constantEndTime: cancelTime + 0.125,
+ message: 'Post setTargetAtTime(' + v2 + ', ' + t2 + ', ' +
+ timeConstant + ')',
+ errorThreshold: 8.4037e-5
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: setValue', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+ // Then schedule a setTarget after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+              // Schedule the setValueAtTime on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 0.125;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.setValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime + 0.125,
+ message: 'Post setValueAtTime(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel future setTarget', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setTargetAtTime(0, 0.75 * renderDuration, 0.1);
+ // Now cancel the effect of the setTarget.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ // Because the setTarget was cancelled, the output should be a
+ // constant.
+ should(actual, 'After cancelling future setTarget event, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel setTarget now', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setTargetAtTime(0, 0.5 * renderDuration, 0.1);
+ // Now cancel the effect of the setTarget.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ // Because the setTarget was cancelled, the output should be a
+ // constant.
+ should(
+ actual,
+ 'After cancelling setTarget event starting now, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel future setValueCurve', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setValueCurveAtTime([-1, 1], 0.75 * renderDuration, 0.1);
+      // Now cancel the effect of the setValueCurve.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+            // Because the setValueCurve was cancelled, the output should be a
+ // constant.
+ should(
+ actual, 'After cancelling future setValueCurve event, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel setValueCurve now', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setValueCurveAtTime([-1, 1], 0.5 * renderDuration, 0.1);
+      // Now cancel the effect of the setValueCurve.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+            // Because the setValueCurve was cancelled, the output should be a
+ // constant.
+ should(
+ actual,
+ 'After cancelling current setValueCurve event starting now, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'linear, cancel, linear, cancel, linear',
+ description: 'Schedules 3 linear ramps, cancelling 2 of them, '
+ + 'so that we end up with 2 cancel events next to each other'
+ },
+ (task, should) => {
+ cancelTest2(
+ should,
+ linearRampTest('1st linearRamp'),
+ {valueThreshold: 0, curveThreshold: 5.9605e-8},
+ (g, cancelTime, expectedConstant, cancelTime2) => {
+ // Ramp from first cancel time to the end will be cancelled at
+ // second cancel time.
+ const v1 = expectedConstant;
+ const t1 = cancelTime;
+ const v2 = 2;
+ const t2 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(v1, t1);
+ g[2].gain.linearRampToValueAtTime(v2, t2);
+
+ const expectedConstant2 =
+ audioParamLinearRamp(cancelTime2, v1, t1, v2, t2);
+
+ return {
+ constantEndTime: cancelTime,
+ message: `2nd linearRamp(${v2}, ${t2})`,
+ expectedConstant2
+ };
+ },
+ (g, cancelTime2, expectedConstant2) => {
+ // Ramp from second cancel time to the end.
+ const v3 = 0;
+ const t3 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v3, t3);
+ g[3].gain.setValueAtTime(expectedConstant2, cancelTime2);
+ g[3].gain.linearRampToValueAtTime(v3, t3);
+ return {
+ constantEndTime2: cancelTime2,
+ message2: `3rd linearRamp(${v3}, ${t3})`,
+ };
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Common function for doing a linearRamp test. This just does a linear
+ // ramp from 0 to v0 at from time 0 to t0. Then another linear ramp is
+ // scheduled from v0 to 0 from time t0 to t1. This is the ramp that is to
+ // be cancelled.
+ function linearRampTest(message) {
+ return function(g, v0, t0, cancelTime) {
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ let v1 = 0;
+ let t1 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v1, t1);
+ g[1].gain.linearRampToValueAtTime(v1, t1);
+
+ expectedConstant =
+ Math.fround(v0 + (v1 - v0) * (cancelTime - t0) / (t1 - t0));
+
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage:
+ message + ': linearRampToValue(' + v1 + ', ' + t1 + ')',
+ summary: message,
+ };
+ }
+ }
+
+ // Run the cancellation test. A set of automations is created and
+ // canceled.
+ //
+ // |testerFunction| is a function that generates the automation to be
+ // tested. It is given an array of 3 gain nodes, the value and time of an
+ // initial linear ramp, and the time where the cancellation should occur.
+ // The function must do the automations for the first two gain nodes. It
+ // must return a dictionary with |expectedConstant| being the value at the
+ // cancellation time, |autoMessage| for message to describe the test, and
+ // |summary| for general summary message to be printed at the end of the
+ // test.
+ //
+ // |thresholdOptions| is a property bag that specifies the error threshold
+ // to use. |thresholdOptions.valueThreshold| is the error threshold for
+ // comparing the actual constant output after cancelling to the expected
+ // value. |thresholdOptions.curveThreshold| is the error threshold for
+ // comparing the actual and expected automation curves before the
+ // cancelation point.
+ //
+ // For cancellation tests, |postCancelTest| is a function that schedules
+ // some automation after the cancellation. It takes 3 arguments: an array
+ // of the gain nodes, the cancellation time, and the expected value at the
+ // cancellation time. This function must return a dictionary consisting
+      // of |constantEndTime| indicating when the held constant from
+ // cancellation stops being constant, |message| giving a summary of what
+ // automation is being used, and |errorThreshold| that is the error
+ // threshold between the expected curve and the actual curve.
+ //
+ function cancelTest(
+ should, testerFunction, thresholdOptions, postCancelTest) {
+ // Create a context with three channels. Channel 0 is the test channel
+ // containing the actual output that includes the cancellation of
+      // events. Channel 1 is the expected data up to the cancellation so we
+ // can verify the cancellation produced the correct result. Channel 2
+ // is for verifying events inserted after the cancellation so we can
+ // verify that automations are correctly generated after the
+ // cancellation point.
+ let context =
+ new OfflineAudioContext(3, renderDuration * sampleRate, sampleRate);
+
+ // Test source is a constant signal
+ let src = context.createBufferSource();
+ src.buffer = createConstantBuffer(context, 1, 1);
+ src.loop = true;
+
+ // We'll do the automation tests with three gain nodes. One (g0) will
+ // have cancelAndHoldAtTime and the other (g1) will not. g1 is
+ // used as the expected result for that automation up to the
+ // cancellation point. They should be the same. The third node (g2) is
+ // used for testing automations inserted after the cancellation point,
+ // if any. g2 is the expected result from the cancellation point to the
+ // end of the test.
+
+ let g0 = context.createGain();
+ let g1 = context.createGain();
+ let g2 = context.createGain();
+ let v0 = 1;
+ let t0 = 0.01;
+
+ let cancelTime = renderDuration / 2;
+
+ // Test automation here. The tester function is responsible for setting
+ // up the gain nodes with the desired automation for testing.
+ autoResult = testerFunction([g0, g1, g2], v0, t0, cancelTime);
+ let expectedConstant = autoResult.expectedConstant;
+ let autoMessage = autoResult.autoMessage;
+ let summaryMessage = autoResult.summary;
+
+ // Cancel scheduled events somewhere in the middle of the test
+ // automation.
+ g0.gain.cancelAndHoldAtTime(cancelTime);
+
+ let constantEndTime;
+ if (postCancelTest) {
+ postResult =
+ postCancelTest([g0, g1, g2], cancelTime, expectedConstant);
+ constantEndTime = postResult.constantEndTime;
+ }
+
+ // Connect everything together (with a merger to make a two-channel
+ // result). Channel 0 is the test (with cancelAndHoldAtTime) and
+ // channel 1 is the reference (without cancelAndHoldAtTime).
+ // Channel 1 is used to verify that everything up to the cancellation
+ // has the correct values.
+ src.connect(g0);
+ src.connect(g1);
+ src.connect(g2);
+ let merger = context.createChannelMerger(3);
+ g0.connect(merger, 0, 0);
+ g1.connect(merger, 0, 1);
+ g2.connect(merger, 0, 2);
+ merger.connect(context.destination);
+
+ // Go!
+ src.start();
+
+ return context.startRendering().then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ // The actual output should be a constant from the cancel time to the
+ // end. We use the last value of the actual output as the constant,
+ // but we also want to compare that with what we thought it should
+ // really be.
+
+ let cancelFrame = Math.ceil(cancelTime * sampleRate);
+
+          // Verify that the curves up to the cancel time are "identical".
+          // They should be, but round-off may make them differ slightly
+          // due to the way cancelling is done.
+ let endFrame = Math.floor(cancelTime * sampleRate);
+ should(
+ actual.slice(0, endFrame),
+ autoMessage + ' up to time ' + cancelTime)
+ .beCloseToArray(
+ expected.slice(0, endFrame),
+ {absoluteThreshold: thresholdOptions.curveThreshold});
+
+ // Verify the output after the cancellation is a constant.
+ let actualTail;
+ let constantEndFrame;
+
+ if (postCancelTest) {
+ constantEndFrame = Math.ceil(constantEndTime * sampleRate);
+ actualTail = actual.slice(cancelFrame, constantEndFrame);
+ } else {
+ actualTail = actual.slice(cancelFrame);
+ }
+
+ let actualConstant = actual[cancelFrame];
+
+ should(
+ actualTail,
+ 'Cancelling ' + autoMessage + ' at time ' + cancelTime)
+ .beConstantValueOf(actualConstant);
+
+ // Verify that the constant is the value we expect.
+ should(
+ actualConstant,
+ 'Expected value for cancelling ' + autoMessage + ' at time ' +
+ cancelTime)
+ .beCloseTo(
+ expectedConstant,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify the curve after the constantEndTime matches our
+ // expectations.
+ if (postCancelTest) {
+ let c2 = buffer.getChannelData(2);
+ should(actual.slice(constantEndFrame), postResult.message)
+ .beCloseToArray(
+ c2.slice(constantEndFrame),
+ {absoluteThreshold: postResult.errorThreshold || 0});
+ }
+ });
+ }
+
+ // Similar to cancelTest, but does 2 cancels.
+ function cancelTest2(
+ should, testerFunction, thresholdOptions,
+ postCancelTest, postCancelTest2) {
+ // Channel 0: Actual output that includes the cancellation of events.
+ // Channel 1: Expected data up to the first cancellation.
+ // Channel 2: Expected data from 1st cancellation to 2nd cancellation.
+ // Channel 3: Expected data from 2nd cancellation to the end.
+ const context =
+ new OfflineAudioContext(4, renderDuration * sampleRate, sampleRate);
+
+ const src = context.createConstantSource();
+
+ // g0: Actual gain which will have cancelAndHoldAtTime called on it
+ // twice.
+ // g1: Expected gain from start to the 1st cancel.
+ // g2: Expected gain from 1st cancel to the 2nd cancel.
+ // g3: Expected gain from the 2nd cancel to the end.
+ const g0 = context.createGain();
+ const g1 = context.createGain();
+ const g2 = context.createGain();
+ const g3 = context.createGain();
+ const v0 = 1;
+ const t0 = 0.01;
+
+ const cancelTime1 = renderDuration * 0.5;
+ const cancelTime2 = renderDuration * 0.75;
+
+ // Run testerFunction to generate the 1st ramp.
+ const {
+ expectedConstant, autoMessage, summaryMessage} =
+ testerFunction([g0, g1, g2], v0, t0, cancelTime1);
+
+ // 1st cancel, cancelling the 1st ramp.
+ g0.gain.cancelAndHoldAtTime(cancelTime1);
+
+ // Run postCancelTest to generate the 2nd ramp.
+ const {
+ constantEndTime, message, errorThreshold = 0, expectedConstant2} =
+ postCancelTest(
+ [g0, g1, g2], cancelTime1, expectedConstant, cancelTime2);
+
+ // 2nd cancel, cancelling the 2nd ramp.
+ g0.gain.cancelAndHoldAtTime(cancelTime2);
+
+ // Run postCancelTest2 to generate the 3rd ramp.
+ const {constantEndTime2, message2} =
+ postCancelTest2([g0, g1, g2, g3], cancelTime2, expectedConstant2);
+
+ // Connect everything together
+ src.connect(g0);
+ src.connect(g1);
+ src.connect(g2);
+ src.connect(g3);
+ const merger = context.createChannelMerger(4);
+ g0.connect(merger, 0, 0);
+ g1.connect(merger, 0, 1);
+ g2.connect(merger, 0, 2);
+ g3.connect(merger, 0, 3);
+ merger.connect(context.destination);
+
+ // Go!
+ src.start();
+
+ return context.startRendering().then(function (buffer) {
+ const actual = buffer.getChannelData(0);
+ const expected1 = buffer.getChannelData(1);
+ const expected2 = buffer.getChannelData(2);
+ const expected3 = buffer.getChannelData(3);
+
+ const cancelFrame1 = Math.ceil(cancelTime1 * sampleRate);
+ const cancelFrame2 = Math.ceil(cancelTime2 * sampleRate);
+
+ const constantEndFrame1 = Math.ceil(constantEndTime * sampleRate);
+ const constantEndFrame2 = Math.ceil(constantEndTime2 * sampleRate);
+
+ const actualTail1 = actual.slice(cancelFrame1, constantEndFrame1);
+ const actualTail2 = actual.slice(cancelFrame2, constantEndFrame2);
+
+ const actualConstant1 = actual[cancelFrame1];
+ const actualConstant2 = actual[cancelFrame2];
+
+ // Verify first section curve
+ should(
+ actual.slice(0, cancelFrame1),
+ autoMessage + ' up to time ' + cancelTime1)
+ .beCloseToArray(
+ expected1.slice(0, cancelFrame1),
+ {absoluteThreshold: thresholdOptions.curveThreshold});
+
+ // Verify that a value was held after 1st cancel
+ should(
+ actualTail1,
+ 'Cancelling ' + autoMessage + ' at time ' + cancelTime1)
+ .beConstantValueOf(actualConstant1);
+
+ // Verify that held value after 1st cancel was correct
+ should(
+ actualConstant1,
+ 'Expected value for cancelling ' + autoMessage + ' at time ' +
+ cancelTime1)
+ .beCloseTo(
+ expectedConstant,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify middle section curve
+ should(actual.slice(constantEndFrame1, cancelFrame2), message)
+ .beCloseToArray(
+ expected2.slice(constantEndFrame1, cancelFrame2),
+ {absoluteThreshold: errorThreshold});
+
+ // Verify that a value was held after 2nd cancel
+ should(
+ actualTail2,
+ 'Cancelling ' + message + ' at time ' + cancelTime2)
+ .beConstantValueOf(actualConstant2);
+
+ // Verify that held value after 2nd cancel was correct
+ should(
+ actualConstant2,
+ 'Expected value for cancelling ' + message + ' at time ' +
+ cancelTime2)
+ .beCloseTo(
+ expectedConstant2,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify end section curve
+ should(actual.slice(constantEndFrame2), message2)
+ .beCloseToArray(
+ expected3.slice(constantEndFrame2),
+ {absoluteThreshold: errorThreshold || 0});
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html
new file mode 100644
index 0000000000..b5555b0137
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html
@@ -0,0 +1,161 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test AudioParam events very close in time</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Largest sample rate that is required to be supported and is a power of
+ // two, to eliminate round-off as much as possible.
+ const sampleRate = 65536;
+
+ // Only need one render quantum for testing.
+ const testFrames = 128;
+
+ // Largest representable single-float number
+ const floatMax = Math.fround(3.4028234663852886e38);
+
+ // epspos is the smallest x such that 1 + x != 1
+ const epspos = 1.1102230246251568e-16;
+ // epsneg is the smallest x such that 1 - x != 1
+ const epsneg = 5.551115123125784e-17;
+
+ audit.define(
+ {label: 'no-nan', description: 'NaN does not occur'},
+ (task, should) => {
+ const context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testFrames
+ });
+
+ const src0 = new ConstantSourceNode(context, {offset: 0});
+
+ // This should always succeed. We just want to print out a message
+ // that |src0| is a constant source node for the following
+ // processing.
+ should(src0, 'src0 = new ConstantSourceNode(context, {offset: 0})')
+ .beEqualTo(src0);
+
+ src0.connect(context.destination);
+
+ // Values for the first event (setValue). |time1| MUST be 0.
+ const time1 = 0;
+ const value1 = 10;
+
+ // Values for the second event (linearRamp). |value2| must be huge,
+ // and |time2| must be small enough that 1/|time2| overflows a
+ // single float. This value is the least positive single float.
+ const value2 = floatMax;
+ const time2 = 1.401298464324817e-45;
+
+ // These should always succeed; the messages are just informational
+ // to show the events that we scheduled.
+ should(
+ src0.offset.setValueAtTime(value1, time1),
+ `src0.offset.setValueAtTime(${value1}, ${time1})`)
+ .beEqualTo(src0.offset);
+ should(
+ src0.offset.linearRampToValueAtTime(value2, time2),
+ `src0.offset.linearRampToValueAtTime(${value2}, ${time2})`)
+ .beEqualTo(src0.offset);
+
+ src0.start();
+
+ context.startRendering()
+ .then(buffer => {
+ const output = buffer.getChannelData(0);
+
+ // Since time1 = 0, the output at frame 0 MUST be value1.
+ should(output[0], 'output[0]').beEqualTo(value1);
+
+ // Since time2 < 1, output from frame 1 and later must be a
+ // constant.
+ should(output.slice(1), 'output[1]')
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'interpolation', description: 'Interpolation of linear ramp'},
+ (task, should) => {
+ const context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testFrames
+ });
+
+ const src1 = new ConstantSourceNode(context, {offset: 0});
+
+ // This should always succeed. We just want to print out a message
+ // that |src1| is a constant source node for the following
+ // processing.
+ should(src1, 'src1 = new ConstantSourceNode(context, {offset: 0})')
+ .beEqualTo(src1);
+
+ src1.connect(context.destination);
+
+ const frame = 1;
+
+ // These time values are arranged so that time1 < frame/sampleRate <
+            // time2. This means we need to interpolate to get a value at the
+            // given frame.
+ //
+ // The values are not so important, but |value2| should be huge.
+ const time1 = frame * (1 - epsneg) / context.sampleRate;
+ const value1 = 1e15;
+
+ const time2 = frame * (1 + epspos) / context.sampleRate;
+ const value2 = floatMax;
+
+ should(
+ src1.offset.setValueAtTime(value1, time1),
+ `src1.offset.setValueAtTime(${value1}, ${time1})`)
+ .beEqualTo(src1.offset);
+ should(
+ src1.offset.linearRampToValueAtTime(value2, time2),
+ `src1.offset.linearRampToValueAtTime(${value2}, ${time2})`)
+ .beEqualTo(src1.offset);
+
+ src1.start();
+
+ context.startRendering()
+ .then(buffer => {
+ const output = buffer.getChannelData(0);
+
+ // Sanity check
+ should(time2 - time1, 'Event time difference')
+ .notBeEqualTo(0);
+
+ // Because 0 < time1 < 1, output must be 0 at time 0.
+ should(output[0], 'output[0]').beEqualTo(0);
+
+ // Because time1 < 1/sampleRate < time2, we need to
+ // interpolate the value between these times to determine the
+ // output at frame 1.
+ const t = frame / context.sampleRate;
+ const v = value1 +
+ (value2 - value1) * (t - time1) / (time2 - time1);
+
+ should(output[1], 'output[1]').beCloseTo(v, {threshold: 0});
+
+ // Because 1 < time2 < 2, the output at frame 2 and higher is
+ // constant.
+ should(output.slice(2), 'output[2:]')
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html
new file mode 100644
index 0000000000..b0455f86bc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html
@@ -0,0 +1,103 @@
+<!DOCTYPE html>
+<!--
+Tests that an audio-rate signal (AudioNode output) can be connected to an
+AudioParam. Specifically, this tests that an audio-rate signal coming from an
+AudioBufferSourceNode playing an AudioBuffer containing a specific curve can be
+connected to an AudioGainNode's .gain attribute (an AudioParam). Another
+AudioBufferSourceNode will be the audio source having its gain changed. We load
+this one with an AudioBuffer containing a constant value of 1. Thus it's easy
+to check that the resultant signal should be equal to the gain-scaling curve.
+-->
+<html>
+ <head>
+ <title>
+ audioparam-connect-audioratesignal.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 1;
+
+ let context = 0;
+ let constantOneBuffer = 0;
+ let linearRampBuffer = 0;
+
+ function checkResult(renderedBuffer, should) {
+ let renderedData = renderedBuffer.getChannelData(0);
+ let expectedData = linearRampBuffer.getChannelData(0);
+ let n = renderedBuffer.length;
+
+ should(n, 'Rendered signal length').beEqualTo(linearRampBuffer.length);
+
+ // Check that the rendered result exactly matches the buffer used to
+ // control gain. This is because we're changing the gain of a signal
+ // having constant value 1.
+ let success = true;
+ for (let i = 0; i < n; ++i) {
+ if (renderedData[i] != expectedData[i]) {
+ success = false;
+ break;
+ }
+ }
+
+ should(
+ success,
+ 'Rendered signal exactly matches the audio-rate gain changing signal')
+ .beTrue();
+ }
+
+ audit.define('test', function(task, should) {
+ let sampleFrameLength = sampleRate * lengthInSeconds;
+
+ // Create offline audio context.
+ context = new OfflineAudioContext(1, sampleFrameLength, sampleRate);
+
+ // Create buffer used by the source which will have its gain controlled.
+ constantOneBuffer = createConstantBuffer(context, sampleFrameLength, 1);
+
+ // Create buffer used to control gain.
+ linearRampBuffer = createLinearRampBuffer(context, sampleFrameLength);
+
+ // Create the two sources.
+
+ let constantSource = context.createBufferSource();
+ constantSource.buffer = constantOneBuffer;
+
+ let gainChangingSource = context.createBufferSource();
+ gainChangingSource.buffer = linearRampBuffer;
+
+ // Create a gain node controlling the gain of constantSource and make
+ // the connections.
+ let gainNode = context.createGain();
+
+ // Intrinsic baseline gain of zero.
+ gainNode.gain.value = 0;
+
+ constantSource.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ // Connect an audio-rate signal to control the .gain AudioParam.
+ // This is the heart of what is being tested.
+ gainChangingSource.connect(gainNode.gain);
+
+ // Start both sources at time 0.
+ constantSource.start(0);
+ gainChangingSource.start(0);
+
+ context.startRendering().then(buffer => {
+ checkResult(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html
new file mode 100644
index 0000000000..982731d338
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html
@@ -0,0 +1,240 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-exceptional-values.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Context to use for all of the tests. The context isn't used for any
+ // processing; just need one for creating a gain node, which is used for
+ // all the tests.
+ let context;
+
+ // For these values, AudioParam methods should throw a Typeerror because
+ // they are not finite values.
+ let nonFiniteValues = [Infinity, -Infinity, NaN];
+
+ audit.define('initialize', (task, should) => {
+ should(() => {
+ // Context for testing. Rendering isn't done, so any valid values can
+ // be used here so might as well make them small.
+ context = new OfflineAudioContext(1, 1, 8000);
+ }, 'Creating context for testing').notThrow();
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test value',
+ description: 'Test non-finite arguments for AudioParam value'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the value of the automation.
+ let defaultFuncArg = (value) => [value, 1];
+
+ // Test the value parameter
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg}, {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (value) => [value, 1, 1]
+ }
+ ]);
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test time',
+ description: 'Test non-finite arguments for AudioParam time'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the time parameter of the automation.
+ let defaultFuncArg = (startTime) => [1, startTime];
+
+ // Test the time parameter
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg},
+ {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ // Test start time for setTarget
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (startTime) => [1, startTime, 1]
+ },
+ // Test time constant for setTarget
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (timeConstant) => [1, 1, timeConstant]
+ },
+ ]);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test setValueCurve',
+ description: 'Test non-finite arguments for setValueCurveAtTime'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Just an array for use by setValueCurveAtTime. The length and
+ // contents of the array are not important.
+ let curve = new Float32Array(3);
+
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (startTime) => [curve, startTime, 1]
+ },
+ ]);
+
+ // Non-finite values for the curve should signal an error
+ doTests(
+ should, gain, TypeError,
+ [[1, 2, Infinity, 3], [1, NaN, 2, 3]], [{
+ automationName: 'setValueCurveAtTime',
+ funcArg: (c) => [c, 1, 1]
+ }]);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'special cases 1',
+ description: 'Test exceptions for finite values'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the time parameter of the automation.
+ let defaultFuncArg = (startTime) => [1, startTime];
+
+ // Test the time parameter
+ let curve = new Float32Array(3);
+ doTests(should, gain, RangeError, [-1], [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg},
+ {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (startTime) => [1, startTime, 1]
+ },
+ // Test time constant
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (timeConstant) => [1, 1, timeConstant]
+ },
+ // startTime and duration for setValueCurve
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (startTime) => [curve, startTime, 1]
+ },
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (duration) => [curve, 1, duration]
+ },
+ ]);
+
+ // Two final tests for setValueCurve: duration must be strictly
+ // positive.
+ should(
+ () => gain.gain.setValueCurveAtTime(curve, 1, 0),
+ 'gain.gain.setValueCurveAtTime(curve, 1, 0)')
+ .throw(RangeError);
+ should(
+ () => gain.gain.setValueCurveAtTime(curve, 1, -1),
+ 'gain.gain.setValueCurveAtTime(curve, 1, -1)')
+ .throw(RangeError);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'special cases 2',
+ description: 'Test special cases for expeonentialRamp'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ doTests(should, gain, RangeError, [0, -1e-100, 1e-100], [{
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: (value) => [value, 1]
+ }]);
+
+ task.done();
+ });
+
+ audit.run();
+
+ // Run test over the set of values in |testValues| for all of the
+ // automation methods in |testMethods|. The expected error type is
+ // |errorName|. |testMethods| is an array of dictionaries with attributes
+ // |automationName| giving the name of the automation method to be tested
+ // and |funcArg| being a function of one parameter that produces an array
+ // that will be used as the argument to the automation method.
+ function doTests(should, node, errorName, testValues, testMethods) {
+ testValues.forEach(value => {
+ testMethods.forEach(method => {
+ let args = method.funcArg(value);
+ let message = 'gain.gain.' + method.automationName + '(' +
+ argString(args) + ')';
+ should(() => node.gain[method.automationName](...args), message)
+ .throw(errorName);
+ });
+ });
+ }
+
+ // Specialized printer for automation arguments so that messages make
+ // sense. We assume the first element is either a number or an array. If
+ // it's an array, there are always three elements, and we want to print
+ // out the brackets for the array argument.
+ function argString(arg) {
+ if (typeof(arg[0]) === 'number') {
+ return arg.toString();
+ }
+
+ return '[' + arg[0] + '],' + arg[1] + ',' + arg[2];
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html
new file mode 100644
index 0000000000..bec4c1286b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html
@@ -0,0 +1,63 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.exponentialRampToValueAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and exponentialRampToValueAtTime() at regular
+ // intervals to set the starting and ending values for an exponential
+ // ramp. Each time interval has a ramp with a different starting and
+ // ending value so that there is a discontinuity at each time interval
+ // boundary. The discontinuity is for testing timing. Also, we alternate
+ // between an increasing and decreasing ramp for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 1.222e-5;
+
+ // The AudioGainNode starts with this value instead of the default value.
+ let initialValue = 100;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate an exponential ramp ending at time |endTime| with an ending
+ // value of |value|.
+ function generateRamp(value, startTime, endTime){
+ // |startTime| is ignored because the exponential ramp
+ // uses the value from the setValueAtTime() call above.
+ gainNode.gain.exponentialRampToValueAtTime(value, endTime)}
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'AudioParam exponentialRampToValueAtTime() functionality'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, initialValue, setValue,
+ generateRamp, 'exponentialRampToValueAtTime()', maxAllowedError,
+ createExponentialRampArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html
new file mode 100644
index 0000000000..d8f38eeba0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ AudioParam with Huge End Time
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // Render for some small (but fairly arbitrary) time.
+ let renderDuration = 0.125;
+ // Any huge time value that won't fit in a size_t (2^64 on a 64-bit
+ // machine).
+ let largeTime = 1e300;
+
+ let audit = Audit.createTaskRunner();
+
+ // See crbug.com/582701. Create an audioparam with a huge end time and
+      // verify that the automation is run. We don't care about the actual
+ // results, just that it runs.
+
+ // Test linear ramp with huge end time
+ audit.define('linearRamp', (task, should) => {
+ let graph = createGraph();
+ graph.gain.gain.linearRampToValueAtTime(0.1, largeTime);
+
+ graph.source.start();
+ graph.context.startRendering()
+ .then(function(buffer) {
+ should(true, 'linearRampToValue(0.1, ' + largeTime + ')')
+ .message('successfully rendered', 'unsuccessfully rendered');
+ })
+ .then(() => task.done());
+ });
+
+ // Test exponential ramp with huge end time
+ audit.define('exponentialRamp', (task, should) => {
+ let graph = createGraph();
+ graph.gain.gain.exponentialRampToValueAtTime(.1, largeTime);
+
+ graph.source.start();
+ graph.context.startRendering()
+ .then(function(buffer) {
+ should(true, 'exponentialRampToValue(0.1, ' + largeTime + ')')
+ .message('successfully rendered', 'unsuccessfully rendered');
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Create the graph and return the context, the source, and the gain node.
+ function createGraph() {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ let src = context.createBufferSource();
+ src.buffer = createConstantBuffer(context, 1, 1);
+ src.loop = true;
+ let gain = context.createGain();
+ src.connect(gain);
+ gain.connect(context.destination);
+ gain.gain.setValueAtTime(1, 0.1 / sampleRate);
+
+ return {context: context, gain: gain, source: src};
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html
new file mode 100644
index 0000000000..509c254d92
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.linearRampToValueAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and linearRampToValueAtTime() at regular intervals to
+ // set the starting and ending values for a linear ramp. Each time
+ // interval has a ramp with a different starting and ending value so that
+ // there is a discontinuity at each time interval boundary. The
+ // discontinuity is for testing timing. Also, we alternate between an
+ // increasing and decreasing ramp for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 1.865e-6;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate a linear ramp ending at time |endTime| with an ending value of
+ // |value|.
+ function generateRamp(value, startTime, endTime){
+ // |startTime| is ignored because the linear ramp uses the value from
+ // the
+ // setValueAtTime() call above.
+ gainNode.gain.linearRampToValueAtTime(value, endTime)}
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam linearRampToValueAtTime() functionality'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, 1, setValue, generateRamp,
+ 'linearRampToValueAtTime()', maxAllowedError,
+ createLinearRampArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html
new file mode 100644
index 0000000000..ffe46035fd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-method-chaining.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 8000;
+
+ // Create a dummy array for setValueCurveAtTime method.
+ let curveArray = new Float32Array([5.0, 6.0]);
+
+ // AudioNode dictionary with associated dummy arguments.
+ let methodDictionary = [
+ {name: 'setValueAtTime', args: [1.0, 0.0]},
+ {name: 'linearRampToValueAtTime', args: [2.0, 1.0]},
+ {name: 'exponentialRampToValueAtTime', args: [3.0, 2.0]},
+ {name: 'setTargetAtTime', args: [4.0, 2.0, 0.5]},
+ {name: 'setValueCurveAtTime', args: [curveArray, 5.0, 1.0]},
+ {name: 'cancelScheduledValues', args: [6.0]}
+ ];
+
+ let audit = Audit.createTaskRunner();
+
+ // Task: testing entries from the dictionary.
+ audit.define('from-dictionary', (task, should) => {
+ let context = new AudioContext();
+
+ methodDictionary.forEach(function(method) {
+ let sourceParam = context.createGain().gain;
+ should(
+ sourceParam === sourceParam[method.name](...method.args),
+ 'The return value of ' + sourceParam.constructor.name + '.' +
+ method.name + '()' +
+ ' matches the source AudioParam')
+ .beEqualTo(true);
+
+ });
+
+ task.done();
+ });
+
+ // Task: test method chaining with invalid operation.
+ audit.define('invalid-operation', (task, should) => {
+ let context = new OfflineAudioContext(1, sampleRate, sampleRate);
+ let osc = context.createOscillator();
+ let amp1 = context.createGain();
+ let amp2 = context.createGain();
+
+ osc.connect(amp1);
+ osc.connect(amp2);
+ amp1.connect(context.destination);
+ amp2.connect(context.destination);
+
+ // The first operation fails with an exception, thus the second one
+ // should not have effect on the parameter value. Instead, it should
+ // maintain the default value of 1.0.
+ should(
+ function() {
+ amp1.gain.setValueAtTime(0.25, -1.0)
+ .linearRampToValueAtTime(2.0, 1.0);
+ },
+ 'Calling setValueAtTime() with a negative end time')
+ .throw(RangeError);
+
+ // The first operation succeeds but the second fails due to zero target
+ // value for the exponential ramp. Thus only the first should have
+ // effect on the parameter value, setting the value to 0.5.
+ should(
+ function() {
+ amp2.gain.setValueAtTime(0.5, 0.0).exponentialRampToValueAtTime(
+ 0.0, 1.0);
+ },
+ 'Calling exponentialRampToValueAtTime() with a zero target value')
+ .throw(RangeError);
+
+ osc.start();
+ osc.stop(1.0);
+
+ context.startRendering()
+ .then(function(buffer) {
+ should(amp1.gain.value, 'The gain value of the first gain node')
+ .beEqualTo(1.0);
+ should(amp2.gain.value, 'The gain value of the second gain node')
+ .beEqualTo(0.5);
+ })
+ .then(() => task.done());
+ });
+
+ // Task: verify if the method chaining actually works. Create an arbitrary
+ // envelope and compare the result with the expected one created by JS
+ // code.
+ audit.define('verification', (task, should) => {
+ let context = new OfflineAudioContext(1, sampleRate * 4, sampleRate);
+ let constantBuffer = createConstantBuffer(context, 1, 1.0);
+
+ let source = context.createBufferSource();
+ source.buffer = constantBuffer;
+ source.loop = true;
+
+ let envelope = context.createGain();
+
+ source.connect(envelope);
+ envelope.connect(context.destination);
+
+ envelope.gain.setValueAtTime(0.0, 0.0)
+ .linearRampToValueAtTime(1.0, 1.0)
+ .exponentialRampToValueAtTime(0.5, 2.0)
+ .setTargetAtTime(0.001, 2.0, 0.5);
+
+ source.start();
+
+ context.startRendering()
+ .then(function(buffer) {
+ let expectedEnvelope =
+ createLinearRampArray(0.0, 1.0, 0.0, 1.0, sampleRate);
+ expectedEnvelope.push(...createExponentialRampArray(
+ 1.0, 2.0, 1.0, 0.5, sampleRate));
+ expectedEnvelope.push(...createExponentialApproachArray(
+ 2.0, 4.0, 0.5, 0.001, sampleRate, 0.5));
+
+ // There are slight differences between JS implementation of
+ // AudioParam envelope and the internal implementation. (i.e.
+ // double/float and rounding up) The error threshold is adjusted
+ // empirically through the local testing.
+ should(buffer.getChannelData(0), 'The rendered envelope')
+ .beCloseToArray(
+ expectedEnvelope, {absoluteThreshold: 4.0532e-6});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html
new file mode 100644
index 0000000000..517fc6e956
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html
@@ -0,0 +1,497 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam Nominal Range Values
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Some arbitrary sample rate for the offline context.
+ let sampleRate = 48000;
+
+ // The actual contexts to use. Generally use the offline context for
+ // testing except for the media nodes which require an AudioContext.
+ let offlineContext;
+ let audioContext;
+
+ // The set of all methods that we've tested for verifying that we tested
+ // all of the necessary objects.
+ let testedMethods = new Set();
+
+ // The most positive single float value (the value just before infinity).
+ // Be careful when changing this value! Javascript only uses double
+ // floats, so the value here should be the max single-float value,
+ // converted directly to a double-float value. This also depends on
+ // Javascript reading this value and producing the desired double-float
+ // value correctly.
+ let mostPositiveFloat = 3.4028234663852886e38;
+
+ let audit = Audit.createTaskRunner();
+
+ // Array describing the tests that should be run. |testOfflineConfigs| is
+ // for tests that can use an offline context. |testOnlineConfigs| is for
+ // tests that need to use an online context. Offline contexts are
+ // preferred when possible.
+ let testOfflineConfigs = [
+ {
+ // The name of the method to create the particular node to be tested.
+ creator: 'createGain',
+
+ // Any args to pass to the creator function.
+ args: [],
+
+ // The min/max limits for each AudioParam of the node. This is a
+ // dictionary whose keys are
+ // the names of each AudioParam in the node. Don't define this if the
+ // node doesn't have any
+ // AudioParam attributes.
+ limits: {
+ gain: {
+ // The expected min and max values for this AudioParam.
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat
+ }
+ }
+ },
+ {
+ creator: 'createDelay',
+ // Just specify a non-default value for the maximum delay so we can
+ // make sure the limits are
+ // set correctly.
+ args: [1.5],
+ limits: {delayTime: {minValue: 0, maxValue: 1.5}}
+ },
+ {
+ creator: 'createBufferSource',
+ args: [],
+ limits: {
+ playbackRate:
+ {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat},
+ detune: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat}
+ }
+ },
+ {
+ creator: 'createStereoPanner',
+ args: [],
+ limits: {pan: {minValue: -1, maxValue: 1}}
+ },
+ {
+ creator: 'createDynamicsCompressor',
+ args: [],
+ // Do not set limits for reduction; it's currently an AudioParam but
+ // should be a float.
+ // So let the test fail for reduction. When reduction is changed,
+ // this test will then
+ // correctly pass.
+ limits: {
+ threshold: {minValue: -100, maxValue: 0},
+ knee: {minValue: 0, maxValue: 40},
+ ratio: {minValue: 1, maxValue: 20},
+ attack: {minValue: 0, maxValue: 1},
+ release: {minValue: 0, maxValue: 1}
+ }
+ },
+ {
+ creator: 'createBiquadFilter',
+ args: [],
+ limits: {
+ gain: {
+ minValue: -mostPositiveFloat,
+ // This complicated expression is used to get all the arithmetic
+ // to round to the correct single-precision float value for the
+              // desired max.  This also assumes that the implementation computes
+ // the limit as 40 * log10f(std::numeric_limits<float>::max()).
+ maxValue:
+ Math.fround(40 * Math.fround(Math.log10(mostPositiveFloat)))
+ },
+ Q: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat},
+ frequency: {minValue: 0, maxValue: sampleRate / 2},
+ detune: {
+ minValue: -Math.fround(1200 * Math.log2(mostPositiveFloat)),
+ maxValue: Math.fround(1200 * Math.log2(mostPositiveFloat))
+ }
+ }
+ },
+ {
+ creator: 'createOscillator',
+ args: [],
+ limits: {
+ frequency: {minValue: -sampleRate / 2, maxValue: sampleRate / 2},
+ detune: {
+ minValue: -Math.fround(1200 * Math.log2(mostPositiveFloat)),
+ maxValue: Math.fround(1200 * Math.log2(mostPositiveFloat))
+ }
+ }
+ },
+ {
+ creator: 'createPanner',
+ args: [],
+ limits: {
+ positionX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ }
+ },
+ },
+ {
+ creator: 'createConstantSource',
+ args: [],
+ limits: {
+ offset: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat}
+ }
+ },
+ // These nodes don't have AudioParams, but we want to test them anyway.
+ // Any arguments for the
+ // constructor are pretty much arbitrary; they just need to be valid.
+ {
+ creator: 'createBuffer',
+ args: [1, 1, sampleRate],
+ },
+ {creator: 'createIIRFilter', args: [[1, 2], [1, .9]]},
+ {
+ creator: 'createWaveShaper',
+ args: [],
+ },
+ {
+ creator: 'createConvolver',
+ args: [],
+ },
+ {
+ creator: 'createAnalyser',
+ args: [],
+ },
+ {
+ creator: 'createScriptProcessor',
+ args: [0],
+ },
+ {
+ creator: 'createPeriodicWave',
+ args: [Float32Array.from([0, 0]), Float32Array.from([1, 0])],
+ },
+ {
+ creator: 'createChannelSplitter',
+ args: [],
+ },
+ {
+ creator: 'createChannelMerger',
+ args: [],
+ },
+ ];
+
+ let testOnlineConfigs = [
+ {creator: 'createMediaElementSource', args: [new Audio()]},
+ {creator: 'createMediaStreamDestination', args: []}
+ // Can't currently test MediaStreamSource because we're using an offline
+ // context.
+ ];
+
+ // Create the contexts so we can use it in the following test.
+ audit.define('initialize', (task, should) => {
+ // Just any context so that we can create the nodes.
+ should(() => {
+ offlineContext = new OfflineAudioContext(1, 1, sampleRate);
+ }, 'Create offline context for tests').notThrow();
+ should(() => {
+ onlineContext = new AudioContext();
+ }, 'Create online context for tests').notThrow();
+ task.done();
+ });
+
+ // Create a task for each entry in testOfflineConfigs
+ for (let test in testOfflineConfigs) {
+ let config = testOfflineConfigs[test]
+ audit.define('Offline ' + config.creator, (function(c) {
+ return (task, should) => {
+ let node = offlineContext[c.creator](...c.args);
+ testLimits(should, c.creator, node, c.limits);
+ task.done();
+ };
+ })(config));
+ }
+
+ for (let test in testOnlineConfigs) {
+ let config = testOnlineConfigs[test]
+ audit.define('Online ' + config.creator, (function(c) {
+ return (task, should) => {
+ let node = onlineContext[c.creator](...c.args);
+ testLimits(should, c.creator, node, c.limits);
+ task.done();
+ };
+ })(config));
+ }
+
+ // Test the AudioListener params that were added for the automated Panner
+ audit.define('AudioListener', (task, should) => {
+ testLimits(should, '', offlineContext.listener, {
+ positionX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ }
+ });
+ task.done();
+ });
+
+ // Verify that we have tested all the create methods available on the
+ // context.
+ audit.define('verifyTests', (task, should) => {
+ let allNodes = new Set();
+ // Create the set of all "create" methods from the context.
+ for (let method in offlineContext) {
+ if (typeof offlineContext[method] === 'function' &&
+ method.substring(0, 6) === 'create') {
+ allNodes.add(method);
+ }
+ }
+
+ // Compute the difference between the set of all create methods on the
+ // context and the set of tests that we've run.
+ let diff = new Set([...allNodes].filter(x => !testedMethods.has(x)));
+
+ // Can't currently test a MediaStreamSourceNode, so remove it from the
+ // diff set.
+ diff.delete('createMediaStreamSource');
+
+ // It's a test failure if we didn't test all of the create methods in
+ // the context (except createMediaStreamSource, of course).
+ let output = [];
+ if (diff.size) {
+ for (let item of diff)
+ output.push(' ' + item.substring(6));
+ }
+
+ should(output.length === 0, 'Number of nodes not tested')
+ .message(': 0', ': ' + output);
+
+ task.done();
+ });
+
+ // Simple test of a few automation methods to verify we get warnings.
+ audit.define('automation', (task, should) => {
+ // Just use a DelayNode for testing because the audio param has finite
+ // limits.
+ should(() => {
+ let d = offlineContext.createDelay();
+
+ // The console output should have the warnings that we're interested
+ // in.
+ d.delayTime.setValueAtTime(-1, 0);
+ d.delayTime.linearRampToValueAtTime(2, 1);
+ d.delayTime.exponentialRampToValueAtTime(3, 2);
+ d.delayTime.setTargetAtTime(-1, 3, .1);
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([.1, .2, 1.5, -1]), 4, .1);
+ }, 'Test automations (check console logs)').notThrow();
+ task.done();
+ });
+
+ audit.run();
+
+ // Is |object| an AudioParam? We determine this by checking the
+ // constructor name.
+ function isAudioParam(object) {
+ return object && object.constructor.name === 'AudioParam';
+ }
+
+ // Does |limitOptions| exist and does it have valid values for the
+ // expected min and max values?
+ function hasValidLimits(limitOptions) {
+ return limitOptions && (typeof limitOptions.minValue === 'number') &&
+ (typeof limitOptions.maxValue === 'number');
+ }
+
+ // Check the min and max values for the AudioParam attribute named
+      // |paramName| for the |node|.  The expected limits are given by the
+      // dictionary |limits|.  If some test fails, the failing parameter is
+ function validateAudioParamLimits(should, node, paramName, limits) {
+ let nodeName = node.constructor.name;
+ let parameter = node[paramName];
+ let prefix = nodeName + '.' + paramName;
+
+ let success = true;
+ if (hasValidLimits(limits[paramName])) {
+ // Verify that the min and max values for the parameter are correct.
+ let isCorrect = should(parameter.minValue, prefix + '.minValue')
+ .beEqualTo(limits[paramName].minValue);
+ isCorrect = should(parameter.maxValue, prefix + '.maxValue')
+ .beEqualTo(limits[paramName].maxValue) &&
+ isCorrect;
+
+ // Verify that the min and max attributes are read-only. |testValue|
+ // MUST be a number that can be represented exactly the same way as
+ // both a double and single float. A small integer works nicely.
+ const testValue = 42;
+ parameter.minValue = testValue;
+ let isReadOnly;
+ isReadOnly =
+ should(parameter.minValue, `${prefix}.minValue = ${testValue}`)
+ .notBeEqualTo(testValue);
+
+ should(isReadOnly, prefix + '.minValue is read-only').beEqualTo(true);
+
+ isCorrect = isReadOnly && isCorrect;
+
+ parameter.maxValue = testValue;
+ isReadOnly =
+ should(parameter.maxValue, `${prefix}.maxValue = ${testValue}`)
+ .notBeEqualTo(testValue);
+ should(isReadOnly, prefix + '.maxValue is read-only').beEqualTo(true);
+
+ isCorrect = isReadOnly && isCorrect;
+
+ // Now try to set the parameter outside the nominal range.
+ let newValue = 2 * limits[paramName].minValue - 1;
+
+ let isClipped = true;
+ let clippingTested = false;
+          // If the new value is beyond the largest single-precision
+ // float, skip the test because Chrome throws an error.
+ if (newValue >= -mostPositiveFloat) {
+ parameter.value = newValue;
+ clippingTested = true;
+ isClipped =
+ should(
+ parameter.value, 'Set ' + prefix + '.value = ' + newValue)
+ .beEqualTo(parameter.minValue) &&
+ isClipped;
+ }
+
+ newValue = 2 * limits[paramName].maxValue + 1;
+
+ if (newValue <= mostPositiveFloat) {
+ parameter.value = newValue;
+ clippingTested = true;
+ isClipped =
+ should(
+ parameter.value, 'Set ' + prefix + '.value = ' + newValue)
+ .beEqualTo(parameter.maxValue) &&
+ isClipped;
+ }
+
+ if (clippingTested) {
+ should(
+ isClipped,
+ prefix + ' was clipped to lie within the nominal range')
+ .beEqualTo(true);
+ }
+
+ isCorrect = isCorrect && isClipped;
+
+ success = isCorrect && success;
+ } else {
+ // Test config didn't specify valid limits. Fail this test!
+ should(
+ clippingTested,
+ 'Limits for ' + nodeName + '.' + paramName +
+ ' were correctly defined')
+ .beEqualTo(false);
+
+ success = false;
+ }
+
+ return success;
+ }
+
+ // Test all of the AudioParams for |node| using the expected values in
+ // |limits|. |creatorName| is the name of the method to create the node,
+      // and is used to keep track of which tests we've run.
+ function testLimits(should, creatorName, node, limits) {
+ let nodeName = node.constructor.name;
+ testedMethods.add(creatorName);
+
+ let success = true;
+
+ // List of all of the AudioParams that were tested.
+ let audioParams = [];
+
+ // List of AudioParams that failed the test.
+ let incorrectParams = [];
+
+ // Look through all of the keys for the node and extract just the
+ // AudioParams
+ Object.keys(node.__proto__).forEach(function(paramName) {
+ if (isAudioParam(node[paramName])) {
+ audioParams.push(paramName);
+ let isValid = validateAudioParamLimits(
+ should, node, paramName, limits, incorrectParams);
+ if (!isValid)
+ incorrectParams.push(paramName);
+
+ success = isValid && success;
+ }
+ });
+
+ // Print an appropriate message depending on whether there were
+ // AudioParams defined or not.
+ if (audioParams.length) {
+ let message =
+ 'Nominal ranges for AudioParam(s) of ' + node.constructor.name;
+ should(success, message)
+ .message('are correct', 'are incorrect for: ' + +incorrectParams);
+ return success;
+ } else {
+ should(!limits, nodeName)
+ .message(
+ 'has no AudioParams as expected',
+ 'has no AudioParams but test expected ' + limits);
+ }
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html
new file mode 100644
index 0000000000..faf00c007b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.setTargetAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and setTargetAtTime at regular intervals to set the
+ // starting value and the target value. Each time interval has a ramp with
+ // a different starting and target value so that there is a discontinuity
+ // at each time interval boundary. The discontinuity is for testing
+ // timing. Also, we alternate between an increasing and decreasing ramp
+ // for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 6.5683e-4
+
+ // The AudioGainNode starts with this value instead of the default value.
+ let initialValue = 100;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate an exponential approach starting at |startTime| with a target
+ // value of |value|.
+ function automation(value, startTime, endTime){
+ // endTime is not used for setTargetAtTime.
+ gainNode.gain.setTargetAtTime(value, startTime, timeConstant)}
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setTargetAtTime() functionality.'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, initialValue, setValue, automation,
+ 'setTargetAtTime()', maxAllowedError,
+ createExponentialApproachArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html
new file mode 100644
index 0000000000..ab2edfd009
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-setValueAtTime.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() at regular intervals to set the value for the duration
+ // of the interval. Each time interval has different value so that there
+ // is a discontinuity at each time interval boundary. The discontinuity
+ // is for testing timing.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 6e-8;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // For testing setValueAtTime(), we don't need to do anything for
+ // automation. because the value at the beginning of the interval is set
+ // by setValue and it remains constant for the duration, which is what we
+ // want.
+ function automation(value, startTime, endTime) {
+ // Do nothing.
+ }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setValueAtTime() functionality.'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, 1, setValue, automation,
+ 'setValueAtTime()', maxAllowedError, createConstantArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html
new file mode 100644
index 0000000000..ed0c15fb9b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html
@@ -0,0 +1,426 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Exceptions from setValueCurveAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // Some short duration because we don't need to run the test for very
+ // long.
+ let testDurationSec = 0.125;
+ let testDurationFrames = testDurationSec * sampleRate;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('setValueCurve', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Start time and duration for setValueCurveAtTime
+ let curveStartTime = 0.1 * testDurationSec;
+ let duration = 0.1 * testDurationSec;
+
+ // Some time that is known to be during the setValueCurveTime interval.
+ let automationTime = curveStartTime + duration / 2;
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, curveStartTime, duration);
+ },
+ 'setValueCurveAtTime(curve, ' + curveStartTime + ', ' + duration +
+ ')')
+ .notThrow();
+
+ should(
+ function() {
+ g.gain.setValueAtTime(1, automationTime);
+ },
+ 'setValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.linearRampToValueAtTime(1, automationTime);
+ },
+ 'linearRampToValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.exponentialRampToValueAtTime(1, automationTime);
+ },
+ 'exponentialRampToValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.setTargetAtTime(1, automationTime, 1);
+ },
+ 'setTargetAtTime(1, ' + automationTime + ', 1)')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.setValueAtTime(1, curveStartTime + 1.1 * duration);
+ },
+ 'setValueAtTime(1, ' + (curveStartTime + 1.1 * duration) + ')')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define('value setter', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Start time and duration for setValueCurveAtTime
+ let curveStartTime = 0.;
+ let duration = 0.2 * testDurationSec;
+
+ // Some time that is known to be during the setValueCurveTime interval.
+ let automationTime = 0.;
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, curveStartTime, duration);
+ },
+ 'setValueCurveAtTime(curve, ' + curveStartTime + ', ' + duration +
+ ')')
+ .notThrow();
+
+ should(
+ function() {
+ g.gain.value = 0.;
+ },
+ 'value setter')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('automations', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+
+ let curve = new Float32Array(2);
+ // Start time and duration for setValueCurveAtTime
+ let startTime = 0;
+ let timeInterval = testDurationSec / 10;
+ let time;
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.linearRampToValueAtTime(1, startTime);
+ }, 'linearRampToValueAtTime(1, ' + startTime + ')').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.exponentialRampToValueAtTime(1, startTime);
+ }, 'exponentialRampToValueAtTime(1, ' + startTime + ')').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.setTargetAtTime(1, startTime, 0.1);
+ }, 'setTargetAtTime(1, ' + startTime + ', 0.1)').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.setValueCurveAtTime(curve, startTime, 0.1);
+ }, 'setValueCurveAtTime(curve, ' + startTime + ', 0.1)').notThrow();
+
+ // Now try to setValueCurve that overlaps each of the above automations
+ startTime = timeInterval / 2;
+
+ for (let k = 0; k < 4; ++k) {
+ time = startTime + timeInterval * k;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, 0.01);
+ },
+ 'setValueCurveAtTime(curve, ' + time + ', 0.01)')
+ .throw(DOMException, 'NotSupportedError');
+ }
+
+ // Elements of setValueCurve should be finite.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(
+ Float32Array.from([NaN, NaN]), time, 0.01);
+ },
+ 'setValueCurveAtTime([NaN, NaN], ' + time + ', 0.01)')
+ .throw(TypeError);
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(
+ Float32Array.from([1, Infinity]), time, 0.01);
+ },
+ 'setValueCurveAtTime([1, Infinity], ' + time + ', 0.01)')
+ .throw(TypeError);
+
+ let d = context.createDelay();
+ // Check that we get warnings for out-of-range values and also throw for
+ // non-finite values.
+ should(
+ () => {
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([1, 5]), time, 0.01);
+ },
+ 'delayTime.setValueCurveAtTime([1, 5], ' + time + ', 0.01)')
+ .notThrow();
+
+ should(
+ () => {
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([1, 5, Infinity]), time, 0.01);
+ },
+ 'delayTime.setValueCurveAtTime([1, 5, Infinity], ' + time +
+ ', 0.01)')
+ .throw(TypeError);
+
+ // One last test that prints out lots of digits for the time.
+ time = Math.PI / 100;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, 0.01);
+ },
+ 'setValueCurveAtTime(curve, ' + time + ', 0.01)')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('catch-exception', (task, should) => {
+ // Verify that the curve isn't inserted into the time line even if we
+ // catch the exception.
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let gain = context.createGain();
+ let source = context.createBufferSource();
+ let buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ source.buffer = buffer;
+ source.loop = true;
+
+ source.connect(gain);
+ gain.connect(context.destination);
+
+ gain.gain.setValueAtTime(1, 0);
+ try {
+ // The value curve has an invalid element. This automation shouldn't
+ // be inserted into the timeline at all.
+ gain.gain.setValueCurveAtTime(
+ Float32Array.from([0, NaN]), 128 / context.sampleRate, .5);
+ } catch (e) {
+ };
+ source.start();
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ // Since the setValueCurve wasn't inserted, the output should be
+ // exactly 1 for the entire duration.
+ should(
+ resultBuffer.getChannelData(0),
+ 'Handled setValueCurve exception so output')
+ .beConstantValueOf(1);
+
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('start-end', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Verify that a setValueCurve can start at the end of an automation.
+ let time = 0;
+ let timeInterval = testDurationSec / 50;
+ should(() => {
+ g.gain.setValueAtTime(1, time);
+ }, 'setValueAtTime(1, ' + time + ')').notThrow();
+
+ time += timeInterval;
+ should(() => {
+ g.gain.linearRampToValueAtTime(0, time);
+ }, 'linearRampToValueAtTime(0, ' + time + ')').notThrow();
+
+ // setValueCurve starts at the end of the linear ramp. This should be
+ // fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // exponentialRamp ending one interval past the setValueCurve should be
+ // fine.
+ time += 2 * timeInterval;
+ should(() => {
+ g.gain.exponentialRampToValueAtTime(1, time);
+ }, 'exponentialRampToValueAtTime(1, ' + time + ')').notThrow();
+
+ // setValueCurve starts at the end of the exponential ramp. This should
+ // be fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setValueCurve at the end of the setValueCurve should be fine.
+ time += timeInterval;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setValueAtTime at the end of setValueCurve should be fine.
+ time += timeInterval;
+ should(() => {
+ g.gain.setValueAtTime(0, time);
+ }, 'setValueAtTime(0, ' + time + ')').notThrow();
+
+ // setValueCurve at the end of setValueAtTime should be fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setTarget starting at the end of setValueCurve should be fine.
+ time += timeInterval;
+ should(() => {
+ g.gain.setTargetAtTime(1, time, 1);
+ }, 'setTargetAtTime(1, ' + time + ', 1)').notThrow();
+
+ task.done();
+ });
+
+ audit.define('curve overlap', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let startTime = 5;
+ let startTimeLater = 10;
+ let startTimeEarlier = 2.5;
+ let curveDuration = 10;
+ let curveDurationShorter = 5;
+ let curve = [1, 2, 3];
+
+ // An initial curve event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDuration})`)
+ .notThrow();
+
+ // Check that an exception is thrown when trying to overlap two curves,
+ // in various ways
+
+ // Same start time and end time (curve exactly overlapping)
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDuration);
+ },
+ `second g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Same start time, shorter end time
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDurationShorter);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDurationShorter})`)
+ .throw(DOMException, 'NotSupportedError');
+        // Earlier start time, end time after the start time of another curve
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTimeEarlier, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTimeEarlier}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Start time after the start time of the other curve, but earlier than
+ // its end.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTimeLater, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTimeLater}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+
+ // New event wholly contained inside existing event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime + 1, curveDuration - 1);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime+1}, ${curveDuration-1})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Old event completely contained inside new event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime - 1, curveDuration + 1);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime-1}, ${curveDuration+1})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Setting an event exactly at the end of the curve should work.
+ should(
+ () => {
+ g.gain.setValueAtTime(1.0, startTime + curveDuration);
+ },
+ `g.gain.setValueAtTime(1.0, ${startTime + curveDuration})`)
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define('curve lengths', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let time = 0;
+
+ // Check for invalid curve lengths
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(Float32Array.from([]), time, 0.01);
+ },
+ 'setValueCurveAtTime([], ' + time + ', 0.01)')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(Float32Array.from([1]), time, 0.01);
+ },
+ 'setValueCurveAtTime([1], ' + time + ', 0.01)')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(() => {
+ g.gain.setValueCurveAtTime(Float32Array.from([1, 2]), time, 0.01);
+ }, 'setValueCurveAtTime([1,2], ' + time + ', 0.01)').notThrow();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html
new file mode 100644
index 0000000000..de8406244b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.setValueCurveAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode and for each time
+ // interval call setValueCurveAtTime() to set the values for the duration
+ // of the interval. Each curve is a sine wave, and we assume that the
+ // time interval is not an exact multiple of the period. This causes a
+ // discontinuity between time intervals which is used to test timing.
+
+ // Number of tests to run.
+ let numberOfTests = 20;
+
+ // Max allowed difference between the rendered data and the expected
+ // result. Because of the linear interpolation, the rendered curve isn't
+ // exactly the same as the reference. This value is experimentally
+ // determined.
+ let maxAllowedError = 3.7194e-6;
+
+ // The amplitude of the sine wave.
+ let sineAmplitude = 1;
+
+ // Frequency of the sine wave.
+ let freqHz = 440;
+
+ // Curve to use for setValueCurveAtTime().
+ let curve;
+
+ // Sets the curve data for the entire time interval.
+ function automation(value, startTime, endTime) {
+ gainNode.gain.setValueCurveAtTime(
+ curve, startTime, endTime - startTime);
+ }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setValueCurveAtTime() functionality.'
+ },
+ function(task, should) {
+ // The curve of values to use.
+ curve = createSineWaveArray(
+ timeInterval, freqHz, sineAmplitude, sampleRate);
+
+ createAudioGraphAndTest(
+ task, should, numberOfTests, sineAmplitude,
+ function(k) {
+ // Don't need to set the value.
+ },
+ automation, 'setValueCurveAtTime()', maxAllowedError,
+ createReferenceSineArray,
+ 2 * Math.PI * sineAmplitude * freqHz / sampleRate,
+ differenceErrorMetric);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html
new file mode 100644
index 0000000000..9084942f70
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html
@@ -0,0 +1,120 @@
+<!DOCTYPE html>
+<!--
+Tests that multiple audio-rate signals (AudioNode outputs) can be connected to an AudioParam
+and that these signals are summed, along with the AudioParam's intrinsic value.
+-->
+<html>
+ <head>
+ <title>
+ audioparam-summingjunction.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/mix-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 1;
+
+ let context = 0;
+
+ // Buffers used by the two gain controlling sources.
+ let linearRampBuffer;
+ let toneBuffer;
+ let toneFrequency = 440;
+
+ // Arbitrary non-zero value.
+ let baselineGain = 5;
+
+ // Allow for a small round-off error.
+ let maxAllowedError = 1e-6;
+
+ function checkResult(renderedBuffer, should) {
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ // Get buffer data from the two sources used to control gain.
+ let linearRampData = linearRampBuffer.getChannelData(0);
+ let toneData = toneBuffer.getChannelData(0);
+
+ let n = renderedBuffer.length;
+
+ should(n, 'Rendered signal length').beEqualTo(linearRampBuffer.length);
+
+ // Check that the rendered result exactly matches the sum of the
+ // intrinsic gain plus the two sources used to control gain. This is
+ // because we're changing the gain of a signal having constant value 1.
+ let success = true;
+ for (let i = 0; i < n; ++i) {
+ let expectedValue = baselineGain + linearRampData[i] + toneData[i];
+ let error = Math.abs(expectedValue - renderedData[i]);
+
+ if (error > maxAllowedError) {
+ success = false;
+ break;
+ }
+ }
+
+ should(
+ success,
+ 'Rendered signal matches sum of two audio-rate gain changing signals plus baseline gain')
+ .beTrue();
+ }
+
+ audit.define('test', function(task, should) {
+ let sampleFrameLength = sampleRate * lengthInSeconds;
+
+ // Create offline audio context.
+ context = new OfflineAudioContext(1, sampleFrameLength, sampleRate);
+
+ // Create buffer used by the source which will have its gain controlled.
+ let constantOneBuffer =
+ createConstantBuffer(context, sampleFrameLength, 1);
+ let constantSource = context.createBufferSource();
+ constantSource.buffer = constantOneBuffer;
+
+ // Create 1st buffer used to control gain (a linear ramp).
+ linearRampBuffer = createLinearRampBuffer(context, sampleFrameLength);
+ let gainSource1 = context.createBufferSource();
+ gainSource1.buffer = linearRampBuffer;
+
+        // Create 2nd buffer used to control gain (a simple sine wave tone).
+ toneBuffer =
+ createToneBuffer(context, toneFrequency, lengthInSeconds, 1);
+ let gainSource2 = context.createBufferSource();
+ gainSource2.buffer = toneBuffer;
+
+ // Create a gain node controlling the gain of constantSource and make
+ // the connections.
+ let gainNode = context.createGain();
+
+ // Intrinsic baseline gain.
+ // This gain value should be summed with gainSource1 and gainSource2.
+ gainNode.gain.value = baselineGain;
+
+ constantSource.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ // Connect two audio-rate signals to control the .gain AudioParam.
+ gainSource1.connect(gainNode.gain);
+ gainSource2.connect(gainNode.gain);
+
+ // Start all sources at time 0.
+ constantSource.start(0);
+ gainSource1.start(0);
+ gainSource2.start(0);
+
+ context.startRendering().then(buffer => {
+ checkResult(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js
new file mode 100644
index 0000000000..43279f91d6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js
@@ -0,0 +1,155 @@
+// Test k-rate vs a-rate AudioParams.
+//
+// |options| describes how the testing of the AudioParam should be done:
+//
+// sourceNodeName: name of source node to use for testing; defaults to
+// 'OscillatorNode'. If set to 'none', then no source node
+// is created for testing and it is assumed that the AudioNode
+// under test are sources and need to be started.
+// verifyPieceWiseConstant: if true, verify that the k-rate output is
+// piecewise constant for each render quantum.
+// nodeName: name of the AudioNode to be tested
+// nodeOptions: options to be used in the AudioNode constructor
+//
+// prefix: Prefix for all output messages (to make them unique for
+// testharness)
+//
+// rateSettings: A vector of dictionaries specifying how to set the automation
+// rate(s):
+// name: Name of the AudioParam
+// value: The automation rate for the AudioParam given by |name|.
+//
+// automations: A vector of dictionaries specifying how to automate each
+// AudioParam:
+// name: Name of the AudioParam
+//
+// methods: A vector of dictionaries specifying the automation methods to
+// be used for testing:
+// name: Automation method to call
+// options: Arguments for the automation method
+//
+// Testing is somewhat rudimentary. We create two nodes of the same type. One
+// node uses the default automation rates for each AudioParam (expecting them to
+// be a-rate). The second node sets the automation rate of AudioParams to
+// "k-rate". The set is specified by |options.rateSettings|.
+//
+// For both of these nodes, the same set of automation methods (given by
+// |options.automations|) is applied. A simple oscillator is connected to each
+// node which in turn are connected to different channels of an offline context.
+// Channel 0 is the k-rate node output; channel 1, the a-rate output; and
+// channel 3, the difference between the outputs.
+//
+// Success is declared if the difference signal is not exactly zero. This means
+// that the automations did different things, as expected.
+//
+// The promise from |startRendering| is returned.
+function doTest(context, should, options) {
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = null;
+
+ // Skip creating a source to drive the graph if |sourceNodeName| is 'none'.
+ // If |sourceNodeName| is given, use that, else default to OscillatorNode.
+ if (options.sourceNodeName !== 'none') {
+ src = new window[options.sourceNodeName || 'OscillatorNode'](context);
+ }
+
+ let kRateNode = new window[options.nodeName](context, options.nodeOptions);
+ let aRateNode = new window[options.nodeName](context, options.nodeOptions);
+ let inverter = new GainNode(context, {gain: -1});
+
+ // Set kRateNode filter to use k-rate params.
+ options.rateSettings.forEach(setting => {
+ kRateNode[setting.name].automationRate = setting.value;
+ // Mostly for documentation in the output. These should always
+ // pass.
+ should(
+ kRateNode[setting.name].automationRate,
+ `${options.prefix}: Setting ${
+ setting.name
+ }.automationRate to "${setting.value}"`)
+ .beEqualTo(setting.value);
+ });
+
+ // Run through all automations for each node separately. (Mostly to keep
+ // output of automations together.)
+ options.automations.forEach(param => {
+ param.methods.forEach(method => {
+      // Mostly for documentation in the output. These should never throw.
+ let message = `${param.name}.${method.name}(${method.options})`
+ should(() => {
+ kRateNode[param.name][method.name](...method.options);
+ }, options.prefix + ': k-rate node: ' + message).notThrow();
+ });
+ });
+ options.automations.forEach(param => {
+ param.methods.forEach(method => {
+      // Mostly for documentation in the output. These should never throw.
+ let message = `${param.name}.${method.name}(${method.options})`
+ should(() => {
+ aRateNode[param.name][method.name](...method.options);
+ }, options.prefix + ': a-rate node:' + message).notThrow();
+ });
+ });
+
+ // Connect the source, if specified.
+ if (src) {
+ src.connect(kRateNode);
+ src.connect(aRateNode);
+ }
+
+ // The k-rate result is channel 0, and the a-rate result is channel 1.
+ kRateNode.connect(merger, 0, 0);
+ aRateNode.connect(merger, 0, 1);
+
+ // Compute the difference between the a-rate and k-rate results and send
+ // that to channel 2.
+ kRateNode.connect(merger, 0, 2);
+ aRateNode.connect(inverter).connect(merger, 0, 2);
+
+ if (src) {
+ src.start();
+ } else {
+ // If there's no source, then assume the test nodes are sources and start
+ // them.
+ kRateNode.start();
+ aRateNode.start();
+ }
+
+ return context.startRendering().then(renderedBuffer => {
+ let kRateOutput = renderedBuffer.getChannelData(0);
+ let aRateOutput = renderedBuffer.getChannelData(1);
+ let diff = renderedBuffer.getChannelData(2);
+
+ // Some informative messages to print out values of the k-rate and
+ // a-rate outputs. These should always pass.
+ should(
+ kRateOutput, `${options.prefix}: Output of k-rate ${options.nodeName}`)
+ .beEqualToArray(kRateOutput);
+ should(
+ aRateOutput, `${options.prefix}: Output of a-rate ${options.nodeName}`)
+ .beEqualToArray(aRateOutput);
+
+ // The real test. If k-rate AudioParam is working correctly, the
+ // k-rate result MUST differ from the a-rate result.
+ should(
+ diff,
+ `${
+ options.prefix
+ }: Difference between a-rate and k-rate ${options.nodeName}`)
+ .notBeConstantValueOf(0);
+
+ if (options.verifyPieceWiseConstant) {
+ // Verify that the output from the k-rate parameter is step-wise
+ // constant.
+ for (let k = 0; k < kRateOutput.length; k += 128) {
+ should(
+ kRateOutput.slice(k, k + 128),
+ `${options.prefix} k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(kRateOutput[k]);
+ }
+ }
+ });
+}
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html
new file mode 100644
index 0000000000..a3c11994bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html
@@ -0,0 +1,167 @@
+<!doctype html>
+<html>
+ <head>
+ <title>AudioParam.automationRate tests</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // For each node that has an AudioParam, verify that the default
+ // |automationRate| has the expected value and that we can change it or
+ // throw an error if it can't be changed.
+
+ // Any valid sample rate is fine; we don't actually render anything in the
+ // tests.
+ let sampleRate = 8000;
+
+ let audit = Audit.createTaskRunner();
+
+      // Array of tests.  Each test is a dictionary consisting of the name of the
+ // node and an array specifying the AudioParam's of the node. This array
+ // in turn gives the name of the AudioParam, the default value for the
+ // |automationRate|, and whether it is fixed (isFixed).
+ const tests = [
+ {
+ nodeName: 'AudioBufferSourceNode',
+ audioParams: [
+ {name: 'detune', defaultRate: 'k-rate', isFixed: true},
+ {name: 'playbackRate', defaultRate: 'k-rate', isFixed: true}
+ ]
+ },
+ {
+ nodeName: 'BiquadFilterNode',
+ audioParams: [
+ {name: 'frequency', defaultRate: 'a-rate', isFixed: false},
+ {name: 'detune', defaultRate: 'a-rate', isFixed: false},
+ {name: 'Q', defaultRate: 'a-rate', isFixed: false},
+ {name: 'gain', defaultRate: 'a-rate', isFixed: false},
+ ]
+ },
+ {
+ nodeName: 'ConstantSourceNode',
+ audioParams: [{name: 'offset', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'DelayNode',
+ audioParams:
+ [{name: 'delayTime', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'DynamicsCompressorNode',
+ audioParams: [
+ {name: 'threshold', defaultRate: 'k-rate', isFixed: true},
+ {name: 'knee', defaultRate: 'k-rate', isFixed: true},
+ {name: 'ratio', defaultRate: 'k-rate', isFixed: true},
+ {name: 'attack', defaultRate: 'k-rate', isFixed: true},
+ {name: 'release', defaultRate: 'k-rate', isFixed: true}
+ ]
+ },
+ {
+ nodeName: 'GainNode',
+ audioParams: [{name: 'gain', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'OscillatorNode',
+ audioParams: [
+ {name: 'frequency', defaultRate: 'a-rate', isFixed: false},
+ {name: 'detune', defaultRate: 'a-rate', isFixed: false}
+ ]
+ },
+ {
+ nodeName: 'PannerNode',
+ audioParams: [
+ {name: 'positionX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationZ', defaultRate: 'a-rate', isFixed: false},
+ ]
+ },
+ {
+ nodeName: 'StereoPannerNode',
+ audioParams: [{name: 'pan', defaultRate: 'a-rate', isFixed: false}]
+ },
+ ];
+
+ tests.forEach(test => {
+ // Define a separate test for each test entry.
+ audit.define(test.nodeName, (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: sampleRate, sampleRate: sampleRate});
+ // Construct the node and test each AudioParam of the node.
+ let node = new window[test.nodeName](context);
+ test.audioParams.forEach(param => {
+ testAudioParam(
+ should, {nodeName: test.nodeName, node: node, param: param});
+ });
+
+ task.done();
+ });
+ });
+
+      // AudioListener needs its own special test since it's not a node.
+ audit.define('AudioListener', (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: sampleRate, sampleRate: sampleRate});
+
+ [{name: 'positionX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upZ', defaultRate: 'a-rate', isFixed: false},
+ ].forEach(param => {
+ testAudioParam(should, {
+ nodeName: 'AudioListener',
+ node: context.listener,
+ param: param
+ });
+ });
+ task.done();
+ });
+
+ audit.run();
+
+ function testAudioParam(should, options) {
+ let param = options.param;
+ let audioParam = options.node[param.name];
+ let defaultRate = param.defaultRate;
+
+ // Verify that the default value is correct.
+ should(
+ audioParam.automationRate,
+ `Default ${options.nodeName}.${param.name}.automationRate`)
+ .beEqualTo(defaultRate);
+
+ // Try setting the rate to a different rate. If the |automationRate|
+ // is fixed, expect an error. Otherwise, expect no error and expect
+ // the value is changed to the new value.
+ let newRate = defaultRate === 'a-rate' ? 'k-rate' : 'a-rate';
+ let setMessage = `Set ${
+ options.nodeName
+ }.${param.name}.automationRate to "${newRate}"`
+
+ if (param.isFixed) {
+ should(() => audioParam.automationRate = newRate, setMessage)
+ .throw(DOMException, 'InvalidStateError');
+ }
+ else {
+ should(() => audioParam.automationRate = newRate, setMessage)
+ .notThrow();
+ should(
+ audioParam.automationRate,
+ `${options.nodeName}.${param.name}.automationRate`)
+ .beEqualTo(newRate);
+ }
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html
new file mode 100644
index 0000000000..ac1da8cd51
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html
@@ -0,0 +1,155 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ cancelScheduledValues
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let sampleRate = 8000;
+ let renderFrames = 8000;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'cancel-time', description: 'handle cancelTime values'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderFrames,
+ sampleRate: sampleRate
+ });
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ should(
+ () => src.offset.cancelScheduledValues(-1),
+ 'cancelScheduledValues(-1)')
+ .throw(RangeError);
+
+ // These are TypeErrors because |cancelTime| is a
+ // double, not unrestricted double.
+ should(
+ () => src.offset.cancelScheduledValues(NaN),
+ 'cancelScheduledValues(NaN)')
+ .throw(TypeError);
+
+ should(
+ () => src.offset.cancelScheduledValues(Infinity),
+ 'cancelScheduledValues(Infinity)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'cancel1', description: 'cancel setValueCurve'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderFrames,
+ sampleRate: sampleRate
+ });
+
+ let src = new ConstantSourceNode(context);
+ let gain = new GainNode(context);
+ src.connect(gain).connect(context.destination);
+
+ // Initial time and value for first automation (setValue)
+ let time0 = 0;
+ let value0 = 0.5;
+
+ // Time and duration of the setValueCurve. We'll also schedule a
+ // setValue at the same time.
+ let value1 = 1.5;
+ let curveStartTime = 0.25;
+ let curveDuration = 0.25;
+
+ // Time at which to cancel events
+ let cancelTime = 0.3;
+
+ // Time and value for event added after cancelScheduledValues has
+ // been called.
+ let time2 = curveStartTime + curveDuration / 2;
+ let value2 = 3;
+
+ // Self-consistency checks for the test.
+ should(cancelTime, 'cancelTime is after curve start')
+ .beGreaterThan(curveStartTime);
+ should(cancelTime, 'cancelTime is before curve ends')
+ .beLessThan(curveStartTime + curveDuration);
+
+ // These assertions are just to show what's happening
+ should(
+ () => gain.gain.setValueAtTime(value0, time0),
+ `gain.gain.setValueAtTime(${value0}, ${time0})`)
+ .notThrow();
+            // setValue at the same time as the curve, to test that this event
+            // wasn't removed.
+ should(
+ () => gain.gain.setValueAtTime(value1, curveStartTime),
+ `gain.gain.setValueAtTime(${value1}, ${curveStartTime})`)
+ .notThrow();
+
+ should(
+ () => gain.gain.setValueCurveAtTime(
+ [1, -1], curveStartTime, curveDuration),
+ `gain.gain.setValueCurveAtTime(..., ${curveStartTime}, ${
+ curveDuration})`)
+ .notThrow();
+
+ // An event after the curve to verify this is removed.
+ should(
+ () => gain.gain.setValueAtTime(
+ 99, curveStartTime + curveDuration),
+ `gain.gain.setValueAtTime(99, ${
+ curveStartTime + curveDuration})`)
+ .notThrow();
+
+ // Cancel events now.
+ should(
+ () => gain.gain.cancelScheduledValues(cancelTime),
+ `gain.gain.cancelScheduledValues(${cancelTime})`)
+ .notThrow();
+
+ // Simple check that the setValueCurve is gone, by scheduling
+ // something in the middle of the (now deleted) event
+ should(
+ () => gain.gain.setValueAtTime(value2, time2),
+ `gain.gain.setValueAtTime(${value2}, ${time2})`)
+ .notThrow();
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let audio = buffer.getChannelData(0);
+
+ // After canceling events, verify that the outputs have the
+ // desired values.
+ let curveFrame = curveStartTime * context.sampleRate;
+ should(
+ audio.slice(0, curveFrame), `output[0:${curveFrame - 1}]`)
+ .beConstantValueOf(value0);
+
+ let time2Frame = time2 * context.sampleRate;
+ should(
+ audio.slice(curveFrame, time2Frame),
+ `output[${curveFrame}:${time2Frame - 1}]`)
+ .beConstantValueOf(value1);
+
+ should(audio.slice(time2Frame), `output[${time2Frame}:]`)
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html
new file mode 100644
index 0000000000..b846f982ab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html
@@ -0,0 +1,411 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Handling of Event Insertion
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Use a power of two for the sample rate so there's no round-off in
+ // computing time from frame.
+ let sampleRate = 16384;
+
+ audit.define(
+ {label: 'Insert same event at same time'}, (task, should) => {
+ // Context for testing.
+ let context = new OfflineAudioContext(
+ {length: 16384, sampleRate: sampleRate});
+
+ // The source node to use. Automations will be scheduled here.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.connect(context.destination);
+
+ // An array of tests to be done. Each entry specifies the event
+ // type and the event time. The events are inserted in the order
+ // given (in |values|), and the second event should be inserted
+ // after the first one, as required by the spec.
+ let testCases = [
+ {
+ event: 'setValueAtTime',
+ frame: RENDER_QUANTUM_FRAMES,
+ values: [99, 1],
+ outputTestFrame: RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 1
+ },
+ {
+ event: 'linearRampToValueAtTime',
+ frame: 2 * RENDER_QUANTUM_FRAMES,
+ values: [99, 2],
+ outputTestFrame: 2 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 2
+ },
+ {
+ event: 'exponentialRampToValueAtTime',
+ frame: 3 * RENDER_QUANTUM_FRAMES,
+ values: [99, 3],
+ outputTestFrame: 3 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 3
+ },
+ {
+ event: 'setValueCurveAtTime',
+ frame: 3 * RENDER_QUANTUM_FRAMES,
+ values: [[3, 4]],
+ extraArgs: RENDER_QUANTUM_FRAMES / context.sampleRate,
+ outputTestFrame: 4 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 4
+ },
+ {
+ event: 'setValueAtTime',
+ frame: 5 * RENDER_QUANTUM_FRAMES - 1,
+ values: [99, 1, 5],
+ outputTestFrame: 5 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 5
+ }
+ ];
+
+ testCases.forEach(entry => {
+ entry.values.forEach(value => {
+ let eventTime = entry.frame / context.sampleRate;
+ let message = eventToString(
+ entry.event, value, eventTime, entry.extraArgs);
+ // This is mostly to print out the event that is getting
+ // inserted. It should never ever throw.
+ should(() => {
+ src.offset[entry.event](value, eventTime, entry.extraArgs);
+ }, message).notThrow();
+ });
+ });
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+
+ // Look through the test cases to figure out what the correct
+ // output values should be.
+ testCases.forEach(entry => {
+ let expected = entry.expectedOutputValue;
+ let frame = entry.outputTestFrame;
+ let time = frame / context.sampleRate;
+ should(
+ audio[frame], `Output at frame ${frame} (time ${time})`)
+ .beEqualTo(expected);
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Linear + Expo',
+ description: 'Different events at same time'
+ },
+ (task, should) => {
+ // Should be a linear ramp up to the event time, and after a
+ // constant value because the exponential ramp has ended.
+ let testCase = [
+ {event: 'linearRampToValueAtTime', value: 2, relError: 0},
+ {event: 'setValueAtTime', value: 99},
+ {event: 'exponentialRampToValueAtTime', value: 3},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Linear+Expo: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(expectConstant(prefix, should, eventFrame, testCase))
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Expo + Linear',
+ description: 'Different events at same time',
+ },
+ (task, should) => {
+ // Should be an exponential ramp up to the event time, and after a
+ // constant value because the linear ramp has ended.
+ let testCase = [
+ {
+ event: 'exponentialRampToValueAtTime',
+ value: 3,
+ relError: 4.2533e-6
+ },
+ {event: 'setValueAtTime', value: 99},
+ {event: 'linearRampToValueAtTime', value: 2},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Expo+Linear: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(expectConstant(prefix, should, eventFrame, testCase))
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Linear + SetTarget',
+ description: 'Different events at same time',
+ },
+ (task, should) => {
+ // Should be a linear ramp up to the event time, and then a
+ // decaying value.
+ let testCase = [
+ {event: 'linearRampToValueAtTime', value: 3, relError: 0},
+ {event: 'setValueAtTime', value: 100},
+ {event: 'setTargetAtTime', value: 0, extraArgs: 0.1},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Linear+SetTarget: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+ let prefix = 'Linear+SetTarget: ';
+ let eventTime = eventFrame / sampleRate;
+ let expectedValue = methodMap[testCase[0].event](
+ (eventFrame - 1) / sampleRate, 1, 0, testCase[0].value,
+ eventTime);
+ should(
+ audio[eventFrame - 1],
+ prefix +
+ `At time ${
+ (eventFrame - 1) / sampleRate
+ } (frame ${eventFrame - 1}) output`)
+ .beCloseTo(
+ expectedValue,
+ {threshold: testCase[0].relError || 0});
+
+ // The setValue should have taken effect
+ should(
+ audio[eventFrame],
+ prefix +
+ `At time ${eventTime} (frame ${eventFrame}) output`)
+ .beEqualTo(testCase[1].value);
+
+ // The final event is setTarget. Compute the expected output.
+ let actual = audio.slice(eventFrame);
+ let expected = new Float32Array(actual.length);
+ for (let k = 0; k < expected.length; ++k) {
+ let t = (eventFrame + k) / sampleRate;
+ expected[k] = audioParamSetTarget(
+ t, testCase[1].value, eventTime, testCase[2].value,
+ testCase[2].extraArgs);
+ }
+ should(
+ actual,
+ prefix +
+ `At time ${eventTime} (frame ${
+ eventFrame
+ }) and later`)
+ .beCloseToArray(expected, {relativeThreshold: 2.6694e-7});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Multiple linear ramps at the same time',
+ description: 'Verify output'
+ },
+ (task, should) => {
+ testMultipleSameEvents(should, {
+ method: 'linearRampToValueAtTime',
+ prefix: 'Multiple linear ramps: ',
+ threshold: 0
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Multiple exponential ramps at the same time',
+ description: 'Verify output'
+ },
+ (task, should) => {
+ testMultipleSameEvents(should, {
+ method: 'exponentialRampToValueAtTime',
+ prefix: 'Multiple exponential ramps: ',
+ threshold: 5.3924e-7
+ }).then(() => task.done());
+ });
+
+ audit.run();
+
+ // Takes a list of |testCases| consisting of automation methods and
+ // schedules them to occur at |eventFrame|. |prefix| is a prefix for
+ // messages produced by |should|.
+ //
+ // Each item in |testCases| is a dictionary with members:
+ // event - the name of automation method to be inserted,
+ // value - the value for the event,
+ // extraArgs - extra arguments if the event needs more than the value
+ // and time (such as setTargetAtTime).
+ function testEventInsertion(prefix, should, eventFrame, testCases) {
+ let context = new OfflineAudioContext(
+ {length: 4 * RENDER_QUANTUM_FRAMES, sampleRate: sampleRate});
+
+ // The source node to use. Automations will be scheduled here.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.connect(context.destination);
+
+ // Initialize value to 1 at the beginning.
+ src.offset.setValueAtTime(1, 0);
+
+ // Test automations have this event time.
+ let eventTime = eventFrame / context.sampleRate;
+
+ // Sanity check that context is long enough for the test
+ should(
+ eventFrame < context.length,
+ prefix + 'Context length is long enough for the test')
+ .beTrue();
+
+ // Automations to be tested. The first event should be the actual
+ // output up to the event time. The last event should be the final
+ // output from the event time and onwards.
+ testCases.forEach(entry => {
+ should(
+ () => {
+ src.offset[entry.event](
+ entry.value, eventTime, entry.extraArgs);
+ },
+ prefix +
+ eventToString(
+ entry.event, entry.value, eventTime, entry.extraArgs))
+ .notThrow();
+ });
+
+ src.start();
+
+ return context.startRendering();
+ }
+
+ // Verify output of test where the final value of the automation is
+ // expected to be constant.
+ function expectConstant(prefix, should, eventFrame, testCases) {
+ return audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+
+ let eventTime = eventFrame / sampleRate;
+
+ // Compute the expected value of the first automation one frame before
+ // the event time. This is a quick check that the correct automation
+ // was done.
+ let expectedValue = methodMap[testCases[0].event](
+ (eventFrame - 1) / sampleRate, 1, 0, testCases[0].value,
+ eventTime);
+ should(
+ audio[eventFrame - 1],
+ prefix +
+ `At time ${
+ (eventFrame - 1) / sampleRate
+ } (frame ${eventFrame - 1}) output`)
+ .beCloseTo(expectedValue, {threshold: testCases[0].relError});
+
+ // The last event scheduled is expected to set the value for all
+ // future times. Verify that the output has the expected value.
+ should(
+ audio.slice(eventFrame),
+ prefix +
+ `At time ${eventTime} (frame ${
+ eventFrame
+ }) and later, output`)
+ .beConstantValueOf(testCases[testCases.length - 1].value);
+ };
+ }
+
+      // Test output when multiple events of the same type are scheduled at
+      // the same time.
+ function testMultipleSameEvents(should, options) {
+ let {method, prefix, threshold} = options;
+
+ // Context for testing.
+ let context =
+ new OfflineAudioContext({length: 16384, sampleRate: sampleRate});
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ let initialValue = 1;
+
+ // Informative print
+ should(() => {
+ src.offset.setValueAtTime(initialValue, 0);
+ }, prefix + `setValueAtTime(${initialValue}, 0)`).notThrow();
+
+ let frame = 64;
+ let time = frame / context.sampleRate;
+ let values = [2, 7, 10];
+
+        // Schedule several events of the same type at the same time, but
+        // with different values.
+
+ values.forEach(value => {
+ // Informative prints to show what we're doing in this test.
+ should(
+ () => {
+ src.offset[method](value, time);
+ },
+ prefix +
+ eventToString(
+ method,
+ value,
+ time,
+ ))
+ .notThrow();
+ })
+
+ src.start();
+
+ return context.startRendering().then(audioBuffer => {
+ let actual = audioBuffer.getChannelData(0);
+
+ // The output should be a ramp from time 0 to the event time. But we
+ // only verify the value just before the event time, which should be
+ // fairly close to values[0]. (But compute the actual expected value
+ // to be sure.)
+ let expected = methodMap[method](
+ (frame - 1) / context.sampleRate, initialValue, 0, values[0],
+ time);
+ should(actual[frame - 1], prefix + `Output at frame ${frame - 1}`)
+ .beCloseTo(expected, {threshold: threshold, precision: 3});
+
+ // Any other values shouldn't show up in the output. Only the value
+ // from last event should appear. We only check the value at the
+ // event time.
+ should(
+ actual[frame], prefix + `Output at frame ${frame} (${time} sec)`)
+ .beEqualTo(values[values.length - 1]);
+ });
+ }
+
+ // Convert an automation method to a string for printing.
+ function eventToString(method, value, time, extras) {
+ let string = method + '(';
+ string += (value instanceof Array) ? `[${value}]` : value;
+ string += ', ' + time;
+ if (extras) {
+ string += ', ' + extras;
+ }
+ string += ')';
+ return string;
+ }
+
+ // Map between the automation method name and a function that computes the
+ // output value of the automation method.
+ const methodMap = {
+ linearRampToValueAtTime: audioParamLinearRamp,
+ exponentialRampToValueAtTime: audioParamExponentialRamp,
+ setValueAtTime: (t, v) => v
+ };
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html
new file mode 100644
index 0000000000..0b94bd70f9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html
@@ -0,0 +1,164 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for AudioBufferSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      // Fairly arbitrary sampleRate and somewhat arbitrary duration
+ const sampleRate = 8000;
+ const testDuration = 0.25;
+
+ [['playbackRate', [1, 0], [2, testDuration]],
+ ['detune', [-1200, 0], [1200, testDuration]]]
+ .forEach(param => {
+ audit.define(
+ {label: param[0], description: `AudioBufferSource ${param[0]}`},
+ async (task, should) => {
+ await doTest(should, {
+ prefix: task.label,
+ paramName: param[0],
+ startValue: param[1],
+ endValue: param[2]
+ });
+ task.done();
+ });
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test k-rate automation of AudioBufferSourceNode with connected
+ // input.
+ //
+ // A reference source node is created with an automation on the
+ // selected AudioParam. For simplicity, we just use a linear ramp from
+ // the minValue to the maxValue of the AudioParam.
+ //
+ // The test node has an input signal connected to the AudioParam. This
+ // input signal is created to match the automation on the reference
+ // node.
+ //
+ // Finally, the output from the two nodes must be identical if k-rate
+ // inputs are working correctly.
+ //
+ // Options parameter is a dictionary with the following required
+ // members:
+ // prefix - prefix to use for the messages.
+ // paramName - Name of the AudioParam to be tested
+
+ let {prefix, paramName, startValue, endValue} = options;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Linear ramp to use for the buffer sources
+ let ramp = createLinearRampBuffer(context, context.length);
+
+ // Create the reference and test nodes.
+ let refNode;
+ let tstNode;
+
+ const nodeOptions = {buffer: ramp};
+
+ should(
+ () => refNode = new AudioBufferSourceNode(context, nodeOptions),
+ `${prefix}: refNode = new AudioBufferSourceNode(context, ${
+ JSON.stringify(nodeOptions)})`)
+ .notThrow();
+
+ should(
+ () => tstNode = new AudioBufferSourceNode(context, nodeOptions),
+ `${prefix}: tstNode = new AudioBufferSourceNode(context, ${
+ JSON.stringify(nodeOptions)})`)
+ .notThrow();
+
+
+ // Automate the AudioParam of the reference node with a linear ramp
+ should(
+ () => refNode[paramName].setValueAtTime(...startValue),
+ `${prefix}: refNode[${paramName}].setValueAtTime(${
+ startValue[0]}, ${startValue[1]})`)
+ .notThrow();
+
+ should(
+ () => refNode[paramName].linearRampToValueAtTime(...endValue),
+ `${prefix}: refNode[${paramName}].linearRampToValueAtTime(${
+ endValue[0]}, ${endValue[1]})`)
+ .notThrow();
+
+
+ // Create the input node and automate it so that it's output when added
+ // to the intrinsic value of the AudioParam we get the same values as
+ // the automations on the reference node.
+
+ // Compute the start and end values based on the defaultValue of the
+ // param and the desired startValue and endValue. The input is added to
+ // the intrinsic value of the AudioParam, so we need to account for
+ // that.
+
+ let mod;
+ should(
+ () => mod = new ConstantSourceNode(context, {offset: 0}),
+ `${prefix}: mod = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+
+ let modStart = startValue[0] - refNode[paramName].defaultValue;
+ let modEnd = endValue[0] - refNode[paramName].defaultValue;
+ should(
+ () => mod.offset.setValueAtTime(modStart, startValue[1]),
+ `${prefix}: mod.offset.setValueAtTime(${modStart}, ${
+ startValue[1]})`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(modEnd, endValue[1]),
+ `${prefix}: mod.offset.linearRampToValueAtTime(${modEnd}, ${
+ endValue[1]})`)
+ .notThrow();
+
+ // Connect up everything.
+ should(
+ () => mod.connect(tstNode[paramName]),
+ `${prefix}: mod.connect(tstNode[${paramName}])`)
+ .notThrow();
+
+ refNode.connect(merger, 0, 0);
+ tstNode.connect(merger, 0, 1);
+
+ // Go!
+ refNode.start();
+ tstNode.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Quick sanity check that output isn't zero. This means we messed up
+ // the connections or automations or the buffer source.
+ should(expected, `Expected k-rate ${paramName} AudioParam with input`)
+ .notBeConstantValueOf(0);
+ should(actual, `Actual k-rate ${paramName} AudioParam with input`)
+ .notBeConstantValueOf(0);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate ${paramName} AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html
new file mode 100644
index 0000000000..4d2eb40d55
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html
@@ -0,0 +1,77 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams with inputs for AudioWorkletNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Use the worklet gain node to test k-rate parameters.
+ const filePath =
+ '../the-audioworklet-interface/processors/gain-processor.js';
+
+ // Context for testing
+ let context;
+
+ audit.define('Create Test Worklet', (task, should) => {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+
+        // Only need a few render quanta to verify things are working.
+ const testDuration = 4 * 128 / sampleRate;
+
+ context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Construction of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('AudioWorklet k-rate AudioParam', async (task, should) => {
+ let src = new ConstantSourceNode(context);
+ let kRateNode = new AudioWorkletNode(context, 'gain');
+ src.connect(kRateNode).connect(context.destination);
+
+ let kRateParam = kRateNode.parameters.get('gain');
+ kRateParam.automationRate = 'k-rate';
+ kRateParam.value = 0;
+
+ let mod = new ConstantSourceNode(context);
+ mod.offset.setValueAtTime(0, 0);
+ mod.offset.linearRampToValueAtTime(
+ 10, context.length / context.sampleRate);
+ mod.connect(kRateParam);
+
+ mod.start();
+ src.start();
+
+ const audioBuffer = await context.startRendering();
+ let output = audioBuffer.getChannelData(0);
+
+ // Verify that the output isn't constantly zero.
+ should(output, 'output').notBeConstantValueOf(0);
+ // Verify that the output from the worklet is step-wise
+ // constant.
+ for (let k = 0; k < output.length; k += 128) {
+ should(output.slice(k, k + 128), ` k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html
new file mode 100644
index 0000000000..e891da6da2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html
@@ -0,0 +1,79 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of AudioWorkletNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Use the worklet gain node to test k-rate parameters.
+ const filePath =
+ '../the-audioworklet-interface/processors/gain-processor.js';
+
+ // Context for testing
+ let context;
+
+ audit.define('Create Test Worklet', (task, should) => {
+
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+
+        // Only need a few render quanta to verify things are working.
+ const testDuration = 4 * 128 / sampleRate;
+
+ context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Construction of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('AudioWorklet k-rate AudioParam', (task, should) => {
+ let src = new ConstantSourceNode(context);
+
+ let kRateNode = new AudioWorkletNode(context, 'gain');
+
+ src.connect(kRateNode).connect(context.destination);
+
+ let kRateParam = kRateNode.parameters.get('gain');
+ kRateParam.automationRate = 'k-rate';
+
+ // Automate the gain
+ kRateParam.setValueAtTime(0, 0);
+ kRateParam.linearRampToValueAtTime(
+ 10, context.length / context.sampleRate);
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let output = audioBuffer.getChannelData(0);
+
+ // Verify that the output from the worklet is step-wise
+ // constant.
+ for (let k = 0; k < output.length; k += 128) {
+ should(
+ output.slice(k, k + 128),
+ ` k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html
new file mode 100644
index 0000000000..ab9df8740f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html
@@ -0,0 +1,456 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam Inputs for BiquadFilterNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // sampleRate and duration are fairly arbitrary. We use low values to
+ // limit the complexity of the test.
+ let sampleRate = 8192;
+ let testDuration = 0.5;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'Frequency AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test frequency AudioParam using a lowpass filter whose bandwidth
+ // is initially larger than the oscillator frequency. Then automate
+ // the frequency to 0 so that the output of the filter is 0 (because
+ // the cutoff is 0).
+ let oscFrequency = 440;
+
+ let options = {
+ sampleRate: sampleRate,
+ paramName: 'frequency',
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ filterOptions: {type: 'lowpass', frequency: 0},
+ autoStart:
+ {method: 'setValueAtTime', args: [2 * oscFrequency, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [0, testDuration / 4]
+ }
+ };
+
+ let buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+ let halfLength = expected.length / 2;
+
+ // Sanity check. The expected output should not be zero for
+ // the first half, but should be zero for the second half
+ // (because the filter bandwidth is exactly 0).
+ const prefix = 'Expected k-rate frequency with automation';
+
+ should(
+ expected.slice(0, halfLength),
+ `${prefix} output[0:${halfLength - 1}]`)
+ .notBeConstantValueOf(0);
+ should(
+ expected.slice(expected.length),
+ `${prefix} output[${halfLength}:]`)
+ .beConstantValueOf(0);
+
+ // Outputs should be the same. Break the message into two
+ // parts so we can see the expected outputs.
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Q AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test Q AudioParam. Use a bandpass filter whose center frequency
+ // is fairly far from the oscillator frequency. Then start with a Q
+ // value of 0 (so everything goes through) and then increase Q to
+ // some large value such that the out-of-band signals are basically
+ // cutoff.
+ let frequency = 440;
+ let oscFrequency = 4 * frequency;
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'Q',
+ filterOptions: {type: 'bandpass', frequency: frequency, Q: 0},
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [100, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Gain AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test gain AudioParam. Use a peaking filter with a large Q so the
+ // peak is narrow with a center frequency the same as the oscillator
+ // frequency. Start with a gain of 0 so everything goes through and
+ // then ramp the gain down to -100 so that the oscillator is
+ // filtered out.
+ let oscFrequency = 4 * 440;
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'gain',
+ filterOptions:
+ {type: 'peaking', frequency: oscFrequency, Q: 100, gain: 0},
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [-100, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Detune AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test detune AudioParam. The basic idea is the same as the
+            // frequency test above, but instead of automating the frequency, we
+ // automate the detune value so that initially the filter cutuff is
+ // unchanged and then changing the detune until the cutoff goes to 1
+ // Hz, which would cause the oscillator to be filtered out.
+ let oscFrequency = 440;
+ let filterFrequency = 5 * oscFrequency;
+
+ // For a detune value d, the computed frequency, fc, of the filter
+ // is fc = f*2^(d/1200), where f is the frequency of the filter. Or
+ // d = 1200*log2(fc/f). Compute the detune value to produce a final
+ // cutoff frequency of 1 Hz.
+ let detuneEnd = 1200 * Math.log2(1 / filterFrequency);
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'detune',
+ filterOptions: {
+ type: 'lowpass',
+ frequency: filterFrequency,
+ detune: 0,
+ gain: 0
+ },
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [detuneEnd, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define('All k-rate inputs', async (task, should) => {
+ // Test the case where all AudioParams are set to k-rate with an input
+ // to each AudioParam. Similar to the above tests except all the params
+ // are k-rate.
+ let testFrames = testDuration * sampleRate;
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: sampleRate, length: testFrames});
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // The peaking filter uses all four AudioParams, so this is the node to
+ // test.
+ let filterOptions =
+ {type: 'peaking', frequency: 0, detune: 0, gain: 0, Q: 0};
+ let refNode;
+ should(
+ () => refNode = new BiquadFilterNode(context, filterOptions),
+ `Create: refNode = new BiquadFilterNode(context, ${
+ JSON.stringify(filterOptions)})`)
+ .notThrow();
+
+ let tstNode;
+ should(
+ () => tstNode = new BiquadFilterNode(context, filterOptions),
+ `Create: tstNode = new BiquadFilterNode(context, ${
+ JSON.stringify(filterOptions)})`)
+ .notThrow();
+ ;
+
+ // Make all the AudioParams k-rate.
+ ['frequency', 'Q', 'gain', 'detune'].forEach(param => {
+ should(
+ () => refNode[param].automationRate = 'k-rate',
+ `Set rate: refNode[${param}].automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => tstNode[param].automationRate = 'k-rate',
+ `Set rate: tstNode[${param}].automationRate = 'k-rate'`)
+ .notThrow();
+ });
+
+ // One input for each AudioParam.
+ let mod = {};
+ ['frequency', 'Q', 'gain', 'detune'].forEach(param => {
+ should(
+ () => mod[param] = new ConstantSourceNode(context, {offset: 0}),
+ `Create: mod[${
+ param}] = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+ ;
+ should(
+ () => mod[param].offset.automationRate = 'a-rate',
+ `Set rate: mod[${param}].offset.automationRate = 'a-rate'`)
+ .notThrow();
+ });
+
+ // Set up automations for refNode. We want to start the filter with
+ // parameters that let the oscillator signal through more or less
+ // untouched. Then change the filter parameters to filter out the
+        // oscillator. What happens in between doesn't really matter for this
+ // test. Hence, set the initial parameters with a center frequency well
+        // above the oscillator and a Q and gain of 0 to pass everything.
+ [['frequency', [4 * src.frequency.value, 0]], ['Q', [0, 0]],
+ ['gain', [0, 0]], ['detune', [4 * 1200, 0]]]
+ .forEach(param => {
+ should(
+ () => refNode[param[0]].setValueAtTime(...param[1]),
+ `Automate 0: refNode.${param[0]}.setValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ should(
+ () => mod[param[0]].offset.setValueAtTime(...param[1]),
+ `Automate 0: mod[${param[0]}].offset.setValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ });
+
+ // Now move the filter frequency to the oscillator frequency with a high
+ // Q and very low gain to remove the oscillator signal.
+ [['frequency', [src.frequency.value, testDuration / 4]],
+ ['Q', [40, testDuration / 4]], ['gain', [-100, testDuration / 4]], [
+ 'detune', [0, testDuration / 4]
+ ]].forEach(param => {
+ should(
+ () => refNode[param[0]].linearRampToValueAtTime(...param[1]),
+ `Automate 1: refNode[${param[0]}].linearRampToValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ should(
+ () => mod[param[0]].offset.linearRampToValueAtTime(...param[1]),
+ `Automate 1: mod[${param[0]}].offset.linearRampToValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ });
+
+ // Connect everything
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ src.start();
+ for (let param in mod) {
+ should(
+ () => mod[param].connect(tstNode[param]),
+ `Connect: mod[${param}].connect(tstNode.${param})`)
+ .notThrow();
+ }
+
+ for (let param in mod) {
+ should(() => mod[param].start(), `Start: mod[${param}].start()`)
+ .notThrow();
+ }
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Sanity check that the output isn't all zeroes.
+ should(actual, 'All k-rate AudioParams').notBeConstantValueOf(0);
+ should(actual, 'All k-rate AudioParams').beCloseToArray(expected, {
+ absoluteThreshold: 0
+ });
+
+ task.done();
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test that a k-rate AudioParam with an input reads the input value and
+ // is actually k-rate.
+ //
+ // A refNode is created with an automation timeline. This is the
+ // expected output.
+ //
+ // The testNode is the same, but it has a node connected to the k-rate
+ // AudioParam. The input to the node is an a-rate ConstantSourceNode
+        // whose output is automated in exactly the same way as the refNode.  If
+ // the test passes, the outputs of the two nodes MUST match exactly.
+
+ // The options argument MUST contain the following members:
+ // sampleRate - the sample rate for the offline context
+ // testDuration - duration of the offline context, in sec.
+ // paramName - the name of the AudioParam to be tested
+ // oscFrequency - frequency of oscillator source
+ // filterOptions - options used to construct the BiquadFilterNode
+ // autoStart - information about how to start the automation
+ // autoEnd - information about how to end the automation
+ //
+ // The autoStart and autoEnd options are themselves dictionaries with
+ // the following required members:
+ // method - name of the automation method to be applied
+ // args - array of arguments to be supplied to the method.
+ let {
+ sampleRate,
+ paramName,
+ oscFrequency,
+ autoStart,
+ autoEnd,
+ testDuration,
+ filterOptions
+ } = options;
+
+ let testFrames = testDuration * sampleRate;
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: sampleRate, length: testFrames});
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Any calls to |should| are meant to be informational so we can see
+ // what nodes are created and the automations used.
+ let src;
+
+ // Create the source.
+ should(
+ () => {
+ src = new OscillatorNode(context, {frequency: oscFrequency});
+ },
+ `${paramName}: new OscillatorNode(context, {frequency: ${
+ oscFrequency}})`)
+ .notThrow();
+
+ // The refNode automates the AudioParam with k-rate automations, no
+ // inputs.
+ let refNode;
+ should(
+ () => {
+ refNode = new BiquadFilterNode(context, filterOptions);
+ },
+ `Reference BiquadFilterNode(c, ${JSON.stringify(filterOptions)})`)
+ .notThrow();
+
+ refNode[paramName].automationRate = 'k-rate';
+
+ // Set up automations for the reference node.
+ should(
+ () => {
+ refNode[paramName][autoStart.method](...autoStart.args);
+ },
+ `refNode.${paramName}.${autoStart.method}(${autoStart.args})`)
+ .notThrow();
+ should(
+ () => {
+ refNode[paramName][autoEnd.method](...autoEnd.args);
+ },
+ `refNode.${paramName}.${autoEnd.method}.(${autoEnd.args})`)
+ .notThrow();
+
+ // The tstNode does the same automation, but it comes from the input
+ // connected to the AudioParam.
+ let tstNode;
+ should(
+ () => {
+ tstNode = new BiquadFilterNode(context, filterOptions);
+ },
+ `Test BiquadFilterNode(context, ${JSON.stringify(filterOptions)})`)
+ .notThrow();
+ tstNode[paramName].automationRate = 'k-rate';
+
+ // Create the input to the AudioParam of the test node. The output of
+ // this node MUST have the same set of automations as the reference
+ // node, and MUST be a-rate to make sure we're handling k-rate inputs
+ // correctly.
+ let mod = new ConstantSourceNode(context);
+ mod.offset.automationRate = 'a-rate';
+ should(
+ () => {
+ mod.offset[autoStart.method](...autoStart.args);
+ },
+ `${paramName}: mod.offset.${autoStart.method}(${autoStart.args})`)
+ .notThrow();
+ should(
+ () => {
+ mod.offset[autoEnd.method](...autoEnd.args);
+ },
+ `${paramName}: mod.offset.${autoEnd.method}(${autoEnd.args})`)
+ .notThrow();
+
+ // Create graph
+ mod.connect(tstNode[paramName]);
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ // Run!
+ src.start();
+ mod.start();
+ return context.startRendering();
+ }
+
+ function checkForSameOutput(should, paramName, actual, expected) {
+ let halfLength = expected.length / 2;
+
+ // Outputs should be the same. We break the check into halves so we can
+ // see the expected outputs. Mostly for a simple visual check that the
+ // output from the second half is small because the tests generally try
+ // to filter out the signal so that the last half of the output is
+ // small.
+ should(
+ actual.slice(0, halfLength),
+ `k-rate ${paramName} with input: output[0,${halfLength}]`)
+ .beCloseToArray(
+ expected.slice(0, halfLength), {absoluteThreshold: 0});
+ should(
+ actual.slice(halfLength),
+ `k-rate ${paramName} with input: output[${halfLength}:]`)
+ .beCloseToArray(expected.slice(halfLength), {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html
new file mode 100644
index 0000000000..85ae4f175f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html
@@ -0,0 +1,111 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of BiquadFilterNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {task: 'BiquadFilter-0', label: 'Biquad k-rate AudioParams (all)'},
+ (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ nodeName: 'BiquadFilterNode',
+ nodeOptions: {type: 'lowpass'},
+ prefix: 'All k-rate params',
+ // Set all AudioParams to k-rate
+ rateSettings: [
+ {name: 'Q', value: 'k-rate'},
+ {name: 'detune', value: 'k-rate'},
+ {name: 'frequency', value: 'k-rate'},
+ {name: 'gain', value: 'k-rate'},
+ ],
+ // Automate just the frequency
+ automations: [{
+ name: 'frequency',
+ methods: [
+ {name: 'setValueAtTime', options: [350, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [0, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ // Define a test where we verify that a k-rate audio param produces
+ // different results from an a-rate audio param for each of the audio
+ // params of a biquad.
+ //
+ // Each entry gives the name of the AudioParam, an initial value to be
+ // used with setValueAtTime, and a final value to be used with
+ // linearRampToValueAtTime. (See |doTest| for details as well.)
+
+ [{name: 'Q',
+ initial: 1,
+ final: 10
+ },
+ {name: 'detune',
+ initial: 0,
+ final: 1200
+ },
+ {name: 'frequency',
+ initial: 350,
+ final: 0
+ },
+ {name: 'gain',
+ initial: 10,
+ final: 0
+ }].forEach(paramProperty => {
+ audit.define('Biquad k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ nodeName: 'BiquadFilterNode',
+ nodeOptions: {type: 'peaking', Q: 1, gain: 10},
+ prefix: `k-rate ${paramProperty.name}`,
+ // Just set the frequency to k-rate
+ rateSettings: [
+ {name: paramProperty.name, value: 'k-rate'},
+ ],
+ // Automate just the given AudioParam
+ automations: [{
+ name: paramProperty.name,
+ methods: [
+ {name: 'setValueAtTime', options: [paramProperty.initial, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [paramProperty.final, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html
new file mode 100644
index 0000000000..730f03e561
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html
@@ -0,0 +1,139 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with Inputs</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Must be power of two to eliminate round-off
+ const sampleRate = 8192;
+
+ // Arbitrary duration that doesn't need to be too long to verify k-rate
+ // automations. Probably should be at least a few render quanta.
+ const testDuration = 8 * RENDER_QUANTUM_FRAMES / sampleRate;
+
+ // Test k-rate GainNode.gain is k-rate
+ audit.define(
+ {label: 'Gain', description: 'k-rate GainNode.gain'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new ConstantSourceNode(context);
+
+ createTestSubGraph(context, src, merger, 'GainNode', 'gain');
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `gain[${k}:${k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ // Test k-rate StereoPannerNode.pan is k-rate
+ audit.define(
+ {label: 'StereoPanner', description: 'k-rate StereoPannerNode.pan'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new ConstantSourceNode(context);
+
+ createTestSubGraph(
+ context, src, merger, 'StereoPannerNode', 'pan', {
+ testModSetup: node => {
+ node.offset.setValueAtTime(-1, 0);
+ node.offset.linearRampToValueAtTime(1, testDuration);
+ }
+ });
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length; k += 128) {
+ should(actual.slice(k, k + 128), `pan[${k}:${k + 128}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ function createTestSubGraph(
+ context, src, merger, nodeName, paramName, options) {
+        // The test node which has its AudioParam set up for k-rate automations.
+ let tstNode = new window[nodeName](context);
+
+ if (options && options.setups) {
+ options.setups(tstNode);
+ }
+ tstNode[paramName].automationRate = 'k-rate';
+
+ // Modulating signal for the test node. Just a linear ramp. This is
+ // connected to the AudioParam of the tstNode.
+ let tstMod = new ConstantSourceNode(context);
+ if (options && options.testModSetup) {
+ options.testModSetup(tstMod);
+ } else {
+ tstMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ }
+
+ tstMod.connect(tstNode[paramName]);
+ src.connect(tstNode).connect(merger, 0, 0);
+
+ // The ref node is the same type of node as the test node, but uses
+ // a-rate automation. However, the modulating signal is k-rate. This
+ // causes the input to the audio param to be constant over a render,
+ // which is basically the same as making the audio param be k-rate.
+ let refNode = new window[nodeName](context);
+ let refMod = new ConstantSourceNode(context);
+ refMod.offset.automationRate = 'k-rate';
+ if (options && options.testModSetup) {
+ options.testModSetup(refMod);
+ } else {
+ refMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ }
+
+ refMod.connect(refNode[paramName]);
+ src.connect(refNode).connect(merger, 0, 1);
+
+ tstMod.start();
+ refMod.start();
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html
new file mode 100644
index 0000000000..0bea5c91f8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html
@@ -0,0 +1,176 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of ConstantSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('ConstantSource k-rate offset', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+
+        // Only need a few render quanta to verify things are working.
+ let testDuration = 4 * 128 / sampleRate;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ sourceNodeName: 'none',
+ verifyPieceWiseConstant: true,
+ nodeName: 'ConstantSourceNode',
+ prefix: 'k-rate offset',
+ rateSettings: [{name: 'offset', value: 'k-rate'}],
+ automations: [{
+ name: 'offset',
+ methods: [
+ {name: 'setValueAtTime', options: [0, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [10, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+      // Parameters for the following tests.
+
+ // Must be power of two to eliminate round-off
+ const sampleRate8k = 8192;
+
+ // Arbitrary duration that doesn't need to be too long to verify k-rate
+ // automations. Probably should be at least a few render quanta.
+ const testDuration = 8 * RENDER_QUANTUM_FRAMES / sampleRate8k;
+
+ // Basic test that k-rate ConstantSourceNode.offset is k-rate. This is
+ // the basis for all of the following tests, so make sure it's right.
+ audit.define(
+ {
+ label: 'ConstantSourceNode.offset k-rate automation',
+ description:
+ 'Explicitly test ConstantSourceNode.offset k-rate automation is k-rate'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate8k,
+ length: testDuration * sampleRate8k
+ });
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // k-rate ConstantSource.offset using a linear ramp starting at 0
+ // and incrementing by 1 for each frame.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.offset.automationRate = 'k-rate';
+
+ src.offset.setValueAtTime(0, 0);
+ src.offset.linearRampToValueAtTime(context.length, testDuration);
+
+ src.connect(merger, 0, 0);
+
+ src.start();
+
+ // a-rate ConstantSource using the same ramp as above.
+ let refSrc = new ConstantSourceNode(context, {offset: 0});
+
+ refSrc.offset.setValueAtTime(0, 0);
+ refSrc.offset.linearRampToValueAtTime(context.length, testDuration);
+
+ refSrc.connect(merger, 0, 1);
+
+ refSrc.start();
+
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ // Verify that the k-rate output is constant over the render
+ // and that it matches the value of the a-rate value at the
+ // beginning of the render.
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `k-rate ConstantSource.offset: output[${k}:${
+ k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ // This test verifies that a k-rate input to the ConstantSourceNode.offset
+ // works just as if we set the AudioParam to be k-rate. This is the basis
+ // of the following tests, so make sure it works.
+ audit.define(
+ {
+ label: 'ConstantSource.offset',
+ description: 'Verify k-rate automation matches k-rate input'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate8k,
+ length: testDuration * sampleRate8k
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let tstSrc = new ConstantSourceNode(context);
+ let tstMod = new ConstantSourceNode(context);
+ tstSrc.offset.automationRate = 'k-rate';
+ tstMod.offset.linearRampToValueAtTime(context.length, testDuration);
+
+ tstMod.connect(tstSrc.offset)
+ tstSrc.connect(merger, 0, 0);
+
+ let refSrc = new ConstantSourceNode(context);
+ let refMod = new ConstantSourceNode(context);
+ refMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ refMod.offset.automationRate = 'k-rate';
+
+ refMod.connect(refSrc.offset);
+ refSrc.connect(merger, 0, 1);
+
+ tstSrc.start();
+ tstMod.start();
+ refSrc.start();
+ refMod.start();
+
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < context.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `ConstantSource.offset k-rate input: output[${k}:${
+ k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html
new file mode 100644
index 0000000000..fcf66f2e3e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html
@@ -0,0 +1,156 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for DelayNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Power-of-two to eliminate round-off in computing time and frames, but
+ // is otherwise arbitrary.
+ const sampleRate = 8192;
+
+ // Arbitrary duration except it must be greater than or equal to 1.
+ const testDuration = 1.5;
+
+ audit.define(
+ {label: 'delayTime', description: `DelayNode delayTime k-rate input`},
+ async (task, should) => {
+ // Two channels: 0 = test result, 1 = expected result.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Test the DelayNode by having a reference node (refNode) that uses
+ // k-rate automations of delayTime. The test node (testNode) sets
+ // delayTime to k-rate with a connected input that has the same
+ // automation vlaues as the reference node. The test passes if the
+ // output from each node is identical to each other.
+
+ // Just some non-constant source.
+ let src = new OscillatorNode(context);
+
+ // The end value and time for the linear ramp. These values are
+ // chosen so that the delay advances faster than real time.
+ let endValue = 1.125;
+ let endTime = 1;
+
+ let refNode;
+
+ should(
+ () => refNode = new DelayNode(context),
+ `refNode = new DelayNode(context)`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.automationRate = 'k-rate',
+ `refNode.delayTime.automationRate = 'k-rate'`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.setValueAtTime(0, 0),
+ `refNode.delayTime.setValueAtTime(0, 0)`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.linearRampToValueAtTime(
+ endValue, endTime),
+ `refNode.delayTime.linearRampToValueAtTime(${endValue}, ${
+ endTime})`)
+ .notThrow();
+
+ let testNode;
+
+ should(
+ () => testNode = new DelayNode(context),
+ `testNode = new DelayNode(context)`)
+ .notThrow();
+
+ should(
+ () => testNode.delayTime.automationRate = 'k-rate',
+ `testNode.delayTime.automationRate = 'k-rate'`)
+ .notThrow();
+
+ let testMod;
+
+ should(
+ () => testMod = new ConstantSourceNode(context),
+ `testMod = new ConstantSourceNode(context)`)
+ .notThrow();
+
+ should(
+ () => testMod.offset.setValueAtTime(0, 0),
+ `testMod.offset.setValueAtTime(0, 0)`)
+ .notThrow();
+
+ should(
+ () => testMod.offset.linearRampToValueAtTime(endValue, endTime),
+ `testMod.offset.linearRampToValueAtTime(${endValue}, ${
+ endTime})`)
+ .notThrow();
+
+ should(
+ () => testMod.connect(testNode.delayTime),
+ `testMod.connect(testNode.delayTime)`)
+ .notThrow();
+
+ // Connect up everything and go!
+ src.connect(testNode).connect(merger, 0, 0);
+ src.connect(refNode).connect(merger, 0, 1);
+
+ src.start();
+ testMod.start();
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Quick sanity check that output isn't zero. This means we messed
+ // up the connections or automations or the buffer source.
+ should(expected, `Expected k-rate delayTime AudioParam with input`)
+ .notBeConstantValueOf(0);
+ should(actual, `Actual k-rate delayTime AudioParam with input`)
+ .notBeConstantValueOf(0);
+
+ // Quick sanity check. The amount of delay after one render is
+ // endValue * 128 / sampleRate. But after 1 render, time has
+ // advanced 128/sampleRate. Hence, the delay exceeds the time by
+ // (endValue - 1)*128/sampleRate sec or (endValue - 1)*128 frames.
+ // This means the output must be EXACTLY zero for this many frames
+ // in the second render.
+ let zeroFrames = (endValue - 1) * RENDER_QUANTUM_FRAMES;
+ should(
+ actual.slice(
+ RENDER_QUANTUM_FRAMES, RENDER_QUANTUM_FRAMES + zeroFrames),
+ `output[${RENDER_QUANTUM_FRAMES}, ${
+ RENDER_QUANTUM_FRAMES + zeroFrames - 1}]`)
+ .beConstantValueOf(0);
+ should(
+ actual.slice(
+ RENDER_QUANTUM_FRAMES + zeroFrames,
+ 2 * RENDER_QUANTUM_FRAMES),
+ `output[${RENDER_QUANTUM_FRAMES + zeroFrames}, ${
+ 2 * RENDER_QUANTUM_FRAMES - 1}]`)
+ .notBeConstantValueOf(0);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate delayTime AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ });
+
+ audit.run();
+ </script>
+ </body>
+ </html> \ No newline at end of file
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html
new file mode 100644
index 0000000000..5465c39430
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html
@@ -0,0 +1,49 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of DelayNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Test k-rate DelayNode', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+
+ doTest(context, should, {
+ nodeName: 'DelayNode',
+ nodeOptions: null,
+ prefix: 'DelayNode',
+ // Set all AudioParams to k-rate
+ rateSettings: [{name: 'delayTime', value: 'k-rate'}],
+ // Automate just the frequency
+ automations: [{
+ name: 'delayTime',
+ methods: [
+ {name: 'setValueAtTime', options: [0, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [.5, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html
new file mode 100644
index 0000000000..c1755cd155
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html
@@ -0,0 +1,145 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for DynamicsCompressorNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      // Fairly arbitrary sampleRate and somewhat arbitrary duration
+ const sampleRate = 48000;
+ const testDuration = 0.25;
+
+ ['attack', 'knee', 'ratio', 'release', 'threshold'].forEach(param => {
+ audit.define(
+ {label: param, description: `Dynamics compressor ${param}`},
+ async (task, should) => {
+ await doTest(should, {prefix: task.label, paramName: param});
+ task.done();
+ });
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test k-rate automation of DynamicsCompressorNode with connected
+ // input.
+ //
+ // A reference compressor node is created with an automation on the
+ // selected AudioParam. For simplicity, we just use a linear ramp from
+ // the minValue to the maxValue of the AudioParam.
+ //
+ // The test node has an input signal connected to the AudioParam. This
+ // input signal is created to match the automation on the reference
+ // node.
+ //
+ // Finally, the output from the two nodes must be identical if k-rate
+ // inputs are working correctly.
+ //
+ // Options parameter is a dictionary with the following required
+ // members:
+ // prefix - prefix to use for the messages.
+ // paramName - Name of the AudioParam to be tested
+
+ let {prefix, paramName} = options;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Use an oscillator for the source. Pretty arbitrary parameters.
+ let src =
+ new OscillatorNode(context, {type: 'sawtooth', frequency: 440});
+
+ // Create the reference and test nodes.
+ let refNode;
+ let tstNode;
+
+ should(
+ () => refNode = new DynamicsCompressorNode(context),
+ `${prefix}: refNode = new DynamicsCompressorNode(context)`)
+ .notThrow();
+
+ let tstOptions = {};
+ tstOptions[paramName] = refNode[paramName].minValue;
+ should(
+ () => tstNode = new DynamicsCompressorNode(context, tstOptions),
+ `${prefix}: tstNode = new DynamicsCompressorNode(context, ${
+ JSON.stringify(tstOptions)})`)
+ .notThrow();
+
+
+ // Automate the AudioParam of the reference node with a linear ramp
+ should(
+ () => refNode[paramName].setValueAtTime(
+ refNode[paramName].minValue, 0),
+ `${prefix}: refNode[${paramName}].setValueAtTime(refNode[${
+ paramName}].minValue, 0)`)
+ .notThrow();
+
+ should(
+ () => refNode[paramName].linearRampToValueAtTime(
+ refNode[paramName].maxValue, testDuration),
+ `${prefix}: refNode[${paramName}].linearRampToValueAtTime(refNode[${
+ paramName}].minValue, ${testDuration})`)
+ .notThrow();
+
+
+ // Create the input node and automate it so that it's output when added
+ // to the intrinsic value of the AudioParam we get the same values as
+        // the automations on the reference node.  We need to do it this way
+ // because the ratio AudioParam has a nominal range of [1, 20] so we
+ // can't just set the value to 0, which is what we'd normally do.
+ let mod;
+ should(
+ () => mod = new ConstantSourceNode(context, {offset: 0}),
+ `${prefix}: mod = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+ let endValue =
+ refNode[paramName].maxValue - refNode[paramName].minValue;
+ should(
+ () => mod.offset.setValueAtTime(0, 0),
+ `${prefix}: mod.offset.setValueAtTime(0, 0)`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(endValue, testDuration),
+ `${prefix}: mod.offset.linearRampToValueAtTime(${endValue}, ${
+ testDuration})`)
+ .notThrow();
+
+ // Connect up everything.
+ should(
+ () => mod.connect(tstNode[paramName]),
+ `${prefix}: mod.connect(tstNode[${paramName}])`)
+ .notThrow();
+
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ // Go!
+ src.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate ${paramName} AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html
new file mode 100644
index 0000000000..887d9f78db
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html
@@ -0,0 +1,47 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of GainNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Test k-rate GainNode', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+
+ doTest(context, should, {
+ nodeName: 'GainNode',
+ nodeOptions: null,
+ prefix: 'GainNode',
+ // Set AudioParam to k-rate
+ rateSettings: [{name: 'gain', value: 'k-rate'}],
+ // Automate
+ automations: [{
+ name: 'gain',
+ methods: [
+ {name: 'setValueAtTime', options: [1, 0]},
+ {name: 'linearRampToValueAtTime', options: [0, testDuration]}
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html
new file mode 100644
index 0000000000..475b364367
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html
@@ -0,0 +1,578 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ k-rate AudioParams with inputs for OscillatorNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Sample rate must be a power of two to eliminate round-off when
+ // computing time from frames and vice versa. Using a non-power of two
+ // will work, but the thresholds below will not be zero. They're probably
+ // closer to 1e-5 or so, but if everything is working correctly, the
+ // outputs really should be exactly equal.
+ const sampleRate = 8192;
+
+ // Fairly arbitrary but short duration to limit runtime.
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ const testDuration = testFrames / sampleRate;
+
+ audit.define(
+ {label: 'Test 1', description: 'k-rate frequency input'},
+ async (task, should) => {
+ // Test that an input to the frequency AudioParam set to k-rate
+ // works.
+
+ // Fairly arbitrary start and end frequencies for the automation.
+ const freqStart = 100;
+ const freqEnd = 2000;
+
+ let refSetup = (context) => {
+ let srcRef = new OscillatorNode(context, {frequency: 0});
+
+ should(
+ () => srcRef.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => srcRef.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => srcRef.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return srcRef;
+ };
+
+ let testSetup = (context) => {
+ let srcTest = new OscillatorNode(context, {frequency: 0});
+ should(
+ () => srcTest.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return srcTest;
+ };
+
+ let modSetup = (context) => {
+ let mod = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod.offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () =>
+ mod.offset.linearRampToValueAtTime(freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ // This node is going to be connected to the frequency AudioParam.
+ return {frequency: mod};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate frequency with input',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 2', description: 'k-rate detune input'},
+ async (task, should) => {
+ // Test that an input to the detune AudioParam set to k-rate works.
+            // Threshold experimentally determined. It should probably not be
+            // much larger than 5e-5, or something is not right.
+
+ // Fairly arbitrary start and end detune values for automation.
+ const detuneStart = 0;
+ const detuneEnd = 2000;
+
+ let refSetup = (context) => {
+ let srcRef = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => srcRef.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ should(
+ () => srcRef.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => srcRef.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return srcRef;
+ };
+
+ let testSetup = (context) => {
+ let srcTest = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => srcTest.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return srcTest;
+ };
+
+ let modSetup = (context) => {
+ let mod = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod.offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return {detune: mod};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate detune with input',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 3',
+ description: 'k-rate frequency input with a-rate detune'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with a k-rate frequency with input and an
+            // a-rate detune with automations.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0});
+
+ // Set up k-rate frequency and a-rate detune
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+                    freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcTest.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcTest.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let mod = {};
+ mod['frequency'] = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod['frequency'].offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+
+ should(
+ () => mod['frequency'].offset.linearRampToValueAtTime(
+                    freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return mod;
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate frequency input with a-rate detune',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 4',
+ description: 'a-rate frequency with k-rate detune input'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with an a-rate frequency with automations and
+ // a k-rate detune with input.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {detune: 0});
+
+ // Set up a-rate frequency and k-rate detune
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+                    freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcTest.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: srcTest.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let mod = {};
+ const name = 'detune';
+
+ mod['detune'] = new ConstantSourceNode(context, {offset: 0});
+ should(
+ () => mod[name].offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+
+ should(
+ () => mod[name].offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return mod;
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate detune input with a-rate frequency',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 5',
+ description: 'k-rate inputs for frequency and detune'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with k-rate frequency and detune with inputs
+ // on both.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0, detune: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+                `${task.label}: srcRef.frequency.setValueAtTime(${freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+                `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0, detune: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let modF = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => modF.offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => modF.offset.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ let modD = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => modD.offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => modD.offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return {frequency: modF, detune: modD};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate inputs for both frequency and detune',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.run();
+
+ async function testParams(should, options) {
+ // Test a-rate and k-rate AudioParams of an OscillatorNode.
+ //
+ // |options| should be a dictionary with these members:
+ // prefix - prefix to use for messages
+ // summary - message to be printed with the final results
+ // setupRefOsc - function returning the reference oscillator
+ // setupTestOsc - function returning the test oscillator
+ // setupMod - function returning nodes to be connected to the
+ // AudioParams.
+ //
+ // |setupRefOsc| and |setupTestOsc| are given the context and each
+ // method is expected to create an OscillatorNode with the appropriate
+ // automations for testing. The constructed OscillatorNode is returned.
+ //
+ // The reference oscillator
+ // should automate the desired AudioParams at the appropriate automation
+ // rate, and the output is the expected result.
+ //
+ // The test oscillator should set up the AudioParams but expect the
+ // AudioParam(s) have an input that matches the automation for the
+ // reference oscillator.
+ //
+ // |setupMod| must create one or two ConstantSourceNodes with exactly
+ // the same automations as used for the reference oscillator. This node
+ // is used as the input to an AudioParam of the test oscillator. This
+ // function returns a dictionary whose members are named 'frequency' and
+ // 'detune'. The name indicates which AudioParam the constant source
+ // node should be connected to.
+
+ // Two channels: 0 = reference signal, 1 = test signal
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // The reference oscillator.
+ let srcRef = options.setupRefOsc(context);
+
+ // The test oscillator.
+ let srcTest = options.setupTestOsc(context);
+
+ // Inputs to AudioParam.
+ let mod = options.setupMod(context);
+
+ if (mod['frequency']) {
+ should(
+ () => mod['frequency'].connect(srcTest.frequency),
+ `${options.prefix}: modFreq.connect(srcTest.frequency)`)
+ .notThrow();
+ mod['frequency'].start()
+ }
+
+ if (mod['detune']) {
+ should(
+ () => mod['detune'].connect(srcTest.detune),
+ `${options.prefix}: modDetune.connect(srcTest.detune)`)
+ .notThrow();
+ mod['detune'].start()
+ }
+
+ srcRef.connect(merger, 0, 0);
+ srcTest.connect(merger, 0, 1);
+
+ srcRef.start();
+ srcTest.start();
+
+ let buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // The output of the reference and test oscillator should be
+ // exactly equal because the AudioParam values should be exactly
+ // equal.
+ should(actual, options.summary).beCloseToArray(expected, {
+ absoluteThreshold: 0
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html
new file mode 100644
index 0000000000..6803f55eab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html
@@ -0,0 +1,88 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of OscillatorNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+
+      // Only need a few render quanta to verify things are working.
+ let testDuration = 4 * 128 / sampleRate;
+
+ [{name: 'detune', initial: 0, final: 1200}, {
+ name: 'frequency',
+ initial: 440,
+ final: sampleRate / 2
+ }].forEach(paramProperty => {
+ audit.define(
+ 'Oscillator k-rate ' + paramProperty.name, (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+ let inverter = new GainNode(context, {gain: -1});
+ inverter.connect(merger, 0, 2);
+
+ let kRateNode = new OscillatorNode(context);
+ let aRateNode = new OscillatorNode(context);
+
+ kRateNode.connect(merger, 0, 0);
+ aRateNode.connect(merger, 0, 1);
+
+ kRateNode.connect(merger, 0, 2);
+ aRateNode.connect(inverter);
+
+ // Set the rate
+ kRateNode[paramProperty.name].automationRate = 'k-rate';
+
+ // Automate the offset
+ kRateNode[paramProperty.name].setValueAtTime(
+ paramProperty.initial, 0);
+ kRateNode[paramProperty.name].linearRampToValueAtTime(
+ paramProperty.final, testDuration);
+
+ aRateNode[paramProperty.name].setValueAtTime(
+ paramProperty.initial, 0);
+ aRateNode[paramProperty.name].linearRampToValueAtTime(
+ paramProperty.final, testDuration);
+
+ kRateNode.start();
+ aRateNode.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let kRateOut = audioBuffer.getChannelData(0);
+ let aRateOut = audioBuffer.getChannelData(1);
+ let diff = audioBuffer.getChannelData(2);
+
+ // Verify that the outputs are different.
+ should(
+ diff,
+ 'k-rate ' + paramProperty.name +
+ ': Difference between a-rate and k-rate outputs')
+ .notBeConstantValueOf(0);
+
+ })
+ .then(() => task.done());
+ });
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html
new file mode 100644
index 0000000000..001cf63bd3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html
@@ -0,0 +1,238 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ k-rate AudioParams with inputs for PannerNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'Panner x', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionX'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Panner y', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionY'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Panner z', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionZ'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener x', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionX'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener y', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionY'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener z', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionZ'});
+ task.done();
+ });
+
+ audit.run();
+
+ async function testPannerParams(should, options) {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ let testDuration = testFrames / sampleRate;
+ // Four channels needed because the first two are for the output of
+ // the reference panner, and the next two are for the test panner.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 4,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Create a stereo source out of two mono sources
+ let src0 = new ConstantSourceNode(context, {offset: 1});
+ let src1 = new ConstantSourceNode(context, {offset: 2});
+ let src = new ChannelMergerNode(context, {numberOfInputs: 2});
+ src0.connect(src, 0, 0);
+ src1.connect(src, 0, 1);
+
+ let finalPosition = 100;
+
+ // Reference panner node with k-rate AudioParam automations. The
+ // output of this panner is the reference output.
+ let refNode = new PannerNode(context);
+ // Initialize the panner location to somewhat arbitrary values.
+ refNode.positionX.value = 1;
+ refNode.positionY.value = 50;
+ refNode.positionZ.value = -25;
+
+ // Set the AudioParam under test with the appropriate automations.
+ refNode[options.param].automationRate = 'k-rate';
+ refNode[options.param].setValueAtTime(1, 0);
+ refNode[options.param].linearRampToValueAtTime(
+ finalPosition, testDuration);
+ let refSplit = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ // Test panner node with k-rate AudioParam with inputs.
+ let tstNode = new PannerNode(context);
+ tstNode.positionX.value = 1;
+ tstNode.positionY.value = 50;
+ tstNode.positionZ.value = -25;
+ tstNode[options.param].value = 0;
+ tstNode[options.param].automationRate = 'k-rate';
+ let tstSplit = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ // The input to the AudioParam. It must have the same automation
+ // sequence as used by refNode. And must be a-rate to demonstrate
+ // the k-rate effect of the AudioParam.
+ let mod = new ConstantSourceNode(context, {offset: 0});
+ mod.offset.setValueAtTime(1, 0);
+ mod.offset.linearRampToValueAtTime(finalPosition, testDuration);
+
+ mod.connect(tstNode[options.param]);
+
+ src.connect(refNode).connect(refSplit);
+ src.connect(tstNode).connect(tstSplit);
+
+ refSplit.connect(merger, 0, 0);
+ refSplit.connect(merger, 1, 1);
+ tstSplit.connect(merger, 0, 2);
+ tstSplit.connect(merger, 1, 3);
+
+ mod.start();
+ src0.start();
+ src1.start();
+
+ const buffer = await context.startRendering();
+ let expected0 = buffer.getChannelData(0);
+ let expected1 = buffer.getChannelData(1);
+ let actual0 = buffer.getChannelData(2);
+ let actual1 = buffer.getChannelData(3);
+
+ should(expected0, `Panner: ${options.param}: Expected output channel 0`)
+ .notBeConstantValueOf(expected0[0]);
+ should(expected1, `${options.param}: Expected output channel 1`)
+ .notBeConstantValueOf(expected1[0]);
+
+        // Verify output is a stair step because the tested position param
+        // is k-rate, and no other AudioParam is changing.
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual0.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Panner: ${options.param}: Channel 0 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(actual0[k]);
+ }
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual1.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Panner: ${options.param}: Channel 1 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(actual1[k]);
+ }
+
+ should(actual0, `Panner: ${options.param}: Actual output channel 0`)
+ .beCloseToArray(expected0, {absoluteThreshold: 0});
+ should(actual1, `Panner: ${options.param}: Actual output channel 1`)
+ .beCloseToArray(expected1, {absoluteThreshold: 0});
+ }
+
+ async function testListenerParams(should, options) {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ let testDuration = testFrames / sampleRate;
+        // Two channels for the stereo output of the single panner under
+        // test; the listener tests need no separate reference panner.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ // Create a stereo source out of two mono sources
+ let src0 = new ConstantSourceNode(context, {offset: 1});
+ let src1 = new ConstantSourceNode(context, {offset: 2});
+ let src = new ChannelMergerNode(context, {numberOfInputs: 2});
+ src0.connect(src, 0, 0);
+ src1.connect(src, 0, 1);
+
+ let finalPosition = 100;
+
+ // Reference panner node with k-rate AudioParam automations. The
+ // output of this panner is the reference output.
+ let panner = new PannerNode(context);
+ panner.positionX.value = 10;
+ panner.positionY.value = 50;
+ panner.positionZ.value = -25;
+
+ src.connect(panner);
+
+ let mod = new ConstantSourceNode(context, {offset: 0});
+ mod.offset.setValueAtTime(1, 0);
+ mod.offset.linearRampToValueAtTime(finalPosition, testDuration);
+
+ context.listener[options.param].automationRate = 'k-rate';
+ mod.connect(context.listener[options.param]);
+
+ panner.connect(context.destination);
+
+ src0.start();
+ src1.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let c0 = buffer.getChannelData(0);
+ let c1 = buffer.getChannelData(1);
+
+        // Verify output is a stair step because the tested listener param
+        // is k-rate, and no other AudioParam is changing.
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ c0.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Listener: ${options.param}: Channel 0 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(c0[k]);
+ }
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ c1.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Listener: ${options.param}: Channel 1 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(c1[k]);
+ }
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html
new file mode 100644
index 0000000000..60200b2471
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html
@@ -0,0 +1,178 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of PannerNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Define a test where we verify that a k-rate audio param produces
+ // different results from an a-rate audio param for each of the audio
+ // params of a biquad.
+ //
+ // Each entry gives the name of the AudioParam, an initial value to be
+ // used with setValueAtTime, and a final value to be used with
+ // linearRampToValueAtTime. (See |doTest| for details as well.)
+
+ [{name: 'positionX', initial: 0, final: 1000},
+ {name: 'positionY', initial: 0, final: 1000},
+ {name: 'orientationX', initial: 1, final: 10},
+ {name: 'orientationY', initial: 1, final: 10},
+ {name: 'orientationZ', initial: 1, final: 10},
+ ].forEach(paramProperty => {
+ audit.define('Panner k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 5 * 128 / sampleRate;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ sourceNodeName: 'ConstantSourceNode',
+ verifyPieceWiseConstant: true,
+ nodeName: 'PannerNode',
+ // Make the source directional so orientation matters, and set some
+ // defaults for the position and orientation so that we're not on an
+ // axis where the azimuth and elevation might be constant when
+ // moving one of the AudioParams.
+ nodeOptions: {
+ distanceModel: 'inverse',
+ coneOuterAngle: 360,
+ coneInnerAngle: 0,
+ positionX: 1,
+ positionY: 1,
+ positionZ: 1,
+ orientationX: 0,
+ orientationY: 1,
+ orientationZ: 1
+ },
+ prefix: `k-rate ${paramProperty.name}`,
+            // Set just the AudioParam under test to k-rate
+ rateSettings: [
+ {name: paramProperty.name, value: 'k-rate'},
+ ],
+ // Automate just the given AudioParam
+ automations: [{
+ name: paramProperty.name,
+ methods: [
+ {name: 'setValueAtTime', options: [paramProperty.initial, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [paramProperty.final, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+ });
+
+      // Test k-rate automation of the listener. The initial and final
+ // automation values are pretty arbitrary, except that they should be such
+ // that the panner and listener produces non-constant output.
+ [{name: 'positionX', initial: [1, 0], final: [1000, 1]},
+ {name: 'positionY', initial: [1, 0], final: [1000, 1]},
+ {name: 'positionZ', initial: [1, 0], final: [1000, 1]},
+ {name: 'forwardX', initial: [-1, 0], final: [1, 1]},
+ {name: 'forwardY', initial: [-1, 0], final: [1, 1]},
+ {name: 'forwardZ', initial: [-1, 0], final: [1, 1]},
+ {name: 'upX', initial: [-1, 0], final: [1000, 1]},
+ {name: 'upY', initial: [-1, 0], final: [1000, 1]},
+ {name: 'upZ', initial: [-1, 0], final: [1000, 1]},
+ ].forEach(paramProperty => {
+ audit.define(
+ 'Listener k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 5 * 128 / sampleRate;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doListenerTest(context, should, {
+ param: paramProperty.name,
+ initial: paramProperty.initial,
+ final: paramProperty.final
+ }).then(() => task.done());
+ });
+ });
+
+ audit.run();
+
+ function doListenerTest(context, should, options) {
+ let src = new ConstantSourceNode(context);
+ let panner = new PannerNode(context, {
+ distanceModel: 'inverse',
+ coneOuterAngle: 360,
+ coneInnerAngle: 10,
+ positionX: 10,
+ positionY: 10,
+ positionZ: 10,
+ orientationX: 1,
+ orientationY: 1,
+ orientationZ: 1
+ });
+
+ src.connect(panner).connect(context.destination);
+
+ src.start();
+
+ let listener = context.listener;
+
+ // Set listener properties to "random" values so that motion on one of
+ // the attributes actually changes things relative to the panner
+ // location. And the up and forward directions should have a simple
+ // relationship between them.
+ listener.positionX.value = -1;
+ listener.positionY.value = 1;
+ listener.positionZ.value = -1;
+ listener.forwardX.value = -1;
+ listener.forwardY.value = 1;
+ listener.forwardZ.value = -1;
+ // Make the up vector not parallel or perpendicular to the forward and
+ // position vectors so that automations of the up vector produce
+ // noticeable differences.
+ listener.upX.value = 1;
+ listener.upY.value = 1;
+ listener.upZ.value = 2;
+
+ let audioParam = listener[options.param];
+ audioParam.automationRate = 'k-rate';
+
+ let prefix = `Listener ${options.param}`;
+ should(audioParam.automationRate, prefix + '.automationRate')
+ .beEqualTo('k-rate');
+ should(() => {
+ audioParam.setValueAtTime(...options.initial);
+ }, prefix + `.setValueAtTime(${options.initial})`).notThrow();
+ should(() => {
+ audioParam.linearRampToValueAtTime(...options.final);
+ }, prefix + `.linearRampToValueAtTime(${options.final})`).notThrow();
+
+ return context.startRendering().then(renderedBuffer => {
+ let prefix = `Listener k-rate ${options.param}: `;
+ let output = renderedBuffer.getChannelData(0);
+ // Sanity check that the output isn't constant.
+ should(output, prefix + `Output`).notBeConstantValueOf(output[0]);
+
+ // Verify that the output is constant over each render quantum
+ for (let k = 0; k < output.length; k += 128) {
+ should(
+ output.slice(k, k + 128), prefix + `Output [${k}, ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html
new file mode 100644
index 0000000000..06905b89c3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html
@@ -0,0 +1,48 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of StereoPannerNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      audit.define('Test k-rate StereoPannerNode', (task, should) => {
+        // Arbitrary sample rate and duration.
+        let sampleRate = 8000;
+        let testDuration = 1;
+        let context = new OfflineAudioContext({
+          numberOfChannels: 3,
+          sampleRate: sampleRate,
+          length: testDuration * sampleRate
+        });
+
+        doTest(context, should, {
+          nodeName: 'StereoPannerNode',
+          nodeOptions: null,
+          prefix: 'StereoPannerNode',
+          // Set all AudioParams to k-rate.
+          rateSettings: [{name: 'pan', value: 'k-rate'}],
+          // Automate just the pan.
+          automations: [{
+            name: 'pan',
+            methods: [
+              {name: 'setValueAtTime', options: [0, 0]}, {
+                name: 'linearRampToValueAtTime',
+                options: [.5, testDuration]
+              }
+            ]
+          }]
+        }).then(() => task.done());
+      });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html
new file mode 100644
index 0000000000..e9b8f0accb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html
@@ -0,0 +1,92 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test Flushing of NaN to Zero in AudioParams</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // See
+ // https://webaudio.github.io/web-audio-api/#computation-of-value.
+ //
+ // The computed value must replace NaN values in the output with
+ // the default value of the param.
+ audit.define('AudioParam NaN', async (task, should) => {
+ // For testing, we only need a small number of frames; and
+ // a low sample rate is perfectly fine. Use two channels.
+ // The first channel is for the AudioParam output. The
+ // second channel is for the AudioParam input.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: 256, sampleRate: 8192});
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // A constant source with a huge value.
+ let mod = new ConstantSourceNode(context, {offset: 1e30});
+
+ // Gain nodes with a huge positive gain and huge negative
+ // gain. Combined with the huge offset in |mod|, the
+ // output of the gain nodes are +Infinity and -Infinity.
+ let gainPos = new GainNode(context, {gain: 1e30});
+ let gainNeg = new GainNode(context, {gain: -1e30});
+
+ mod.connect(gainPos);
+ mod.connect(gainNeg);
+
+ // Connect these to the second merger channel. This is a
+ // sanity check that the AudioParam input really is NaN.
+ gainPos.connect(merger, 0, 1);
+ gainNeg.connect(merger, 0, 1);
+
+ // Source whose AudioParam is connected to the graph
+ // that produces NaN values. Use a non-default value offset
+ // just in case something is wrong we get default for some
+ // other reason.
+ let src = new ConstantSourceNode(context, {offset: 100});
+
+ gainPos.connect(src.offset);
+ gainNeg.connect(src.offset);
+
+ // AudioParam output goes to channel 1 of the destination.
+ src.connect(merger, 0, 0);
+
+ // Let's go!
+ mod.start();
+ src.start();
+
+ let buffer = await context.startRendering();
+
+ let input = buffer.getChannelData(1);
+ let output = buffer.getChannelData(0);
+
+ // Have to test manually for NaN values in the input because
+ // NaN fails all comparisons.
+ let isNaN = true;
+ for (let k = 0; k < input.length; ++k) {
+ if (!Number.isNaN(input[k])) {
+ isNaN = false;
+ break;
+ }
+ }
+
+ should(isNaN, 'AudioParam input contains only NaN').beTrue();
+
+ // Output of the AudioParam should have all NaN values
+ // replaced by the default.
+ should(output, 'AudioParam output')
+ .beConstantValueOf(src.offset.defaultValue);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html
new file mode 100644
index 0000000000..c81c3ad23e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html
@@ -0,0 +1,70 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test exponentialRampToValue with end time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      audit.define(
+          {
+            label: 'test',
+            description: 'Test exponentialRampToValue with end time in the past'
+          },
+          (task, should) => {
+            let {context, source, test, reference} = setupRetrospectiveGraph();
+
+            // Suspend the context at this frame so we can synchronously set up
+            // automations.
+            const suspendFrame = 128;
+
+            context.suspend(suspendFrame / context.sampleRate)
+                .then(() => {
+                  // Call exponentialRampToValueAtTime with a time in the past
+                  test.gain.exponentialRampToValueAtTime(
+                      0.1, 0.5 * context.currentTime);
+                  test.gain.exponentialRampToValueAtTime(0.9, 1.0);
+
+                  reference.gain.exponentialRampToValueAtTime(
+                      0.1, context.currentTime);
+                  reference.gain.exponentialRampToValueAtTime(0.9, 1.0);
+                })
+                .then(() => context.resume());
+
+            source.start();
+
+            context.startRendering()
+                .then(resultBuffer => {
+                  let testValue = resultBuffer.getChannelData(0);
+                  let referenceValue = resultBuffer.getChannelData(1);
+
+                  // Until the suspendFrame, both should be exactly equal to 1.
+                  should(
+                      testValue.slice(0, suspendFrame),
+                      `Test[0:${suspendFrame - 1}]`)
+                      .beConstantValueOf(1);
+                  should(
+                      referenceValue.slice(0, suspendFrame),
+                      `Reference[0:${suspendFrame - 1}]`)
+                      .beConstantValueOf(1);
+
+                  // After the suspendFrame, both should be equal (and not
+                  // constant)
+                  should(
+                      testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+                      .beEqualToArray(referenceValue.slice(suspendFrame));
+                })
+                .then(() => task.done());
+          });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html
new file mode 100644
index 0000000000..9f5e55fe55
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html
@@ -0,0 +1,70 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test linearRampToValue with end time in the past</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      audit.define(
+          {
+            label: 'test',
+            description: 'Test linearRampToValue with end time in the past'
+          },
+          (task, should) => {
+            let {context, source, test, reference} = setupRetrospectiveGraph();
+
+            // Suspend the context at this frame so we can synchronously set up
+            // automations.
+            const suspendFrame = 128;
+
+            context.suspend(suspendFrame / context.sampleRate)
+                .then(() => {
+                  // Call linearRampToValueAtTime with a time in the past
+                  test.gain.linearRampToValueAtTime(
+                      0.1, 0.5 * context.currentTime);
+                  test.gain.linearRampToValueAtTime(0.9, 1.0);
+
+                  reference.gain.linearRampToValueAtTime(
+                      0.1, context.currentTime);
+                  reference.gain.linearRampToValueAtTime(0.9, 1.0);
+                })
+                .then(() => context.resume());
+
+            source.start();
+
+            context.startRendering()
+                .then(resultBuffer => {
+                  let testValue = resultBuffer.getChannelData(0);
+                  let referenceValue = resultBuffer.getChannelData(1);
+
+                  // Until the suspendFrame, both should be exactly equal to 1.
+                  should(
+                      testValue.slice(0, suspendFrame),
+                      `Test[0:${suspendFrame - 1}]`)
+                      .beConstantValueOf(1);
+                  should(
+                      referenceValue.slice(0, suspendFrame),
+                      `Reference[0:${suspendFrame - 1}]`)
+                      .beConstantValueOf(1);
+
+                  // After the suspendFrame, both should be equal (and not
+                  // constant)
+                  should(
+                      testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+                      .beEqualToArray(referenceValue.slice(suspendFrame));
+                })
+                .then(() => task.done());
+          });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html
new file mode 100644
index 0000000000..41a37bdb91
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html
@@ -0,0 +1,80 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test setTargetAtTime with start time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test setTargetAtTime with start time in the past'
+ },
+ (task, should) => {
+ // Use a sample rate that is a power of two to eliminate round-off
+ // in computing the currentTime.
+ let context = new OfflineAudioContext(2, 16384, 16384);
+ let source = new ConstantSourceNode(context);
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ let test = new GainNode(context);
+ let reference = new GainNode(context);
+
+ source.connect(test);
+ source.connect(reference);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+ // Call setTargetAtTime with a time in the past
+ test.gain.setTargetAtTime(0.1, 0.5*context.currentTime, 0.1);
+ reference.gain.setTargetAtTime(0.1, context.currentTime, 0.1);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
new file mode 100644
index 0000000000..32cdc6307f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
@@ -0,0 +1,74 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Test setValueAtTime with startTime in the past</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test setValueAtTime with startTime in the past'
+ },
+ (task, should) => {
+ let {context, source, test, reference} = setupRetrospectiveGraph();
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ // Use a ramp of slope 1 per frame to measure time.
+ // The end value is the extent of exact precision in single
+ // precision float.
+ const rampEnd = context.length - suspendFrame;
+ const rampEndSeconds = context.length / context.sampleRate;
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+ // Call setValueAtTime with a time in the past
+ test.gain.setValueAtTime(0.0, 0.5 * context.currentTime);
+ test.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ reference.gain.setValueAtTime(0.0, context.currentTime);
+ reference.gain.linearRampToValueAtTime(
+ rampEnd, rampEndSeconds);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html
new file mode 100644
index 0000000000..451b6ea829
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html
@@ -0,0 +1,67 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test SetValueCurve with start time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+  <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+    audit.define(
+        {
+          label: 'test',
+          description: 'Test SetValueCurve with start time in the past'
+        },
+        (task, should) => {
+          let {context, source, test, reference} = setupRetrospectiveGraph();
+
+          // Suspend the context at this frame so we can synchronously set up
+          // automations.
+          const suspendFrame = 128;
+
+          context.suspend(suspendFrame / context.sampleRate)
+              .then(() => {
+                // Call setValueCurveAtTime with a start time in the past
+                test.gain.setValueCurveAtTime(
+                    new Float32Array([1.0, 0.1]), 0.5 * context.currentTime,
+                    1.0);
+                reference.gain.setValueCurveAtTime(
+                    new Float32Array([1.0, 0.1]), context.currentTime, 1.0);
+              })
+              .then(() => context.resume());
+
+          source.start();
+
+          context.startRendering()
+              .then(resultBuffer => {
+                let testValue = resultBuffer.getChannelData(0);
+                let referenceValue = resultBuffer.getChannelData(1);
+
+                // Until the suspendFrame, both should be exactly equal to 1.
+                should(
+                    testValue.slice(0, suspendFrame),
+                    `Test[0:${suspendFrame - 1}]`)
+                    .beConstantValueOf(1);
+                should(
+                    referenceValue.slice(0, suspendFrame),
+                    `Reference[0:${suspendFrame - 1}]`)
+                    .beConstantValueOf(1);
+
+                // After the suspendFrame, both should be equal (and not
+                // constant)
+                should(
+                    testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+                    .beEqualToArray(referenceValue.slice(suspendFrame));
+              })
+              .then(() => task.done());
+        });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js
new file mode 100644
index 0000000000..bbda190f09
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js
@@ -0,0 +1,29 @@
+// Create an audio graph on an offline context that consists of a
+// constant source and two gain nodes. One of the nodes is the node to
+// be tested and the other is the reference node. The output from the
+// test node is in channel 0 of the offline context; the reference
+// node is in channel 1.
+//
+// Returns a dictionary with the context, source node, the test node,
+// and the reference node.
+function setupRetrospectiveGraph() {
+  // Use a sample rate that is a power of two to eliminate round-off
+  // in computing the currentTime.
+  let context = new OfflineAudioContext(2, 16384, 16384);
+  let source = new ConstantSourceNode(context);
+
+  let test = new GainNode(context);
+  let reference = new GainNode(context);
+
+  source.connect(test);
+  source.connect(reference);
+
+  let merger = new ChannelMergerNode(
+      context, {numberOfInputs: context.destination.channelCount});
+  test.connect(merger, 0, 0);
+  reference.connect(merger, 0, 1);
+
+  merger.connect(context.destination);
+
+  return {context: context, source: source, test: test, reference: reference};
+}
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html
new file mode 100644
index 0000000000..2ed076cccf
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html
@@ -0,0 +1,93 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+ <head>
+ <title>Test convergence of setTargetAtTime</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src='/webaudio/resources/audio-param.js'></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {task: 'setTargetAtTime', label: 'convergence handled correctly'},
+ (task, should) => {
+ // Two channels:
+ // 0 - actual result
+ // 1 - expected result
+ const context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: 8000, length: 8000});
+
+          const merger = new ChannelMergerNode(
+              context, {numberOfInputs: context.destination.channelCount});
+          merger.connect(context.destination);
+
+          // Construct test source that will have the AudioParams being tested
+          // to verify that the AudioParams are working correctly.
+ let src;
+
+ should(
+ () => src = new ConstantSourceNode(context),
+ 'src = new ConstantSourceNode(context)')
+ .notThrow();
+
+ src.connect(merger, 0, 0);
+ src.offset.setValueAtTime(1, 0);
+
+ const timeConstant = 0.01;
+
+ // testTime must be at least 10*timeConstant. Also, this must not
+ // lie on a render boundary.
+ const testTime = 0.15;
+ const rampEnd = testTime + 0.001;
+
+ should(
+ () => src.offset.setTargetAtTime(0.5, 0.01, timeConstant),
+ `src.offset.setTargetAtTime(0.5, 0.01, ${timeConstant})`)
+ .notThrow();
+ should(
+ () => src.offset.setValueAtTime(0.5, testTime),
+ `src.offset.setValueAtTime(0.5, ${testTime})`)
+ .notThrow();
+ should(
+ () => src.offset.linearRampToValueAtTime(1, rampEnd),
+ `src.offset.linearRampToValueAtTime(1, ${rampEnd})`)
+ .notThrow();
+
+ // The reference node that will generate the expected output. We do
+ // the same automations, except we don't apply the setTarget
+ // automation.
+ const refSrc = new ConstantSourceNode(context);
+ refSrc.connect(merger, 0, 1);
+
+ refSrc.offset.setValueAtTime(0.5, 0);
+ refSrc.offset.setValueAtTime(0.5, testTime);
+ refSrc.offset.linearRampToValueAtTime(1, rampEnd);
+
+ src.start();
+ refSrc.start();
+
+ context.startRendering()
+ .then(audio => {
+ const actual = audio.getChannelData(0);
+ const expected = audio.getChannelData(1);
+
+ // Just verify that the actual output matches the expected
+ // starting a little bit before testTime.
+ let testFrame =
+ Math.floor(testTime * context.sampleRate) - 128;
+ should(actual.slice(testFrame), `output[${testFrame}:]`)
+ .beCloseToArray(
+ expected.slice(testFrame),
+ {relativeThreshold: 4.1724e-6});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
new file mode 100644
index 0000000000..827aeeabd4
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<title>Test setTargetAtTime after an event in the same processing block</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 179;
+ const valueStartOffset = 42;
+ const targetStartOffset = 53;
+ const sampleRate = 48000;
+ const scheduledValue = -0.5;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, valueStartOffset/sampleRate);
+ gain.gain.setTargetAtTime(scheduledValue, targetStartOffset/sampleRate,
+ 128/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < valueStartOffset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If the next event (having time T1) after this SetValue event is
+ // not of type LinearRampToValue or ExponentialRampToValue, then, for
+ // T0≤t<T1: v(t)=V".
+ // "Start exponentially approaching the target value at the given time
+ // with a rate having the given time constant."
+ // The target is the same value, and so the SetValue value continues.
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
new file mode 100644
index 0000000000..36fde2b996
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<title>Test setValueAtTime with start time not on a block boundary</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 200;
+ const offset = 65;
+ const sampleRate = 48000;
+ const scheduledValue = -2.0;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, offset/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < offset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If there are no more events after this SetValue event, then for
+ // t≥T0, v(t)=V, where T0 is the startTime parameter and V is the
+ // value parameter."
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>