Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface')
10 files changed, 1328 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html new file mode 100644 index 0000000000..9067e6869b --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html @@ -0,0 +1,278 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode-channel-rules.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + <script src="/webaudio/resources/mixing-rules.js"></script> + </head> + <body> + <script id="layout-test-code"> + let audit = Audit.createTaskRunner(); + let context = 0; + // Use a power of two to eliminate round-off converting frames to time. + let sampleRate = 32768; + let renderNumberOfChannels = 8; + let singleTestFrameLength = 8; + let testBuffers; + + // A list of connections to an AudioNode input, each of which is to be + // used in one or more specific test cases. Each element in the list is a + // string, with the number of connections corresponding to the length of + // the string, and each character in the string is from '1' to '8' + // representing a 1 to 8 channel connection (from an AudioNode output). + + // For example, the string "128" means 3 connections, having 1, 2, and 8 + // channels respectively. + + let connectionsList = [ + '1', '2', '3', '4', '5', '6', '7', '8', '11', '12', '14', '18', '111', + '122', '123', '124', '128' + ]; + + // A list of mixing rules, each of which will be tested against all of the + // connections in connectionsList. + let mixingRulesList = [ + { + channelCount: 2, + channelCountMode: 'max', + channelInterpretation: 'speakers' + }, + { + channelCount: 4, + channelCountMode: 'clamped-max', + channelInterpretation: 'speakers' + }, + + // Test up-down-mix to some explicit speaker layouts. + { + channelCount: 1, + channelCountMode: 'explicit', + channelInterpretation: 'speakers' + }, + { + channelCount: 2, + channelCountMode: 'explicit', + channelInterpretation: 'speakers' + }, + { + channelCount: 4, + channelCountMode: 'explicit', + channelInterpretation: 'speakers' + }, + { + channelCount: 6, + channelCountMode: 'explicit', + channelInterpretation: 'speakers' + }, + + { + channelCount: 2, + channelCountMode: 'max', + channelInterpretation: 'discrete' + }, + { + channelCount: 4, + channelCountMode: 'clamped-max', + channelInterpretation: 'discrete' + }, + { + channelCount: 4, + channelCountMode: 'explicit', + channelInterpretation: 'discrete' + }, + { + channelCount: 8, + channelCountMode: 'explicit', + channelInterpretation: 'discrete' + }, + ]; + + let numberOfTests = mixingRulesList.length * connectionsList.length; + + // Print out the information for an individual test case. 
+ function printTestInformation( + testNumber, actualBuffer, expectedBuffer, frameLength, frameOffset) { + let actual = stringifyBuffer(actualBuffer, frameLength); + let expected = + stringifyBuffer(expectedBuffer, frameLength, frameOffset); + debug('TEST CASE #' + testNumber + '\n'); + debug('actual channels:\n' + actual); + debug('expected channels:\n' + expected); + } + + function scheduleTest( + testNumber, connections, channelCount, channelCountMode, + channelInterpretation) { + let mixNode = context.createGain(); + mixNode.channelCount = channelCount; + mixNode.channelCountMode = channelCountMode; + mixNode.channelInterpretation = channelInterpretation; + mixNode.connect(context.destination); + + for (let i = 0; i < connections.length; ++i) { + let connectionNumberOfChannels = + connections.charCodeAt(i) - '0'.charCodeAt(0); + + let source = context.createBufferSource(); + // Get a buffer with the right number of channels, converting from + // 1-based to 0-based index. + let buffer = testBuffers[connectionNumberOfChannels - 1]; + source.buffer = buffer; + source.connect(mixNode); + + // Start at the right offset. + let sampleFrameOffset = testNumber * singleTestFrameLength; + let time = sampleFrameOffset / sampleRate; + source.start(time); + } + } + + function checkTestResult( + renderedBuffer, testNumber, connections, channelCount, + channelCountMode, channelInterpretation, should) { + let s = 'connections: ' + connections + ', ' + channelCountMode; + + // channelCount is ignored in "max" mode. + if (channelCountMode == 'clamped-max' || + channelCountMode == 'explicit') { + s += '(' + channelCount + ')'; + } + + s += ', ' + channelInterpretation; + + let computedNumberOfChannels = computeNumberOfChannels( + connections, channelCount, channelCountMode); + + // Create a zero-initialized silent AudioBuffer with + // computedNumberOfChannels. + let destBuffer = context.createBuffer( + computedNumberOfChannels, singleTestFrameLength, + context.sampleRate); + + // Mix all of the connections into the destination buffer. + for (let i = 0; i < connections.length; ++i) { + let connectionNumberOfChannels = + connections.charCodeAt(i) - '0'.charCodeAt(0); + let sourceBuffer = + testBuffers[connectionNumberOfChannels - 1]; // convert from + // 1-based to + // 0-based index + + if (channelInterpretation == 'speakers') { + speakersSum(sourceBuffer, destBuffer); + } else if (channelInterpretation == 'discrete') { + discreteSum(sourceBuffer, destBuffer); + } else { + alert('Invalid channel interpretation!'); + } + } + + // Use this when debugging mixing rules. + // printTestInformation(testNumber, renderedBuffer, destBuffer, + // singleTestFrameLength, sampleFrameOffset); + + // Validate that destBuffer matches the rendered output. We need to + // check the rendered output at a specific sample-frame-offset + // corresponding to the specific test case we're checking for based on + // testNumber. + + let sampleFrameOffset = testNumber * singleTestFrameLength; + for (let c = 0; c < renderNumberOfChannels; ++c) { + let renderedData = renderedBuffer.getChannelData(c); + for (let frame = 0; frame < singleTestFrameLength; ++frame) { + let renderedValue = renderedData[frame + sampleFrameOffset]; + + let expectedValue = 0; + if (c < destBuffer.numberOfChannels) { + let expectedData = destBuffer.getChannelData(c); + expectedValue = expectedData[frame]; + } + + // We may need to add an epsilon in the comparison if we add more + // test vectors. 
+ if (renderedValue != expectedValue) { + let message = s + 'rendered: ' + renderedValue + + ' expected: ' + expectedValue + ' channel: ' + c + + ' frame: ' + frame; + // testFailed(s); + should(renderedValue, s).beEqualTo(expectedValue); + return; + } + } + } + + should(true, s).beTrue(); + } + + function checkResult(buffer, should) { + // Sanity check result. + should(buffer.length, 'Rendered number of frames') + .beEqualTo(numberOfTests * singleTestFrameLength); + should(buffer.numberOfChannels, 'Rendered number of channels') + .beEqualTo(renderNumberOfChannels); + + // Check all the tests. + let testNumber = 0; + for (let m = 0; m < mixingRulesList.length; ++m) { + let mixingRules = mixingRulesList[m]; + for (let i = 0; i < connectionsList.length; ++i, ++testNumber) { + checkTestResult( + buffer, testNumber, connectionsList[i], + mixingRules.channelCount, mixingRules.channelCountMode, + mixingRules.channelInterpretation, should); + } + } + } + + audit.define( + {label: 'test', description: 'Channel mixing rules for AudioNodes'}, + function(task, should) { + + // Create 8-channel offline audio context. Each test will render 8 + // sample-frames starting at sample-frame position testNumber * 8. + let totalFrameLength = numberOfTests * singleTestFrameLength; + context = new OfflineAudioContext( + renderNumberOfChannels, totalFrameLength, sampleRate); + + // Set destination to discrete mixing. + context.destination.channelCount = renderNumberOfChannels; + context.destination.channelCountMode = 'explicit'; + context.destination.channelInterpretation = 'discrete'; + + // Create test buffers from 1 to 8 channels. + testBuffers = new Array(); + for (let i = 0; i < renderNumberOfChannels; ++i) { + testBuffers[i] = createShiftedImpulseBuffer( + context, i + 1, singleTestFrameLength); + } + + // Schedule all the tests. + let testNumber = 0; + for (let m = 0; m < mixingRulesList.length; ++m) { + let mixingRules = mixingRulesList[m]; + for (let i = 0; i < connectionsList.length; ++i, ++testNumber) { + scheduleTest( + testNumber, connectionsList[i], mixingRules.channelCount, + mixingRules.channelCountMode, + mixingRules.channelInterpretation); + } + } + + // Render then check results. + // context.oncomplete = checkResult; + context.startRendering().then(buffer => { + checkResult(buffer, should); + task.done(); + }); + ; + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html new file mode 100644 index 0000000000..02caea667b --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html @@ -0,0 +1,165 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode-connect-method-chaining.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + // AudioNode dictionary with associated arguments. 
+ let nodeDictionary = [ + {name: 'Analyser'}, {name: 'BiquadFilter'}, {name: 'BufferSource'}, + {name: 'ChannelMerger', args: [6]}, + {name: 'ChannelSplitter', args: [6]}, {name: 'Convolver'}, + {name: 'Delay', args: []}, {name: 'DynamicsCompressor'}, {name: 'Gain'}, + {name: 'Oscillator'}, {name: 'Panner'}, + {name: 'ScriptProcessor', args: [512, 1, 1]}, {name: 'StereoPanner'}, + {name: 'WaveShaper'} + ]; + + + function verifyReturnedNode(should, config) { + should( + config.destination === config.returned, + 'The return value of ' + config.desc + ' matches the destination ' + + config.returned.constructor.name) + .beEqualTo(true); + } + + // Test utility for batch method checking: in order to test 3 method + // signatures, so we create 3 dummy destinations. + // 1) .connect(GainNode) + // 2) .connect(BiquadFilterNode, output) + // 3) .connect(ChannelMergerNode, output, input) + function testConnectMethod(context, should, options) { + let source = + context['create' + options.name].apply(context, options.args); + let sourceName = source.constructor.name; + + let destination1 = context.createGain(); + verifyReturnedNode(should, { + source: source, + destination: destination1, + returned: source.connect(destination1), + desc: sourceName + '.connect(' + destination1.constructor.name + ')' + }); + + let destination2 = context.createBiquadFilter(); + verifyReturnedNode(should, { + source: source, + destination: destination2, + returned: source.connect(destination2, 0), + desc: + sourceName + '.connect(' + destination2.constructor.name + ', 0)' + }); + + let destination3 = context.createChannelMerger(); + verifyReturnedNode(should, { + source: source, + destination: destination3, + returned: source.connect(destination3, 0, 1), + desc: sourceName + '.connect(' + destination3.constructor.name + + ', 0, 1)' + }); + } + + + let audit = Audit.createTaskRunner(); + + // Task: testing entries from the dictionary. + audit.define('from-dictionary', (task, should) => { + let context = new AudioContext(); + + for (let i = 0; i < nodeDictionary.length; i++) + testConnectMethod(context, should, nodeDictionary[i]); + + task.done(); + }); + + // Task: testing Media* nodes. + audit.define('media-group', (task, should) => { + let context = new AudioContext(); + + // Test MediaElementSourceNode needs an <audio> element. + let mediaElement = document.createElement('audio'); + testConnectMethod( + context, should, + {name: 'MediaElementSource', args: [mediaElement]}); + + // MediaStreamAudioDestinationNode has no output so it connect method + // chaining isn't possible. + + // MediaStreamSourceNode requires 'stream' object to be constructed, + // which is a part of MediaStreamDestinationNode. + let streamDestination = context.createMediaStreamDestination(); + let stream = streamDestination.stream; + testConnectMethod( + context, should, {name: 'MediaStreamSource', args: [stream]}); + + task.done(); + }); + + // Task: test the exception thrown by invalid operation. + audit.define('invalid-operation', (task, should) => { + let contextA = new AudioContext(); + let contextB = new AudioContext(); + let gain1 = contextA.createGain(); + let gain2 = contextA.createGain(); + + // Test if the first connection throws correctly. The first gain node + // does not have the second output, so it should throw. + should(function() { + gain1.connect(gain2, 1).connect(contextA.destination); + }, 'Connecting with an invalid output').throw(DOMException, 'IndexSizeError'); + + // Test if the second connection throws correctly. 
The contextB's + // destination is not compatible with the nodes from contextA, thus the + // first connection succeeds but the second one should throw. + should( + function() { + gain1.connect(gain2).connect(contextB.destination); + }, + 'Connecting to a node from the different context') + .throw(DOMException, 'InvalidAccessError'); + + task.done(); + }); + + // Task: verify if the method chaining actually works. + audit.define('verification', (task, should) => { + // We pick the lowest sample rate allowed to run the test efficiently. + let context = new OfflineAudioContext(1, 128, 8000); + + let constantBuffer = createConstantBuffer(context, 1, 1.0); + + let source = context.createBufferSource(); + source.buffer = constantBuffer; + source.loop = true; + + let gain1 = context.createGain(); + gain1.gain.value = 0.5; + let gain2 = context.createGain(); + gain2.gain.value = 0.25; + + source.connect(gain1).connect(gain2).connect(context.destination); + source.start(); + + context.startRendering() + .then(function(buffer) { + should( + buffer.getChannelData(0), + 'The output of chained connection of gain nodes') + .beConstantValueOf(0.125); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html new file mode 100644 index 0000000000..eca15dedfa --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html @@ -0,0 +1,77 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode-connect-order.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let audit = Audit.createTaskRunner(); + let sampleRate = 44100.0; + let renderLengthSeconds = 0.125; + let delayTimeSeconds = 0.1; + + function createSinWaveBuffer(context, lengthInSeconds, frequency) { + let audioBuffer = + context.createBuffer(1, lengthInSeconds * sampleRate, sampleRate); + + let n = audioBuffer.length; + let data = audioBuffer.getChannelData(0); + + for (let i = 0; i < n; ++i) { + data[i] = Math.sin(frequency * 2 * Math.PI * i / sampleRate); + } + + return audioBuffer; + } + + audit.define( + { + label: 'Test connections', + description: + 'AudioNode connection order doesn\'t trigger assertion errors' + }, + function(task, should) { + // Create offline audio context. + let context = new OfflineAudioContext( + 1, sampleRate * renderLengthSeconds, sampleRate); + let toneBuffer = + createSinWaveBuffer(context, renderLengthSeconds, 880); + + let bufferSource = context.createBufferSource(); + bufferSource.buffer = toneBuffer; + bufferSource.connect(context.destination); + + let delay = context.createDelay(); + delay.delayTime.value = delayTimeSeconds; + + // We connect delay node to gain node before anything is connected + // to delay node itself. We do this because we try to trigger the + // ASSERT which might be fired due to AudioNode connection order, + // especially when gain node and delay node is involved e.g. + // https://bugs.webkit.org/show_bug.cgi?id=76685. 
+ + should(() => { + let gain = context.createGain(); + gain.connect(context.destination); + delay.connect(gain); + }, 'Connecting nodes').notThrow(); + + bufferSource.start(0); + + let promise = context.startRendering(); + + should(promise, 'OfflineContext startRendering()') + .beResolved() + .then(task.done.bind(task)); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html new file mode 100644 index 0000000000..3af44fb7af --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html @@ -0,0 +1,15 @@ +<!DOCTYPE html> +<title>Test the return value of connect when connecting two AudioNodes</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +test(function(t) { + var context = new OfflineAudioContext(1, 1, 44100); + var g1 = context.createGain(); + var g2 = context.createGain(); + var rv = g1.connect(g2); + assert_equals(rv, g2); + var rv = g1.connect(g2); + assert_equals(rv, g2); +}, "connect should return the node connected to."); +</script> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html new file mode 100644 index 0000000000..0b09edd4a7 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html @@ -0,0 +1,221 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode-disconnect-audioparam.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let renderQuantum = 128; + + let sampleRate = 44100; + let renderDuration = 0.5; + let disconnectTime = 0.5 * renderDuration; + + let audit = Audit.createTaskRunner(); + + // Calculate the index for disconnection. + function getDisconnectIndex(disconnectTime) { + let disconnectIndex = disconnectTime * sampleRate; + disconnectIndex = renderQuantum * + Math.floor((disconnectIndex + renderQuantum - 1) / renderQuantum); + return disconnectIndex; + } + + // Get the index of value change. + function getValueChangeIndex(array, targetValue) { + return array.findIndex(function(element, index) { + if (element === targetValue) + return true; + }); + } + + // Task 1: test disconnect(AudioParam) method. + audit.define('disconnect(AudioParam)', (task, should) => { + // Creates a buffer source with value [1] and then connect it to two + // gain nodes in series. The output of the buffer source is lowered by + // half + // (* 0.5) and then connected to two |.gain| AudioParams in each gain + // node. + // + // (1) bufferSource => gain1 => gain2 + // (2) bufferSource => half => gain1.gain + // (3) half => gain2.gain + // + // This graph should produce the output of 2.25 (= 1 * 1.5 * 1.5). After + // disconnecting (3), it should produce 1.5. 
+ let context = + new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + let source = context.createBufferSource(); + let buffer1ch = createConstantBuffer(context, 1, 1); + let half = context.createGain(); + let gain1 = context.createGain(); + let gain2 = context.createGain(); + + source.buffer = buffer1ch; + source.loop = true; + half.gain.value = 0.5; + + source.connect(gain1); + gain1.connect(gain2); + gain2.connect(context.destination); + source.connect(half); + + // Connecting |half| to both |gain1.gain| and |gain2.gain| amplifies the + // signal by 2.25 (= 1.5 * 1.5) because each gain node amplifies the + // signal by 1.5 (= 1.0 + 0.5). + half.connect(gain1.gain); + half.connect(gain2.gain); + + source.start(); + + // Schedule the disconnection at the half of render duration. + context.suspend(disconnectTime).then(function() { + half.disconnect(gain2.gain); + context.resume(); + }); + + context.startRendering() + .then(function(buffer) { + let channelData = buffer.getChannelData(0); + let disconnectIndex = getDisconnectIndex(disconnectTime); + let valueChangeIndex = getValueChangeIndex(channelData, 1.5); + + // Expected values are: 1 * 1.5 * 1.5 -> 1 * 1.5 = [2.25, 1.5] + should(channelData, 'Channel #0').containValues([2.25, 1.5]); + should(valueChangeIndex, 'The index of value change') + .beEqualTo(disconnectIndex); + }) + .then(() => task.done()); + }); + + // Task 2: test disconnect(AudioParam, output) method. + audit.define('disconnect(AudioParam, output)', (task, should) => { + // Create a 2-channel buffer source with [1, 2] in each channel and + // make a serial connection through gain1 and gain 2. The make the + // buffer source half with a gain node and connect it to a 2-output + // splitter. Connect each output to 2 gain AudioParams respectively. + // + // (1) bufferSource => gain1 => gain2 + // (2) bufferSource => half => splitter(2) + // (3) splitter#0 => gain1.gain + // (4) splitter#1 => gain2.gain + // + // This graph should produce 3 (= 1 * 1.5 * 2) and 6 (= 2 * 1.5 * 2) for + // each channel. After disconnecting (4), it should output 1.5 and 3. + let context = + new OfflineAudioContext(2, renderDuration * sampleRate, sampleRate); + let source = context.createBufferSource(); + let buffer2ch = createConstantBuffer(context, 1, [1, 2]); + let splitter = context.createChannelSplitter(2); + let half = context.createGain(); + let gain1 = context.createGain(); + let gain2 = context.createGain(); + + source.buffer = buffer2ch; + source.loop = true; + half.gain.value = 0.5; + + source.connect(gain1); + gain1.connect(gain2); + gain2.connect(context.destination); + + // |source| originally is [1, 2] but it becomes [0.5, 1] after 0.5 gain. + // Each splitter's output will be applied to |gain1.gain| and + // |gain2.gain| respectively in an additive fashion. + source.connect(half); + half.connect(splitter); + + // This amplifies the signal by 1.5. (= 1.0 + 0.5) + splitter.connect(gain1.gain, 0); + + // This amplifies the signal by 2. (= 1.0 + 1.0) + splitter.connect(gain2.gain, 1); + + source.start(); + + // Schedule the disconnection at the half of render duration. 
+ context.suspend(disconnectTime).then(function() { + splitter.disconnect(gain2.gain, 1); + context.resume(); + }); + + context.startRendering() + .then(function(buffer) { + let channelData0 = buffer.getChannelData(0); + let channelData1 = buffer.getChannelData(1); + + let disconnectIndex = getDisconnectIndex(disconnectTime); + let valueChangeIndexCh0 = getValueChangeIndex(channelData0, 1.5); + let valueChangeIndexCh1 = getValueChangeIndex(channelData1, 3); + + // Expected values are: 1 * 1.5 * 2 -> 1 * 1.5 = [3, 1.5] + should(channelData0, 'Channel #0').containValues([3, 1.5]); + should( + valueChangeIndexCh0, + 'The index of value change in channel #0') + .beEqualTo(disconnectIndex); + + // Expected values are: 2 * 1.5 * 2 -> 2 * 1.5 = [6, 3] + should(channelData1, 'Channel #1').containValues([6, 3]); + should( + valueChangeIndexCh1, + 'The index of value change in channel #1') + .beEqualTo(disconnectIndex); + }) + .then(() => task.done()); + }); + + // Task 3: exception checks. + audit.define('exceptions', (task, should) => { + let context = new AudioContext(); + let gain1 = context.createGain(); + let splitter = context.createChannelSplitter(2); + let gain2 = context.createGain(); + let gain3 = context.createGain(); + + // Connect a splitter to gain nodes and merger so we can test the + // possible ways of disconnecting the nodes to verify that appropriate + // exceptions are thrown. + gain1.connect(splitter); + splitter.connect(gain2.gain, 0); + splitter.connect(gain3.gain, 1); + gain2.connect(gain3); + gain3.connect(context.destination); + + // gain1 is not connected to gain3.gain. Exception should be thrown. + should( + function() { + gain1.disconnect(gain3.gain); + }, + 'gain1.disconnect(gain3.gain)') + .throw(DOMException, 'InvalidAccessError'); + + // When the output index is good but the destination is invalid. + should( + function() { + splitter.disconnect(gain1.gain, 1); + }, + 'splitter.disconnect(gain1.gain, 1)') + .throw(DOMException, 'InvalidAccessError'); + + // When both arguments are wrong, throw IndexSizeError first. + should( + function() { + splitter.disconnect(gain1.gain, 2); + }, + 'splitter.disconnect(gain1.gain, 2)') + .throw(DOMException, 'IndexSizeError'); + + task.done(); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html new file mode 100644 index 0000000000..65b93222d1 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html @@ -0,0 +1,298 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode-disconnect.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let audit = Audit.createTaskRunner(); + + // Task 1: test disconnect() method. + audit.define('disconnect()', (task, should) => { + + // Connect a source to multiple gain nodes, each connected to the + // destination. Then disconnect the source. The expected output should + // be all zeros since the source was disconnected. 
+ let context = new OfflineAudioContext(1, 128, 44100); + let source = context.createBufferSource(); + let buffer1ch = createConstantBuffer(context, 128, [1]); + let gain1 = context.createGain(); + let gain2 = context.createGain(); + let gain3 = context.createGain(); + + source.buffer = buffer1ch; + + source.connect(gain1); + source.connect(gain2); + source.connect(gain3); + gain1.connect(context.destination); + gain2.connect(context.destination); + gain3.connect(context.destination); + source.start(); + + // This disconnects everything. + source.disconnect(); + + context.startRendering() + .then(function(buffer) { + + // With everything disconnected, the result should be zero. + should(buffer.getChannelData(0), 'Channel #0') + .beConstantValueOf(0); + + }) + .then(() => task.done()); + }); + + // Task 2: test disconnect(output) method. + audit.define('disconnect(output)', (task, should) => { + + // Create multiple connections from each output of a ChannelSplitter + // to a gain node. Then test if disconnecting a single output of + // splitter is actually disconnected. + let context = new OfflineAudioContext(1, 128, 44100); + let source = context.createBufferSource(); + let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]); + let splitter = context.createChannelSplitter(3); + let sum = context.createGain(); + + source.buffer = buffer3ch; + + source.connect(splitter); + splitter.connect(sum, 0); + splitter.connect(sum, 1); + splitter.connect(sum, 2); + sum.connect(context.destination); + source.start(); + + // This disconnects the second output. + splitter.disconnect(1); + + context.startRendering() + .then(function(buffer) { + + // The rendered channel should contain 4. (= 1 + 0 + 3) + should(buffer.getChannelData(0), 'Channel #0') + .beConstantValueOf(4); + + }) + .then(() => task.done()); + }); + + // Task 3: test disconnect(AudioNode) method. + audit.define('disconnect(AudioNode)', (task, should) => { + + // Connect a source to multiple gain nodes. Then test if disconnecting a + // single destination selectively works correctly. + let context = new OfflineAudioContext(1, 128, 44100); + let source = context.createBufferSource(); + let buffer1ch = createConstantBuffer(context, 128, [1]); + let gain1 = context.createGain(); + let gain2 = context.createGain(); + let gain3 = context.createGain(); + let orphan = context.createGain(); + + source.buffer = buffer1ch; + + source.connect(gain1); + source.connect(gain2); + source.connect(gain3); + gain1.connect(context.destination); + gain2.connect(context.destination); + gain3.connect(context.destination); + source.start(); + + source.disconnect(gain2); + + context.startRendering() + .then(function(buffer) { + + // The |sum| gain node should produce value 2. (1 + 0 + 1 = 2) + should(buffer.getChannelData(0), 'Channel #0') + .beConstantValueOf(2); + + }) + .then(() => task.done()); + }); + + // Task 4: test disconnect(AudioNode, output) method. + audit.define('disconnect(AudioNode, output)', (task, should) => { + + // Connect a buffer with 2 channels with each containing 1 and 2 + // respectively to a ChannelSplitter, then connect the splitter to 2 + // gain nodes as shown below: + // (1) splitter#0 => gain1 + // (2) splitter#0 => gain2 + // (3) splitter#1 => gain2 + // Then disconnect (2) and verify if the selective disconnection on a + // specified output of the destination node works correctly. 
+ let context = new OfflineAudioContext(1, 128, 44100); + let source = context.createBufferSource(); + let buffer2ch = createConstantBuffer(context, 128, [1, 2]); + let splitter = context.createChannelSplitter(2); + let gain1 = context.createGain(); + let gain2 = context.createGain(); + + source.buffer = buffer2ch; + + source.connect(splitter); + splitter.connect(gain1, 0); // gain1 gets channel 0. + splitter.connect(gain2, 0); // gain2 sums channel 0 and 1. + splitter.connect(gain2, 1); + gain1.connect(context.destination); + gain2.connect(context.destination); + source.start(); + + splitter.disconnect(gain2, 0); // Now gain2 gets [2] + + context.startRendering() + .then(function(buffer) { + + // The sum of gain1 and gain2 should produce value 3. (= 1 + 2) + should(buffer.getChannelData(0), 'Channel #0') + .beConstantValueOf(3); + + }) + .then(() => task.done()); + }); + + // Task 5: test disconnect(AudioNode, output, input) method. + audit.define('disconnect(AudioNode, output, input)', (task, should) => { + + // Create a 3-channel buffer with [1, 2, 3] in each channel and then + // pass it through a splitter and a merger. Each input/output of the + // splitter and the merger is connected in a sequential order as shown + // below. + // (1) splitter#0 => merger#0 + // (2) splitter#1 => merger#1 + // (3) splitter#2 => merger#2 + // Then disconnect (3) and verify if each channel contains [1] and [2] + // respectively. + let context = new OfflineAudioContext(3, 128, 44100); + let source = context.createBufferSource(); + let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]); + let splitter = context.createChannelSplitter(3); + let merger = context.createChannelMerger(3); + + source.buffer = buffer3ch; + + source.connect(splitter); + splitter.connect(merger, 0, 0); + splitter.connect(merger, 1, 1); + splitter.connect(merger, 2, 2); + merger.connect(context.destination); + source.start(); + + splitter.disconnect(merger, 2, 2); + + context.startRendering() + .then(function(buffer) { + + // Each channel should have 1, 2, and 0 respectively. + should(buffer.getChannelData(0), 'Channel #0') + .beConstantValueOf(1); + should(buffer.getChannelData(1), 'Channel #1') + .beConstantValueOf(2); + should(buffer.getChannelData(2), 'Channel #2') + .beConstantValueOf(0); + + }) + .then(() => task.done()); + }); + + // Task 6: exception checks. + audit.define('exceptions', (task, should) => { + let context = new OfflineAudioContext(2, 128, 44100); + let gain1 = context.createGain(); + let splitter = context.createChannelSplitter(2); + let merger = context.createChannelMerger(2); + let gain2 = context.createGain(); + let gain3 = context.createGain(); + + // Connect a splitter to gain nodes and merger so we can test the + // possible ways of disconnecting the nodes to verify that appropriate + // exceptions are thrown. + gain1.connect(splitter); + splitter.connect(gain2, 0); + splitter.connect(gain3, 1); + splitter.connect(merger, 0, 0); + splitter.connect(merger, 1, 1); + gain2.connect(gain3); + gain3.connect(context.destination); + merger.connect(context.destination); + + // There is no output #2. An exception should be thrown. + should(function() { + splitter.disconnect(2); + }, 'splitter.disconnect(2)').throw(DOMException, 'IndexSizeError'); + + // Disconnecting the output already disconnected should not throw. + should(function() { + splitter.disconnect(1); + splitter.disconnect(1); + }, 'Disconnecting a connection twice').notThrow(); + + // gain1 is not connected gain2. 
An exception should be thrown. + should(function() { + gain1.disconnect(gain2); + }, 'gain1.disconnect(gain2)').throw(DOMException, 'InvalidAccessError'); + + // gain1 and gain3 are not connected. An exception should be thrown. + should(function() { + gain1.disconnect(gain3); + }, 'gain1.disconnect(gain3)').throw(DOMException, 'InvalidAccessError'); + + // There is no output #2 in the splitter. An exception should be thrown. + should(function() { + splitter.disconnect(gain2, 2); + }, 'splitter.disconnect(gain2, 2)').throw(DOMException, 'IndexSizeError'); + + // The splitter and gain1 are not connected. An exception should be + // thrown. + should(function() { + splitter.disconnect(gain1, 0); + }, 'splitter.disconnect(gain1, 0)').throw(DOMException, 'InvalidAccessError'); + + // The splitter output #0 and the gain3 output #0 are not connected. An + // exception should be thrown. + should(function() { + splitter.disconnect(gain3, 0, 0); + }, 'splitter.disconnect(gain3, 0, 0)').throw(DOMException, 'InvalidAccessError'); + + // The output index is out of bound. An exception should be thrown. + should(function() { + splitter.disconnect(merger, 3, 0); + }, 'splitter.disconnect(merger, 3, 0)').throw(DOMException, 'IndexSizeError'); + + task.done(); + }); + + audit.define('disabled-outputs', (task, should) => { + // See crbug.com/656652 + let context = new OfflineAudioContext(2, 1024, 44100); + let g1 = context.createGain(); + let g2 = context.createGain(); + g1.connect(g2); + g1.disconnect(g2); + let g3 = context.createGain(); + g2.connect(g3); + g1.connect(g2); + context.startRendering() + .then(function() { + // If we make it here, we passed. + should(true, 'Disabled outputs handled') + .message('correctly', 'inccorrectly'); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js new file mode 100644 index 0000000000..89bdf2aa98 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js @@ -0,0 +1,14 @@ +test(function() { + const iframe = + document.createElementNS('http://www.w3.org/1999/xhtml', 'iframe'); + document.body.appendChild(iframe); + + // Create AudioContext and AudioNode from iframe + const context = new iframe.contentWindow.AudioContext(); + const source = context.createOscillator(); + source.connect(context.destination); + + // AudioContext should be put closed state after iframe destroyed + document.body.removeChild(iframe); + assert_equals(context.state, 'closed'); +}, 'Call a constructor from iframe page and then destroy the iframe'); diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html new file mode 100644 index 0000000000..0b57d27e8e --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html @@ -0,0 +1,93 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audionode.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <div id="description"></div> + <div id="console"></div> + <script 
id="layout-test-code"> + let audit = Audit.createTaskRunner(); + + let context = 0; + let context2 = 0; + let context3 = 0; + + audit.define( + {label: 'test', description: 'Basic tests for AudioNode API.'}, + function(task, should) { + + context = new AudioContext(); + window.audioNode = context.createBufferSource(); + + // Check input and output numbers of AudioSourceNode. + should(audioNode.numberOfInputs, 'AudioBufferSource.numberOfInputs') + .beEqualTo(0); + should( + audioNode.numberOfOutputs, 'AudioBufferSource.numberOfOutputs') + .beEqualTo(1); + + // Check input and output numbers of AudioDestinationNode + should( + context.destination.numberOfInputs, + 'AudioContext.destination.numberOfInputs') + .beEqualTo(1); + should( + context.destination.numberOfOutputs, + 'AudioContext.destination.numberOfOutputs') + .beEqualTo(0); + + // Try calling connect() method with illegal values. + should( + () => audioNode.connect(0, 0, 0), 'audioNode.connect(0, 0, 0)') + .throw(TypeError); + should( + () => audioNode.connect(null, 0, 0), + 'audioNode.connect(null, 0, 0)') + .throw(TypeError); + should( + () => audioNode.connect(context.destination, 5, 0), + 'audioNode.connect(context.destination, 5, 0)') + .throw(DOMException, 'IndexSizeError'); + should( + () => audioNode.connect(context.destination, 0, 5), + 'audioNode.connect(context.destination, 0, 5)') + .throw(DOMException, 'IndexSizeError'); + + should( + () => audioNode.connect(context.destination, 0, 0), + 'audioNode.connect(context.destination, 0, 0)') + .notThrow(); + + // Create a new context and try to connect the other context's node + // to this one. + context2 = new AudioContext(); + should( + () => window.audioNode.connect(context2.destination), + 'Connecting a node to a different context') + .throw(DOMException, 'InvalidAccessError'); + + // 3-arg AudioContext doesn't create an offline context anymore. + should( + () => context3 = new AudioContext(1, 44100, 44100), + 'context3 = new AudioContext(1, 44100, 44100)') + .throw(TypeError); + + // Ensure it is an EventTarget + should( + audioNode instanceof EventTarget, 'AudioNode is an EventTarget') + .beTrue(); + + task.done(); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html new file mode 100644 index 0000000000..35cfca8e4e --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html @@ -0,0 +1,66 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test Setting of channelCountMode and channelInterpretation + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + // Fairly arbitrary sample rate and number of frames, except the number of + // frames should be more than a few render quantums. + let sampleRate = 16000; + let renderFrames = 10 * 128; + + let audit = Audit.createTaskRunner(); + + audit.define('interp', (task, should) => { + let context = new OfflineAudioContext(1, renderFrames, sampleRate); + let node = context.createGain(); + + // Set a new interpretation and verify that it changed. 
+ node.channelInterpretation = 'discrete'; + let value = node.channelInterpretation; + should(value, 'node.channelInterpretation').beEqualTo('discrete'); + node.connect(context.destination); + + context.startRendering() + .then(function(buffer) { + // After rendering, the value should have been changed. + should( + node.channelInterpretation, + 'After rendering node.channelInterpretation') + .beEqualTo('discrete'); + }) + .then(() => task.done()); + }); + + audit.define('mode', (task, should) => { + let context = new OfflineAudioContext(1, renderFrames, sampleRate); + let node = context.createGain(); + + // Set a new mode and verify that it changed. + node.channelCountMode = 'explicit'; + let value = node.channelCountMode; + should(value, 'node.channelCountMode').beEqualTo('explicit'); + node.connect(context.destination); + + context.startRendering() + .then(function(buffer) { + // After rendering, the value should have been changed. + should( + node.channelCountMode, + 'After rendering node.channelCountMode') + .beEqualTo('explicit'); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html new file mode 100644 index 0000000000..f763d34787 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Connections and disconnections with different contexts + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script> + let audit = Audit.createTaskRunner(); + + // Different contexts to be used for testing. 
+ let c1; + let c2; + + audit.define( + {label: 'setup', description: 'Contexts for testing'}, + (task, should) => { + should(() => {c1 = new AudioContext()}, 'c1 = new AudioContext()') + .notThrow(); + should(() => {c2 = new AudioContext()}, 'c2 = new AudioContext()') + .notThrow(); + task.done(); + }); + + audit.define( + {label: 'Test 1', description: 'Connect nodes between contexts'}, + (task, should) => { + let g1; + let g2; + should( + () => {g1 = new GainNode(c1)}, 'Test 1: g1 = new GainNode(c1)') + .notThrow(); + should( + () => {g2 = new GainNode(c2)}, 'Test 1: g2 = new GainNode(c2)') + .notThrow(); + should(() => {g2.connect(g1)}, 'Test 1: g2.connect(g1)') + .throw(DOMException, 'InvalidAccessError'); + task.done(); + }); + + audit.define( + {label: 'Test 2', description: 'Connect AudioParam between contexts'}, + (task, should) => { + let g1; + let g2; + should( + () => {g1 = new GainNode(c1)}, 'Test 2: g1 = new GainNode(c1)') + .notThrow(); + should( + () => {g2 = new GainNode(c2)}, 'Test 2: g2 = new GainNode(c2)') + .notThrow(); + should(() => {g2.connect(g1.gain)}, 'Test 2: g2.connect(g1.gain)') + .throw(DOMException, 'InvalidAccessError'); + task.done(); + }); + + audit.define( + {label: 'Test 3', description: 'Disconnect nodes between contexts'}, + (task, should) => { + let g1; + let g2; + should( + () => {g1 = new GainNode(c1)}, 'Test 3: g1 = new GainNode(c1)') + .notThrow(); + should( + () => {g2 = new GainNode(c2)}, 'Test 3: g2 = new GainNode(c2)') + .notThrow(); + should(() => {g2.disconnect(g1)}, 'Test 3: g2.disconnect(g1)') + .throw(DOMException, 'InvalidAccessError'); + task.done(); + }); + + audit.define( + { + label: 'Test 4', + description: 'Disconnect AudioParam between contexts' + }, + (task, should) => { + let g1; + let g2; + should( + () => {g1 = new GainNode(c1)}, 'Test 4: g1 = new GainNode(c1)') + .notThrow(); + should( + () => {g2 = new GainNode(c2)}, 'Test 4: g2 = new GainNode(c2)') + .notThrow(); + should( + () => {g2.disconnect(g1.gain)}, 'Test 4: g2.connect(g1.gain)') + .throw(DOMException, 'InvalidAccessError'); + task.done(); + }); + + audit.run(); + </script> + </body> +</html> |
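Note: audionode-channel-rules.html above relies on computeNumberOfChannels() from /webaudio/resources/mixing-rules.js, which is not part of this patch. The following is a minimal sketch of what that helper presumably computes, based on the Web Audio spec's channelCountMode rules and the test's string-encoded connection lists (e.g. '128' means three connections of 1, 2, and 8 channels); the actual mixing-rules.js implementation may differ.

// Sketch (assumed behaviour, not the shipped mixing-rules.js code):
// compute how many channels a node's input mixes to, given the channel
// counts of the connected outputs and the node's channelCount /
// channelCountMode settings.
function computeNumberOfChannels(connections, channelCount, channelCountMode) {
  if (channelCountMode === 'explicit')
    return channelCount;

  // Largest channel count among all connections, e.g. '128' -> max(1, 2, 8) = 8.
  let maxChannels = 1;
  for (let i = 0; i < connections.length; ++i) {
    maxChannels =
        Math.max(maxChannels, connections.charCodeAt(i) - '0'.charCodeAt(0));
  }

  if (channelCountMode === 'max')
    return maxChannels;  // channelCount is ignored in 'max' mode.

  // 'clamped-max': behave like 'max', but never exceed channelCount.
  return Math.min(maxChannels, channelCount);
}

// Example: connections '128' into a node with channelCount = 4 and
// channelCountMode = 'clamped-max' mix down to 4 channels.
console.log(computeNumberOfChannels('128', 4, 'clamped-max'));  // 4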