Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface')
17 files changed, 1564 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html new file mode 100644 index 0000000000..a83fa1dbe6 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html @@ -0,0 +1,31 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Testing behavior of AudioContext after execution context is detached + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + const audit = Audit.createTaskRunner(); + + audit.define('decoding-on-detached-iframe', (task, should) => { + const iframe = + document.createElementNS("http://www.w3.org/1999/xhtml", "iframe"); + document.body.appendChild(iframe); + let context = new iframe.contentWindow.AudioContext(); + document.body.removeChild(iframe); + + should(context.decodeAudioData(new ArrayBuffer(1)), + 'decodeAudioData() upon a detached iframe') + .beRejectedWith('InvalidStateError') + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html new file mode 100644 index 0000000000..5889faf7cc --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html @@ -0,0 +1,32 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Testing AudioContext.getOutputTimestamp() method (cross-realm) + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + const audit = Audit.createTaskRunner(); + + audit.define("getoutputtimestamp-cross-realm", function(task, should) { + const mainContext = new AudioContext(); + return task.timeout(() => { + const iframe = document.createElement("iframe"); + document.body.append(iframe); + const iframeContext = new iframe.contentWindow.AudioContext(); + + should(mainContext.getOutputTimestamp().performanceTime, "mainContext's performanceTime") + .beGreaterThan(iframeContext.getOutputTimestamp().performanceTime, "iframeContext's performanceTime"); + should(iframeContext.getOutputTimestamp.call(mainContext).performanceTime, "mainContext's performanceTime (via iframeContext's method)") + .beCloseTo(mainContext.getOutputTimestamp().performanceTime, "mainContext's performanceTime", { threshold: 0.01 }); + }, 1000); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html new file mode 100644 index 0000000000..952f38b1ed --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html @@ -0,0 +1,33 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Testing 
AudioContext.getOutputTimestamp() method + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let audit = Audit.createTaskRunner(); + + audit.define('getoutputtimestamp-initial-values', function(task, should) { + let context = new AudioContext; + let timestamp = context.getOutputTimestamp(); + + should(timestamp.contextTime, 'timestamp.contextTime').exist(); + should(timestamp.performanceTime, 'timestamp.performanceTime').exist(); + + should(timestamp.contextTime, 'timestamp.contextTime') + .beGreaterThanOrEqualTo(0); + should(timestamp.performanceTime, 'timestamp.performanceTime') + .beGreaterThanOrEqualTo(0); + + task.done(); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html new file mode 100644 index 0000000000..e4f6001eda --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html @@ -0,0 +1,94 @@ +<!doctype html> +<title>Test AudioContext construction when document is not fully active</title> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<script src="/common/get-host-info.sub.js"></script> +<body></body> +<script> +const dir = location.pathname.replace(/\/[^\/]*$/, '/'); +const helper = dir + 'resources/not-fully-active-helper.sub.html?childsrc='; +const remote_helper = get_host_info().HTTP_NOTSAMESITE_ORIGIN + helper; +const blank_url = get_host_info().ORIGIN + '/common/blank.html'; + +const load_content = (frame, src) => { + if (src == undefined) { + frame.srcdoc = '<html></html>'; + } else { + frame.src = src; + } + return new Promise(resolve => frame.onload = () => resolve(frame)); +}; +const append_iframe = (src) => { + const frame = document.createElement('iframe'); + document.body.appendChild(frame); + return load_content(frame, src); +}; +const remote_op = (win, op) => { + win.postMessage(op, '*'); + return new Promise(resolve => window.onmessage = e => { + if (e.data == 'DONE ' + op) resolve(); + }); +}; +const test_constructor_throws = async (win, deactivate) => { + const {AudioContext, DOMException} = win; + await deactivate(); + assert_throws_dom("InvalidStateError", DOMException, + () => new AudioContext()); +}; + +promise_test(async () => { + const frame = await append_iframe(); + return test_constructor_throws(frame.contentWindow, () => frame.remove()); +}, "removed frame"); +promise_test(async () => { + const frame = await append_iframe(); + return test_constructor_throws(frame.contentWindow, + () => load_content(frame)); +}, "navigated frame"); +promise_test(async () => { + const frame = await append_iframe(helper + blank_url); + const inner = frame.contentWindow.frames[0]; + return test_constructor_throws(inner, () => frame.remove()); +}, "frame in removed frame"); +promise_test(async () => { + const frame = await append_iframe(helper + blank_url); + const inner = frame.contentWindow.frames[0]; + return test_constructor_throws(inner, () => load_content(frame)); +}, "frame in navigated frame"); +promise_test(async () => { + const frame = await append_iframe(remote_helper + blank_url); + const inner = 
frame.contentWindow.frames[0]; + return test_constructor_throws(inner, () => frame.remove()); +}, "frame in removed remote-site frame"); +promise_test(async () => { + const frame = await append_iframe(remote_helper + blank_url); + const inner = frame.contentWindow.frames[0]; + return test_constructor_throws(inner, () => load_content(frame)); +}, "frame in navigated remote-site frame"); +promise_test(async () => { + const outer = (await append_iframe(remote_helper + blank_url)).contentWindow; + const inner = outer.frames[0]; + return test_constructor_throws(inner, + () => remote_op(outer, 'REMOVE FRAME')); +}, "removed frame in remote-site frame"); +promise_test(async () => { + const outer = (await append_iframe(remote_helper + blank_url)).contentWindow; + const inner = outer.frames[0]; + return test_constructor_throws(inner, + () => remote_op(outer, 'NAVIGATE FRAME')); +}, "navigated frame in remote-site frame"); +promise_test(async () => { + const url = remote_helper + helper + blank_url; + const outer = (await append_iframe(url)).contentWindow; + const inner = outer.frames[0].frames[0]; + return test_constructor_throws(inner, + () => remote_op(outer, 'REMOVE FRAME')); +}, "frame in removed remote-site frame in remote-site frame"); +promise_test(async () => { + const url = remote_helper + helper + blank_url; + const outer = (await append_iframe(url)).contentWindow; + const inner = outer.frames[0].frames[0]; + return test_constructor_throws(inner, + () => remote_op(outer, 'NAVIGATE FRAME')); +}, "frame in navigated remote-site frame in remote-site frame"); +</script> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html new file mode 100644 index 0000000000..2dedd6cd36 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html @@ -0,0 +1,122 @@ +<!DOCTYPE html> +<head> +<title>Test AudioContext constructor with sinkId options</title> +</head> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<script> +"use strict"; + +let outputDeviceList = null; +let firstDeviceId = null; + +navigator.mediaDevices.getUserMedia({audio: true}).then(() => { + navigator.mediaDevices.enumerateDevices().then((deviceList) => { + outputDeviceList = + deviceList.filter(({kind}) => kind === 'audiooutput'); + assert_greater_than(outputDeviceList.length, 1, + 'the system must have more than 1 device.'); + firstDeviceId = outputDeviceList[1].deviceId; + + // Run async tests concurrently. + async_test(t => testDefaultSinkId(t), + 'Setting sinkId to the empty string at construction should ' + + 'succeed.'); + async_test(t => testValidSinkId(t), + 'Setting sinkId with a valid device identifier at ' + + 'construction should succeed.'); + async_test(t => testAudioSinkOptions(t), + 'Setting sinkId with an AudioSinkOptions at construction ' + + 'should succeed.'); + async_test(t => testExceptions(t), + 'Invalid sinkId arguments should throw an appropriate ' + + 'exception.') + }); +}); + +// 1.2.1. AudioContext constructor +// https://webaudio.github.io/web-audio-api/#AudioContext-constructors + +// Step 10.1.1. If sinkId is equal to [[sink ID]], abort these substeps. +const testDefaultSinkId = (t) => { + // The initial `sinkId` is the empty string. This will cause the same value + // check. 
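+  // In other words, step 10.1.1 quoted above short-circuits here: '' equals
+  // the default [[sink ID]], so no device validation or sink change occurs
+  // and the context simply proceeds to 'running'.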
+ const audioContext = new AudioContext({sinkId: ''}); + audioContext.addEventListener('statechange', () => { + t.step(() => { + assert_equals(audioContext.sinkId, ''); + assert_equals(audioContext.state, 'running'); + }); + audioContext.close(); + t.done(); + }, {once: true}); +}; + +// Step 10.1.2~3: See "Validating sinkId" tests below. + +// Step 10.1.4. If sinkId is a type of DOMString, set [[sink ID]] to sinkId and +// abort these substeps. +const testValidSinkId = (t) => { + const audioContext = new AudioContext({sinkId: firstDeviceId}); + audioContext.addEventListener('statechange', () => { + t.step(() => { + assert_true(audioContext.sinkId === firstDeviceId, + 'the context sinkId should match the given sinkId.'); + }); + audioContext.close(); + t.done(); + }, {once: true}); + t.step_timeout(t.unreached_func('onstatechange not fired or assert failed'), + 100); +}; + +// Step 10.1.5. If sinkId is a type of AudioSinkOptions, set [[sink ID]] to a +// new instance of AudioSinkInfo created with the value of type of sinkId. +const testAudioSinkOptions = (t) => { + const audioContext = new AudioContext({sinkId: {type: 'none'}}); + // The only signal we can use for the sinkId change after construction is + // `statechange` event. + audioContext.addEventListener('statechange', () => { + t.step(() => { + assert_equals(typeof audioContext.sinkId, 'object'); + assert_equals(audioContext.sinkId.type, 'none'); + }); + audioContext.close(); + t.done(); + }, {once: true}); + t.step_timeout(t.unreached_func('onstatechange not fired or assert failed'), + 100); +}; + +// 1.2.4. Validating sinkId +// https://webaudio.github.io/web-audio-api/#validating-sink-identifier + +// Step 3. If document is not allowed to use the feature identified by +// "speaker-selection", return a new DOMException whose name is +// "NotAllowedError". +// TODO: Due to the lack of implementation, this step is not tested. + +const testExceptions = (t) => { + t.step(() => { + // The wrong AudioSinkOption.type should cause a TypeError. + assert_throws_js(TypeError, () => { + const audioContext = new AudioContext({sinkId: {type: 'something_else'}}); + audioContext.close(); + }, 'An invalid AudioSinkOptions.type value should throw a TypeError ' + + 'exception.'); + }); + + t.step(() => { + // Step 4. If sinkIdArg is a type of DOMString but it is not equal to the + // empty string or it does not match any audio output device identified by + // the result that would be provided by enumerateDevices(), return a new + // DOMException whose name is "NotFoundError". 
+    assert_throws_dom('NotFoundError', () => {
+      const audioContext = new AudioContext({sinkId: 'some_random_device_id'});
+      audioContext.close();
+    }, 'An invalid device identifier should throw a NotFoundError exception.');
+  });
+  t.done();
+};
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html
new file mode 100644
index 0000000000..61d2586bfb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html>
+<head>
+<title>Test AudioContext.setSinkId() method</title>
+</head>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+"use strict";
+
+const audioContext = new AudioContext();
+let outputDeviceList = null;
+let firstDeviceId = null;
+
+// Setup: Get permission via getUserMedia() and a list of audio output devices.
+promise_setup(async t => {
+  await navigator.mediaDevices.getUserMedia({ audio: true });
+  const deviceList = await navigator.mediaDevices.enumerateDevices();
+  outputDeviceList =
+      deviceList.filter(({kind}) => kind === 'audiooutput');
+  assert_greater_than(outputDeviceList.length, 1,
+                      'the system must have more than 1 device.');
+  firstDeviceId = outputDeviceList[1].deviceId;
+}, 'Get permission via getUserMedia() and a list of audio output devices.');
+
+
+// 1.2.3. AudioContext.setSinkId() method
+// https://webaudio.github.io/web-audio-api/#dom-audiocontext-setsinkid-domstring-or-audiosinkoptions-sinkid
+
+promise_test(async t => {
+  t.step(() => {
+    // The default value of `sinkId` is the empty string.
+    assert_equals(audioContext.sinkId, '');
+  });
+  t.done();
+}, 'AudioContext.sinkId should default to the empty string.');
+
+promise_test(async t => {
+  // Change to the first non-default device in the list.
+  await audioContext.setSinkId(firstDeviceId);
+  t.step(() => {
+    // If both `sinkId` and [[sink ID]] are a type of DOMString, and they are
+    // equal to each other, resolve the promise immediately.
+    assert_equals(typeof audioContext.sinkId, 'string');
+    assert_equals(audioContext.sinkId, firstDeviceId);
+  });
+  return audioContext.setSinkId(firstDeviceId);
+}, 'setSinkId() with the same sink ID should resolve immediately.');
+
+promise_test(async t => {
+  // If sinkId is a type of AudioSinkOptions and [[sink ID]] is a type of
+  // AudioSinkInfo, and type in sinkId and type in [[sink ID]] are equal,
+  // resolve the promise immediately.
+  await audioContext.setSinkId({type: 'none'});
+  t.step(() => {
+    assert_equals(typeof audioContext.sinkId, 'object');
+    assert_equals(audioContext.sinkId.type, 'none');
+  });
+  return audioContext.setSinkId({type: 'none'});
+}, 'setSinkId() with the same AudioSinkOptions.type value should resolve ' +
+   'immediately.');
+
+// 1.2.4. Validating sinkId
+// https://webaudio.github.io/web-audio-api/#validating-sink-identifier
+
+// Step 3. If document is not allowed to use the feature identified by
+// "speaker-selection", return a new DOMException whose name is
+// "NotAllowedError".
+// TODO: Due to the lack of implementation, this step is not tested.
+
+// The wrong AudioSinkOptions.type should cause a TypeError.
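+// For reference, AudioSinkOptions currently defines a single valid type,
+// 'none' (the context renders without routing audio to an output device), so
+// e.g. setSinkId({type: 'none'}) is valid while any other string fails the
+// WebIDL enum conversion with a TypeError.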
+promise_test(t =>
+  promise_rejects_js(t, TypeError,
+                     audioContext.setSinkId({type: 'something_else'})),
+  'setSinkId() should fail with TypeError on an invalid ' +
+  'AudioSinkOptions.type value.');
+
+// Step 4. If sinkId is a type of DOMString but it is not equal to the empty
+// string or it does not match any audio output device identified by the result
+// that would be provided by enumerateDevices(), return a new DOMException whose
+// name is "NotFoundError".
+promise_test(t =>
+  promise_rejects_dom(t, 'NotFoundError',
+                      audioContext.setSinkId('some_random_device_id')),
+  'setSinkId() should fail with NotFoundError on an invalid device ' +
+  'identifier.');
+
+// setSinkId() invoked on a closed AudioContext should reject with an
+// InvalidStateError DOMException.
+promise_test(async t => {
+  await audioContext.close();
+  t.step(() => {
+    assert_equals(audioContext.state, 'closed');
+  });
+  return promise_rejects_dom(t, 'InvalidStateError',
+      audioContext.setSinkId('some_random_device_id'));
+}, 'setSinkId() should fail with InvalidStateError when called on a ' +
+   'closed AudioContext');
+
+// setSinkId() invoked from a detached document should reject with an
+// InvalidStateError DOMException.
+promise_test(async t => {
+  const iframe =
+      document.createElementNS("http://www.w3.org/1999/xhtml", "iframe");
+  document.body.appendChild(iframe);
+  let iframeAudioContext = new iframe.contentWindow.AudioContext();
+  document.body.removeChild(iframe);
+  return promise_rejects_dom(t, 'InvalidStateError',
+      iframeAudioContext.setSinkId('some_random_device_id'));
+}, 'setSinkId() should fail with InvalidStateError when called from a ' +
+   'detached document');
+
+// Pending setSinkId() promises should be rejected with a
+// DOMException of InvalidStateError when the context is closed.
+// See: crbug.com/1408376
+promise_test(async t => {
+  const audioContext = new AudioContext();
+  const rejection = promise_rejects_dom(t, 'InvalidStateError',
+      audioContext.setSinkId('some_random_device_id'));
+  await audioContext.close();
+  return rejection;
+}, 'pending setSinkId() should be rejected with InvalidStateError when ' +
+   'the AudioContext is closed');
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
new file mode 100644
index 0000000000..c22f69c18d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html>
+<head>
+<title>Test AudioContext.setSinkId() state change</title>
+</head>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+"use strict";
+
+const audioContext = new AudioContext();
+let outputDeviceList = null;
+let firstDeviceId = null;
+
+// Setup: Get permission via getUserMedia() and a list of audio output devices.
+promise_setup(async t => {
+  await navigator.mediaDevices.getUserMedia({ audio: true });
+  const deviceList = await navigator.mediaDevices.enumerateDevices();
+  outputDeviceList =
+      deviceList.filter(({kind}) => kind === 'audiooutput');
+  assert_greater_than(outputDeviceList.length, 1,
+                      'the system must have more than 1 device.');
+  firstDeviceId = outputDeviceList[1].deviceId;
+}, 'Get permission via getUserMedia() and a list of audio output devices.');
+
+// Test the sink change when starting from a suspended context.
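+// Since the context is suspended, the setSinkId() algorithm skips the
+// suspend/resume steps (wasRunning is false per step 6 below), so no
+// statechange events are expected; a single sinkchange event is the only
+// observable signal.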
+promise_test(async t => {
+  let events = [];
+  await audioContext.suspend();
+
+  // Step 6. Set wasRunning to false if the [[rendering thread state]] on the
+  // AudioContext is "suspended".
+  assert_equals(audioContext.state, 'suspended');
+
+  // Step 11.5. Fire an event named sinkchange at the associated AudioContext.
+  audioContext.onsinkchange = t.step_func(() => {
+    events.push('sinkchange');
+    assert_equals(audioContext.sinkId, firstDeviceId);
+  });
+
+  await audioContext.setSinkId(firstDeviceId);
+  assert_equals(events[0], 'sinkchange');
+  t.done();
+}, 'Calling setSinkId() on a suspended AudioContext should fire only sink ' +
+   'change events.');
+
+// Test the sink change when starting from a running context.
+promise_test(async t => {
+  let events = [];
+  await audioContext.resume();
+
+  // Step 9. If wasRunning is true:
+  assert_equals(audioContext.state, 'running');
+
+  // Step 9.2.1. Set the state attribute of the AudioContext to "suspended".
+  // Fire an event named statechange at the associated AudioContext.
+  // Step 12.2. Set the state attribute of the AudioContext to "running".
+  // Fire an event named statechange at the associated AudioContext.
+  // A single handler records both transitions; assigning two separate
+  // onstatechange handlers would clobber the first one before any event
+  // fires.
+  audioContext.onstatechange = t.step_func(() => {
+    events.push(`statechange:${audioContext.state}`);
+  });
+
+  // Step 11.5. Fire an event named sinkchange at the associated AudioContext.
+  audioContext.onsinkchange = t.step_func(() => {
+    events.push('sinkchange');
+    assert_equals(audioContext.sinkId, firstDeviceId);
+  });
+
+  await audioContext.setSinkId(firstDeviceId);
+  assert_equals(events.length, 3);
+  assert_equals(events[0], 'statechange:suspended');
+  assert_equals(events[1], 'sinkchange');
+  assert_equals(events[2], 'statechange:running');
+  t.done();
+}, 'Calling setSinkId() on a running AudioContext should fire both state ' +
+   'and sink change events.');
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html
new file mode 100644
index 0000000000..192317dda2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html
@@ -0,0 +1,406 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8" />
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script type="module">
+"use strict";
+
+function tryToCreateNodeOnClosedContext(ctx) {
+  assert_equals(ctx.state, "closed", "The context is in closed state");
+
+  [
+    { name: "createBufferSource" },
+    {
+      name: "createMediaStreamDestination",
+      onOfflineAudioContext: false,
+    },
+    { name: "createScriptProcessor" },
+    { name: "createStereoPanner" },
+    { name: "createAnalyser" },
+    { name: "createGain" },
+    { name: "createDelay" },
+    { name: "createBiquadFilter" },
+    { name: "createWaveShaper" },
+    { name: "createPanner" },
+    { name: "createConvolver" },
+    { name: "createChannelSplitter" },
+    { name: "createChannelMerger" },
+    { name: "createDynamicsCompressor" },
+    { name: "createOscillator" },
+    {
+      name: "createMediaElementSource",
+      args: [new Audio()],
+      onOfflineAudioContext: false,
+    },
+    {
+      name: "createMediaStreamSource",
+      args: [new AudioContext().createMediaStreamDestination().stream],
+      onOfflineAudioContext: false,
+    },
+  ].forEach(function (e) {
+    if (
+      e.onOfflineAudioContext == false &&
+      ctx instanceof OfflineAudioContext
+    ) {
+      return;
+    }
+
+    try {
+      ctx[e.name].apply(ctx, e.args);
+    } catch (err) {
+      assert_true(false, "unexpected exception thrown for " + e.name);
+    }
+  });
+}
+
+function loadFile(url) {
+  return new Promise((resolve) => {
+    var xhr = new XMLHttpRequest();
+    xhr.open("GET", url, true);
+    xhr.responseType = "arraybuffer";
+    xhr.onload = function () {
+      resolve(xhr.response);
+    };
+    xhr.send();
+  });
+}
+
+// createBuffer, createPeriodicWave and decodeAudioData should work on a context
+// that has `state` == "closed"
+async function tryLegalOpeerationsOnClosedContext(ctx) {
+  assert_equals(ctx.state, "closed", "The context is in closed state");
+
+  [
+    { name: "createBuffer", args: [1, 44100, 44100] },
+    {
+      name: "createPeriodicWave",
+      args: [new Float32Array(10), new Float32Array(10)],
+    },
+  ].forEach(function (e) {
+    try {
+      ctx[e.name].apply(ctx, e.args);
+    } catch (err) {
+      assert_true(false, "unexpected exception thrown");
+    }
+  });
+  var buf = await loadFile("/webaudio/resources/sin_440Hz_-6dBFS_1s.wav");
+  return ctx
+    .decodeAudioData(buf)
+    .then(function (decodedBuf) {
+      assert_true(
+        true,
+        "decodeAudioData on a closed context should work, it did."
+      );
+    })
+    .catch(function (e) {
+      assert_true(
+        false,
+        "decodeAudioData on a closed context should work, it did not"
+      );
+    });
+}
+
+// Test that MediaStreams that are the output of a suspended AudioContext are
+// producing silence
+// ac1 produces a sine fed to a MediaStreamAudioDestinationNode
+// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and checks that
+// there is silence when ac1 is suspended
+async function testMultiContextOutput() {
+  var ac1 = new AudioContext(),
+    ac2 = new AudioContext();
+
+  await new Promise((resolve) => (ac1.onstatechange = resolve));
+
+  ac1.onstatechange = null;
+  await ac1.suspend();
+  assert_equals(ac1.state, "suspended", "ac1 is suspended");
+  var osc1 = ac1.createOscillator(),
+    mediaStreamDestination1 = ac1.createMediaStreamDestination();
+
+  var mediaStreamAudioSourceNode2 = ac2.createMediaStreamSource(
+      mediaStreamDestination1.stream
+    ),
+    sp2 = ac2.createScriptProcessor(),
+    silentBuffersInARow = 0;
+
+  osc1.connect(mediaStreamDestination1);
+  mediaStreamAudioSourceNode2.connect(sp2);
+  osc1.start();
+
+  let e = await new Promise((resolve) => (sp2.onaudioprocess = resolve));
+
+  while (true) {
+    let e = await new Promise(
+      (resolve) => (sp2.onaudioprocess = resolve)
+    );
+    var input = e.inputBuffer.getChannelData(0);
+    var silent = true;
+    for (var i = 0; i < input.length; i++) {
+      if (input[i] != 0.0) {
+        silent = false;
+      }
+    }
+
+    if (silent) {
+      silentBuffersInARow++;
+      if (silentBuffersInARow == 10) {
+        assert_true(
+          true,
+          "MediaStreams produce silence when their input is blocked."
+        );
+        break;
+      }
+    } else {
+      assert_equals(
+        silentBuffersInARow,
+        0,
+        "No non-silent buffer in between silent buffers."
+      );
+    }
+  }
+
+  sp2.onaudioprocess = null;
+  ac1.close();
+  ac2.close();
+}
+
+// Test that there is no buffering between contexts when connecting a running
+// AudioContext to a suspended AudioContext. Gecko's ScriptProcessorNode does
+// some buffering internally, so we ensure this by using a very low frequency
+// on a sine, and observe that the phase has changed by a big enough margin.
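+// Back-of-envelope, assuming the 2048-frame buffer count used in the
+// computation below: across three buffers a sine of frequency f advances by
+// about 2048 * 3 * 2 * Math.PI * f / sampleRate radians, and for the tiny f
+// used here sin(x) is approximately x, so consecutive observed samples should
+// differ by at least that `theoreticalIncrement`.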
+async function testMultiContextInput() { + var ac1 = new AudioContext(), + ac2 = new AudioContext(); + + await new Promise((resolve) => (ac1.onstatechange = resolve)); + ac1.onstatechange = null; + + var osc1 = ac1.createOscillator(), + mediaStreamDestination1 = ac1.createMediaStreamDestination(), + sp1 = ac1.createScriptProcessor(); + + var mediaStreamAudioSourceNode2 = ac2.createMediaStreamSource( + mediaStreamDestination1.stream + ), + sp2 = ac2.createScriptProcessor(), + eventReceived = 0; + + osc1.frequency.value = 0.0001; + osc1.connect(mediaStreamDestination1); + osc1.connect(sp1); + mediaStreamAudioSourceNode2.connect(sp2); + osc1.start(); + + var e = await new Promise((resolve) => (sp2.onaudioprocess = resolve)); + var inputBuffer1 = e.inputBuffer.getChannelData(0); + sp2.value = inputBuffer1[inputBuffer1.length - 1]; + await ac2.suspend(); + await ac2.resume(); + + while (true) { + var e = await new Promise( + (resolve) => (sp2.onaudioprocess = resolve) + ); + var inputBuffer = e.inputBuffer.getChannelData(0); + if (eventReceived++ == 3) { + var delta = Math.abs(inputBuffer[1] - sp2.value), + theoreticalIncrement = + (2048 * 3 * Math.PI * 2 * osc1.frequency.value) / + ac1.sampleRate; + assert_true( + delta >= theoreticalIncrement, + "Buffering did not occur when the context was suspended (delta:" + + delta + + " increment: " + + theoreticalIncrement + + ")" + ); + break; + } + } + ac1.close(); + ac2.close(); + sp1.onaudioprocess = null; + sp2.onaudioprocess = null; +} + +// Take an AudioContext, make sure it switches to running when the audio starts +// flowing, and then, call suspend, resume and close on it, tracking its state. +async function testAudioContext() { + var ac = new AudioContext(); + assert_equals( + ac.state, + "suspended", + "AudioContext should start in suspended state." + ); + var stateTracker = { + previous: ac.state, + // no promise for the initial suspended -> running + initial: { handler: false }, + suspend: { promise: false, handler: false }, + resume: { promise: false, handler: false }, + close: { promise: false, handler: false }, + }; + + await new Promise((resolve) => (ac.onstatechange = resolve)); + + assert_true( + stateTracker.previous == "suspended" && ac.state == "running", + 'AudioContext should switch to "running" when the audio hardware is' + + " ready." + ); + + stateTracker.previous = ac.state; + stateTracker.initial.handler = true; + + let promise_statechange_suspend = new Promise((resolve) => { + ac.onstatechange = resolve; + }).then(() => { + stateTracker.suspend.handler = true; + }); + await ac.suspend(); + assert_true( + !stateTracker.suspend.handler, + "Promise should be resolved before the callback." + ); + assert_equals( + ac.state, + "suspended", + 'AudioContext should switch to "suspended" when the audio stream is ' + + "suspended." + ); + await promise_statechange_suspend; + stateTracker.previous = ac.state; + + let promise_statechange_resume = new Promise((resolve) => { + ac.onstatechange = resolve; + }).then(() => { + stateTracker.resume.handler = true; + }); + await ac.resume(); + assert_true( + !stateTracker.resume.handler, + "Promise should be resolved before the callback." + ); + assert_equals( + ac.state, + "running", + 'AudioContext should switch to "running" when the audio stream is ' + + "resumed." 
+ ); + await promise_statechange_resume; + stateTracker.previous = ac.state; + + let promise_statechange_close = new Promise((resolve) => { + ac.onstatechange = resolve; + }).then(() => { + stateTracker.close.handler = true; + }); + await ac.close(); + assert_true( + !stateTracker.close.handler, + "Promise should be resolved before the callback." + ); + assert_equals( + ac.state, + "closed", + 'AudioContext should switch to "closed" when the audio stream is ' + + "closed." + ); + await promise_statechange_close; + stateTracker.previous = ac.state; + + tryToCreateNodeOnClosedContext(ac); + await tryLegalOpeerationsOnClosedContext(ac); +} + +async function testOfflineAudioContext() { + var o = new OfflineAudioContext(1, 44100, 44100); + assert_equals( + o.state, + "suspended", + "OfflineAudioContext should start in suspended state." + ); + + var previousState = o.state, + finishedRendering = false; + + o.startRendering().then(function (buffer) { + finishedRendering = true; + }); + + await new Promise((resolve) => (o.onstatechange = resolve)); + + assert_true( + previousState == "suspended" && o.state == "running", + "onstatechanged" + + "handler is called on state changed, and the new state is running" + ); + previousState = o.state; + await new Promise((resolve) => (o.onstatechange = resolve)); + assert_true( + previousState == "running" && o.state == "closed", + "onstatechanged handler is called when rendering finishes, " + + "and the new state is closed" + ); + assert_true( + finishedRendering, + "The Promise that is resolved when the rendering is " + + "done should be resolved earlier than the state change." + ); + previousState = o.state; + function afterRenderingFinished() { + assert_true( + false, + "There should be no transition out of the closed state." 
+ ); + } + o.onstatechange = afterRenderingFinished; + + tryToCreateNodeOnClosedContext(o); + await tryLegalOpeerationsOnClosedContext(o); +} + +async function testSuspendResumeEventLoop() { + var ac = new AudioContext(); + var source = ac.createBufferSource(); + source.buffer = ac.createBuffer(1, 44100, 44100); + await new Promise((resolve) => (ac.onstatechange = resolve)); + ac.onstatechange = null; + assert_true(ac.state == "running", "initial state is running"); + await ac.suspend(); + source.start(); + ac.resume(); + await new Promise((resolve) => (source.onended = resolve)); + assert_true(true, "The AudioContext did resume"); +} + +function testResumeInStateChangeForResumeCallback() { + return new Promise((resolve) => { + var ac = new AudioContext(); + ac.onstatechange = function () { + ac.resume().then(() => { + assert_true(true, "resume promise resolved as expected."); + resolve(); + }); + }; + }); +} + +var tests = [ + testOfflineAudioContext, + testMultiContextOutput, + testMultiContextInput, + testSuspendResumeEventLoop, + testResumeInStateChangeForResumeCallback, + testAudioContext, +]; + +tests.forEach(function (f) { + promise_test(f, f.name); +}); + </script> + </head> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html new file mode 100644 index 0000000000..ff3daebf39 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html @@ -0,0 +1,145 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test AudioContext.suspend() and AudioContext.resume() + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let offlineContext; + let osc; + let p1; + let p2; + let p3; + + let sampleRate = 44100; + let durationInSeconds = 1; + + let audit = Audit.createTaskRunner(); + + // Task: test suspend(). + audit.define( + { + label: 'test-suspend', + description: 'Test suspend() for offline context' + }, + function(task, should) { + // Test suspend/resume. Ideally this test is best with a online + // AudioContext, but content shell doesn't really have a working + // online AudioContext. Hence, use an OfflineAudioContext. Not all + // possible scenarios can be easily checked with an offline context + // instead of an online context. + + // Create an audio context with an oscillator. + should( + () => { + offlineContext = new OfflineAudioContext( + 1, durationInSeconds * sampleRate, sampleRate); + }, + 'offlineContext = new OfflineAudioContext(1, ' + + (durationInSeconds * sampleRate) + ', ' + sampleRate + ')') + .notThrow(); + osc = offlineContext.createOscillator(); + osc.connect(offlineContext.destination); + + // Verify the state. + should(offlineContext.state, 'offlineContext.state') + .beEqualTo('suspended'); + + // Multiple calls to suspend() should not be a problem. But we can't + // test that on an offline context. Thus, check that suspend() on + // an OfflineAudioContext rejects the promise. 
+ should( + () => p1 = offlineContext.suspend(), + 'p1 = offlineContext.suspend()') + .notThrow(); + should(p1 instanceof Promise, 'p1 instanceof Promise').beTrue(); + + should(p1, 'p1').beRejected().then(task.done.bind(task)); + }); + + + // Task: test resume(). + audit.define( + { + label: 'test-resume', + description: 'Test resume() for offline context' + }, + function(task, should) { + // Multiple calls to resume should not be a problem. But we can't + // test that on an offline context. Thus, check that resume() on an + // OfflineAudioContext rejects the promise. + should( + () => p2 = offlineContext.resume(), + 'p2 = offlineContext.resume()') + .notThrow(); + should(p2 instanceof Promise, 'p2 instanceof Promise').beTrue(); + + // Resume doesn't actually resume an offline context + should(offlineContext.state, 'After resume, offlineContext.state') + .beEqualTo('suspended'); + should(p2, 'p2').beRejected().then(task.done.bind(task)); + }); + + // Task: test the state after context closed. + audit.define( + { + label: 'test-after-close', + description: 'Test state after context closed' + }, + function(task, should) { + // Render the offline context. + osc.start(); + + // Test suspend/resume in tested promise pattern. We don't care + // about the actual result of the offline rendering. + should( + () => p3 = offlineContext.startRendering(), + 'p3 = offlineContext.startRendering()') + .notThrow(); + + p3.then(() => { + should(offlineContext.state, 'After close, offlineContext.state') + .beEqualTo('closed'); + + // suspend() should be rejected on a closed context. + should(offlineContext.suspend(), 'offlineContext.suspend()') + .beRejected() + .then(() => { + // resume() should be rejected on closed context. + should(offlineContext.resume(), 'offlineContext.resume()') + .beRejected() + .then(task.done.bind(task)); + }) + }); + }); + + audit.define( + { + label: 'resume-running-context', + description: 'Test resuming a running context' + }, + (task, should) => { + let context; + should(() => context = new AudioContext(), 'Create online context') + .notThrow(); + + should(context.state, 'context.state').beEqualTo('suspended'); + should(context.resume(), 'context.resume') + .beResolved() + .then(() => { + should(context.state, 'context.state after resume') + .beEqualTo('running'); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html new file mode 100644 index 0000000000..136abedaa8 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html @@ -0,0 +1,215 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test AudioContextOptions + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let context; + let defaultLatency; + let interactiveLatency; + let balancedLatency; + let playbackLatency; + + let audit = Audit.createTaskRunner(); + + audit.define( + { + label: 'test-audiocontextoptions-latencyHint-basic', + description: 'Test creating contexts with basic latencyHint types.' + }, + function(task, should) { + let closingPromises = []; + + // Verify that an AudioContext can be created with default options. 
+ should(function() { + context = new AudioContext() + }, 'context = new AudioContext()').notThrow(); + + should(context.sampleRate, + `context.sampleRate (${context.sampleRate} Hz)`).beGreaterThan(0); + + defaultLatency = context.baseLatency; + should(defaultLatency, 'default baseLatency').beGreaterThanOrEqualTo(0); + + // Verify that an AudioContext can be created with the expected + // latency types. + should( + function() { + context = new AudioContext({'latencyHint': 'interactive'}) + }, + 'context = new AudioContext({\'latencyHint\': \'interactive\'})') + .notThrow(); + + interactiveLatency = context.baseLatency; + should(interactiveLatency, 'interactive baseLatency') + .beEqualTo(defaultLatency); + closingPromises.push(context.close()); + + should( + function() { + context = new AudioContext({'latencyHint': 'balanced'}) + }, + 'context = new AudioContext({\'latencyHint\': \'balanced\'})') + .notThrow(); + + balancedLatency = context.baseLatency; + should(balancedLatency, 'balanced baseLatency') + .beGreaterThanOrEqualTo(interactiveLatency); + closingPromises.push(context.close()); + + should( + function() { + context = new AudioContext({'latencyHint': 'playback'}) + }, + 'context = new AudioContext({\'latencyHint\': \'playback\'})') + .notThrow(); + + playbackLatency = context.baseLatency; + should(playbackLatency, 'playback baseLatency') + .beGreaterThanOrEqualTo(balancedLatency); + closingPromises.push(context.close()); + + Promise.all(closingPromises).then(function() { + task.done(); + }); + }); + + audit.define( + { + label: 'test-audiocontextoptions-latencyHint-double', + description: + 'Test creating contexts with explicit latencyHint values.' + }, + function(task, should) { + let closingPromises = []; + + // Verify too small exact latency clamped to 'interactive' + should( + function() { + context = + new AudioContext({'latencyHint': interactiveLatency / 2}) + }, + 'context = new AudioContext({\'latencyHint\': ' + + 'interactiveLatency/2})') + .notThrow(); + should(context.baseLatency, 'double-constructor baseLatency small') + .beLessThanOrEqualTo(interactiveLatency); + closingPromises.push(context.close()); + + // Verify that exact latency in range works as expected + let validLatency = (interactiveLatency + playbackLatency) / 2; + should( + function() { + context = new AudioContext({'latencyHint': validLatency}) + }, + 'context = new AudioContext({\'latencyHint\': validLatency})') + .notThrow(); + should( + context.baseLatency, 'double-constructor baseLatency inrange 1') + .beGreaterThanOrEqualTo(interactiveLatency); + should( + context.baseLatency, 'double-constructor baseLatency inrange 2') + .beLessThanOrEqualTo(playbackLatency); + closingPromises.push(context.close()); + + // Verify too big exact latency clamped to some value + let context1; + let context2; + should(function() { + context1 = + new AudioContext({'latencyHint': playbackLatency * 10}); + context2 = + new AudioContext({'latencyHint': playbackLatency * 20}); + }, 'creating two high latency contexts').notThrow(); + should(context1.baseLatency, 'high latency context baseLatency') + .beEqualTo(context2.baseLatency); + should(context1.baseLatency, 'high latency context baseLatency') + .beGreaterThanOrEqualTo(interactiveLatency); + closingPromises.push(context1.close()); + closingPromises.push(context2.close()); + + // Verify that invalid latencyHint values are rejected. 
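+          // For reference, latencyHint is a
+          // (AudioContextLatencyCategory or double) union: only 'interactive',
+          // 'balanced', 'playback', or a number of seconds are accepted, so
+          // any other string should fail the union conversion with a
+          // TypeError.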
+ should( + function() { + context = new AudioContext({'latencyHint': 'foo'}) + }, + 'context = new AudioContext({\'latencyHint\': \'foo\'})') + .throw(TypeError); + + // Verify that no extra options can be passed into the + // AudioContextOptions. + should( + function() { + context = new AudioContext('latencyHint') + }, + 'context = new AudioContext(\'latencyHint\')') + .throw(TypeError); + + Promise.all(closingPromises).then(function() { + task.done(); + }); + }); + + audit.define( + { + label: 'test-audiocontextoptions-sampleRate', + description: + 'Test creating contexts with non-default sampleRate values.' + }, + function(task, should) { + // A sampleRate of 1 is unlikely to be supported on any browser, + // test that this rate is rejected. + should( + () => { + context = new AudioContext({sampleRate: 1}) + }, + 'context = new AudioContext({sampleRate: 1})') + .throw(DOMException, 'NotSupportedError'); + + // A sampleRate of 1,000,000 is unlikely to be supported on any + // browser, test that this rate is also rejected. + should( + () => { + context = new AudioContext({sampleRate: 1000000}) + }, + 'context = new AudioContext({sampleRate: 1000000})') + .throw(DOMException, 'NotSupportedError'); + // A negative sample rate should not be accepted + should( + () => { + context = new AudioContext({sampleRate: -1}) + }, + 'context = new AudioContext({sampleRate: -1})') + .throw(DOMException, 'NotSupportedError'); + // A null sample rate should not be accepted + should( + () => { + context = new AudioContext({sampleRate: 0}) + }, + 'context = new AudioContext({sampleRate: 0})') + .throw(DOMException, 'NotSupportedError'); + + should( + () => { + context = new AudioContext({sampleRate: 24000}) + }, + 'context = new AudioContext({sampleRate: 24000})') + .notThrow(); + should( + context.sampleRate, 'sampleRate inrange') + .beEqualTo(24000); + + context.close(); + task.done(); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html new file mode 100644 index 0000000000..f866b5f7a1 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html @@ -0,0 +1,25 @@ +<!doctype html> +<title>AudioContext state around "allowed to start" in constructor</title> +<link rel=help href=https://webaudio.github.io/web-audio-api/#dom-audiocontext-audiocontext> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<script src=/resources/testdriver.js></script> +<script src=/resources/testdriver-vendor.js></script> +<script> +setup({ single_test: true }); +test_driver.bless("audio playback", () => { + const ctx = new AudioContext(); + // Immediately after the constructor the state is "suspended" because the + // control message to start processing has just been sent, but the state + // should change soon. + assert_equals(ctx.state, "suspended", "initial state"); + ctx.onstatechange = () => { + assert_equals(ctx.state, "running", "state after statechange event"); + // Now create another context and ensure it starts out in the "suspended" + // state too, ensuring it's not synchronously "running". 
+ const ctx2 = new AudioContext(); + assert_equals(ctx2.state, "suspended", "initial state of 2nd context"); + done(); + }; +}); +</script> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html new file mode 100644 index 0000000000..8c74bd0aa1 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html @@ -0,0 +1,14 @@ +<html> +<head> + <title> + Test currentTime after browsing context discard + </title> +</head> +<script> + const frame = document.createElement('frame'); + document.documentElement.appendChild(frame); + const ctx = new frame.contentWindow.AudioContext(); + frame.remove(); + ctx.currentTime; +</script> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html new file mode 100644 index 0000000000..e000ab124f --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html @@ -0,0 +1,55 @@ +<!doctype html> +<title>Test consistency of processing after resume()</title> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<script> +const get_node_and_reply = (context) => { + const node = new AudioWorkletNode(context, 'port-processor'); + return new Promise((resolve) => { + node.port.onmessage = (event) => resolve({node: node, reply: event.data}); + }); +}; +const ping_for_reply = (node) => { + return new Promise((resolve) => { + node.port.onmessage = (event) => resolve(event.data); + node.port.postMessage('ping'); + }); +}; +const assert_consistent = (constructReply, pong, expectedPongTime, name) => { + const blockSize = 128; + assert_equals(pong.timeStamp, expectedPongTime, `${name} pong time`); + assert_equals(pong.processCallCount * blockSize, + pong.currentFrame - constructReply.currentFrame, + `${name} processed frame count`); +}; +const modulePath = '/webaudio/the-audio-api/' + + 'the-audioworklet-interface/processors/port-processor.js'; + +promise_test(async () => { + const realtime = new AudioContext(); + await realtime.audioWorklet.addModule(modulePath); + await realtime.suspend(); + const timeBeforeResume = realtime.currentTime; + // Two AudioWorkletNodes are constructed. + // node1 is constructed before and node2 after the resume() call. + const construct1 = get_node_and_reply(realtime); + const resume = realtime.resume(); + const construct2 = get_node_and_reply(realtime); + const {node: node1, reply: constructReply1} = await construct1; + assert_equals(constructReply1.timeStamp, timeBeforeResume, + 'construct time before resume'); + const {node: node2, reply: constructReply2} = await construct2; + assert_greater_than_equal(constructReply2.timeStamp, timeBeforeResume, + 'construct time after resume'); + await resume; + // Suspend the context to freeze time and check that the processing for each + // node matches the elapsed time. 
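+  // Concretely, assert_consistent() checks, for each node,
+  //   processCallCount * 128 === currentFrame - currentFrameAtConstruction,
+  // i.e. exactly one 128-frame render quantum was processed per process()
+  // call while the context was running.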
+ await realtime.suspend(); + const timeAfterSuspend = realtime.currentTime; + const pong1 = await ping_for_reply(node1); + const pong2 = await ping_for_reply(node2); + assert_consistent(constructReply1, pong1, timeAfterSuspend, 'node1'); + assert_consistent(constructReply2, pong2, timeAfterSuspend, 'node2'); + assert_equals(pong1.currentFrame, pong2.currentFrame, 'currentFrame matches'); +}); +</script> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html new file mode 100644 index 0000000000..2fb3c5a50b --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html @@ -0,0 +1,28 @@ +<!doctype html> +<title>Test for rejected promises from methods on an AudioContext in a + discarded browsing context</title> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<body></body> +<script> +let context; +let childDOMException; +setup(() => { + const frame = document.createElement('iframe'); + document.body.appendChild(frame); + context = new frame.contentWindow.AudioContext(); + childDOMException = frame.contentWindow.DOMException; + frame.remove(); +}); + +promise_test((t) => promise_rejects_dom(t, 'InvalidStateError', + childDOMException, context.suspend()), + 'suspend()'); +promise_test((t) => promise_rejects_dom(t, 'InvalidStateError', + childDOMException, context.resume()), + 'resume()'); +promise_test((t) => promise_rejects_dom(t, 'InvalidStateError', + childDOMException, context.close()), + 'close()'); +// decodeAudioData() is tested in audiocontext-detached-execution-context.html +</script> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html new file mode 100644 index 0000000000..2654a2a504 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html @@ -0,0 +1,22 @@ +<!doctype html> +<html> +<iframe src="{{GET[childsrc]}}"> +</iframe> +<script> +const frame = document.getElementsByTagName('iframe')[0]; +const reply = op => window.parent.postMessage('DONE ' + op, '*'); + +window.onmessage = e => { + switch (e.data) { + case 'REMOVE FRAME': + frame.remove(); + reply(e.data); + break; + case 'NAVIGATE FRAME': + frame.srcdoc = '<html></html>'; + frame.onload = () => reply(e.data); + break; + } +}; +</script> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html new file mode 100644 index 0000000000..596a825c3d --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html @@ -0,0 +1,72 @@ +<!doctype html> +<title>Test AudioContext state updates with suspend() shortly after + construction</title> +<script src=/resources/testharness.js></script> +<script src=/resources/testharnessreport.js></script> +<script> +// A separate async_test is used for tracking state change counts so that it +// can report excess changes after the promise_test for the iteration has +// completed. 
+const changeCountingTest = async_test('State change counting'); + +const doTest = async (testCount) => { + const ctx = new AudioContext(); + // Explicitly resume to get a promise to indicate whether the context + // successfully started running. + const resume = ctx.resume(); + const suspend = ctx.suspend(); + let stateChangesDone = new Promise((resolve) => { + ctx.onstatechange = () => { + ++ctx.stateChangeCount; + changeCountingTest.step(() => { + assert_less_than_equal(ctx.stateChangeCount, + ctx.expectedStateChangeCount, + `ctx ${testCount} state change count.`); + assert_equals(ctx.state, ctx.expectedState, `ctx ${testCount} state`); + }); + if (ctx.stateChangeCount == ctx.totalStateChangeCount) { + resolve(); + } + }; + }); + ctx.stateChangeCount = 0; + ctx.expectedStateChangeCount = 1; + ctx.expectedState = 'running'; + ctx.totalStateChangeCount = 2; + let resumeState = 'pending'; + resume.then(() => { + resumeState = 'fulfilled'; + assert_equals(ctx.state, 'running', 'state on resume fulfilled.'); + }).catch(() => { + // The resume() promise may be rejected if "Attempt to acquire system + // resources" fails. The spec does not discuss the possibility of a + // subsequent suspend causing such a failure, but accept this as a + // reasonable behavior. + resumeState = 'rejected'; + assert_equals(ctx.state, 'suspended', 'state on resume rejected.'); + assert_equals(ctx.stateChangeCount, 0); + ctx.expectedStateChangeCount = 0; + stateChangesDone = Promise.resolve(); + }); + suspend.then(() => { + assert_not_equals(resumeState, 'pending', + 'resume promise should settle before suspend promise.') + if (resumeState == 'fulfilled') { + ++ctx.expectedStateChangeCount; + } + ctx.expectedState = 'suspended'; + assert_equals(ctx.state, 'suspended', 'state on suspend fulfilled.'); + }); + await resume; + await suspend; + await stateChangesDone; +}; + +// Repeat the test because Gecko uses different code when there is more than +// one AudioContext. The third run provides time to check that no further +// state changes from the second run are pending. 
+for (const testCount of [1, 2, 3]) {
+  promise_test(() => { return doTest(testCount); }, `Iteration ${testCount}`);
+}
+promise_test(async () => changeCountingTest.done(), 'Stop waiting');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html
new file mode 100644
index 0000000000..b9328ae95d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html
@@ -0,0 +1,65 @@
+<!doctype html>
+<meta name="timeout" content="long">
+<title>AudioContext.suspend() with navigation</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/utils.js"></script>
+<script src="/common/dispatcher/dispatcher.js"></script>
+<script src="/html/browsers/browsing-the-web/back-forward-cache/resources/helper.sub.js"></script>
+<script>
+'use strict';
+runBfcacheTest({
+  funcBeforeNavigation: async () => {
+    window.promise_event = (target, name) => {
+      return new Promise(resolve => target[`on${name}`] = resolve);
+    };
+    window.promise_source_ended = (audioCtx) => {
+      const source = new ConstantSourceNode(audioCtx);
+      source.start(0);
+      source.stop(audioCtx.currentTime + 1/audioCtx.sampleRate);
+      return promise_event(source, "ended");
+    };
+
+    window.suspended_ctx = new AudioContext();
+    // Perform the equivalent of test_driver.bless() to request a user gesture
+    // for when the test is run from a browser. test_driver would need to be
+    // able to postMessage() to the test context, which is not available due
+    // to window.open() being called with noopener (for back/forward cache).
+    // Audio autoplay is expected to be allowed when run through webdriver
+    // from `wpt run`.
+    let button = document.createElement('button');
+    button.innerHTML = 'This test requires user interaction.<br />' +
+        'Please click here to allow AudioContext.';
+    document.body.appendChild(button);
+    button.addEventListener('click', () => {
+      document.body.removeChild(button);
+      suspended_ctx.resume();
+    }, {once: true});
+    // Wait for user gesture, if required.
+    await suspended_ctx.resume();
+    await suspended_ctx.suspend();
+    window.ended_promise = promise_source_ended(suspended_ctx);
+  },
+  funcAfterAssertion: async (pageA) => {
+    const state = await pageA.execute_script(() => suspended_ctx.state);
+    assert_equals(state, 'suspended', 'state after back()');
+    const first_ended = await pageA.execute_script(async () => {
+      // Wait for an ended event from a running AudioContext to provide enough
+      // time to check that the ended event has not yet been dispatched from
+      // the suspended ctx.
+      const running_ctx = new AudioContext();
+      await running_ctx.resume();
+      return Promise.race([
+        ended_promise.then(() => 'suspended_ctx'),
+        promise_source_ended(running_ctx).then(() => 'running_ctx'),
+      ]);
+    });
+    assert_equals(first_ended, 'running_ctx',
+                  'AudioContext of first ended event');
+    await pageA.execute_script(() => {
+      window.suspended_ctx.resume();
+      return ended_promise;
+    });
+  },
+}, 'suspend() with navigation');
+</script>