Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface')
21 files changed, 2097 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html
new file mode 100644
index 0000000000..0fa3089a34
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html
@@ -0,0 +1,100 @@
+<!doctype html>
+<html>
+  <head>
+    <title>
+      Test Active Processing for AudioBufferSourceNode
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      // Arbitrary sample rate.  We only need a few blocks of rendering to
+      // see if things are working.
+      let sampleRate = 8000;
+      let renderLength = 10 * RENDER_QUANTUM_FRAMES;
+
+      // Offline context used for the tests.
+      let context;
+
+      // Number of channels for the AudioBufferSource.  Fairly arbitrary, but
+      // should be more than 2.
+      let numberOfChannels = 7;
+
+      // Number of frames in the AudioBuffer.  Fairly arbitrary, but should
+      // probably be more than one render quantum and significantly less than
+      // |renderLength|.
+      let bufferFrames = 131;
+
+      let filePath =
+          '../the-audioworklet-interface/processors/input-count-processor.js';
+
+      audit.define('Setup graph', (task, should) => {
+        context =
+            new OfflineAudioContext(numberOfChannels, renderLength, sampleRate);
+
+        should(
+            context.audioWorklet.addModule(filePath).then(() => {
+              let buffer = new AudioBuffer({
+                numberOfChannels: numberOfChannels,
+                length: bufferFrames,
+                sampleRate: context.sampleRate
+              });
+
+              let src = new AudioBufferSourceNode(context, {buffer: buffer});
+              let counter = new AudioWorkletNode(context, 'counter');
+
+              src.connect(counter).connect(context.destination);
+              src.start();
+            }),
+            'AudioWorklet and graph construction')
+            .beResolved()
+            .then(() => task.done());
+      });
+
+      audit.define('verify count change', (task, should) => {
+        context.startRendering()
+            .then(renderedBuffer => {
+              let output = renderedBuffer.getChannelData(0);
+
+              // Find the first time the number of channels changes to 1.
+              let countChangeIndex = output.findIndex(x => x == 1);
+
+              // Verify that the count did change.  If it didn't, there's a bug
+              // in the implementation, or it takes longer than the render
+              // length to change.  For the latter case, increase the render
+              // length, but it can't be arbitrarily large.  The change needs
+              // to happen at some reasonable time after the source stops.
+              should(countChangeIndex >= 0, 'Number of channels changed')
+                  .beTrue();
+              should(
+                  countChangeIndex, 'Index where input channel count changed')
+                  .beLessThanOrEqualTo(renderLength);
+
+              // Verify the number of channels at the beginning matches the
+              // number of channels in the AudioBuffer.
+              should(
+                  output.slice(0, countChangeIndex),
+                  `Number of channels in input[0:${countChangeIndex - 1}]`)
+                  .beConstantValueOf(numberOfChannels);
+
+              // Verify that after the source has stopped, the number of
+              // channels is 1.
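+              // The 'counter' worklet writes its input's channel count to
+              // its output each render quantum.  A sketch of what
+              // input-count-processor.js is assumed to register (not the
+              // actual file):
+              //
+              //   class InputCountProcessor extends AudioWorkletProcessor {
+              //     process(inputs, outputs) {
+              //       outputs[0][0].fill(inputs[0].length);
+              //       return true;
+              //     }
+              //   }
+              //   registerProcessor('counter', InputCountProcessor);
+              //
+              // Once the source stops it is no longer actively processing,
+              // so the worklet sees a single channel of silence and reports 1.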
+              should(
+                  output.slice(countChangeIndex),
+                  `Number of channels in input[${countChangeIndex}:]`)
+                  .beConstantValueOf(1);
+            })
+            .then(() => task.done());
+      });
+
+      audit.run();
+    </script>
+
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html
new file mode 100644
index 0000000000..6ce7eb0c10
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html
@@ -0,0 +1,37 @@
+<!doctype html>
+<html>
+  <head>
+    <title>
+      Basic Test of AudioBufferSourceNode
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/start-stop-exceptions.js"></script>
+  </head>
+  <script id="layout-test-code">
+    let sampleRate = 44100;
+    let renderLengthSeconds = 0.25;
+
+    let oscTypes = ['sine', 'square', 'sawtooth', 'triangle', 'custom'];
+
+    let audit = Audit.createTaskRunner();
+
+    audit.define('start/stop exceptions', (task, should) => {
+      // We're not going to render anything, so make it simple
+      let context = new OfflineAudioContext(1, 1, sampleRate);
+      let node = new AudioBufferSourceNode(context);
+
+      testStartStop(should, node, [
+        {args: [0, -1], errorType: RangeError},
+        {args: [0, 0, -1], errorType: RangeError}
+      ]);
+      task.done();
+    });
+
+    audit.run();
+  </script>
+  <body>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html
new file mode 100644
index 0000000000..f3f16c4c64
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html
@@ -0,0 +1,97 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      audiobuffersource-channels.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+      let context;
+      let source;
+
+      audit.define(
+          {
+            label: 'validate .buffer',
+            description:
+                'Validation of AudioBuffer in .buffer attribute setter'
+          },
+          function(task, should) {
+            context = new AudioContext();
+            source = context.createBufferSource();
+
+            // Make sure we can't set to something which isn't an AudioBuffer.
+            should(function() {
+              source.buffer = 57;
+            }, 'source.buffer = 57').throw(TypeError);
+
+            // It's ok to set the buffer to null.
+            should(function() {
+              source.buffer = null;
+            }, 'source.buffer = null').notThrow();
+
+            // Set the buffer to a valid AudioBuffer
+            let buffer =
+                new AudioBuffer({length: 128, sampleRate: context.sampleRate});
+
+            should(function() {
+              source.buffer = buffer;
+            }, 'source.buffer = buffer').notThrow();
+
+            // The buffer has been set; we can't set it again.
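+            // Per the spec, once .buffer holds a non-null AudioBuffer it can
+            // only be reset to null; assigning another non-null buffer must
+            // throw an InvalidStateError, which the next checks verify.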
+            should(function() {
+              source.buffer =
+                  new AudioBuffer({length: 128, sampleRate: context.sampleRate});
+            }, 'source.buffer = new buffer').throw(DOMException, 'InvalidStateError');
+
+            // The buffer has been set; it's ok to set it to null.
+            should(function() {
+              source.buffer = null;
+            }, 'source.buffer = null again').notThrow();
+
+            // The buffer was already set (and set to null).  Can't set it
+            // again.
+            should(function() {
+              source.buffer = buffer;
+            }, 'source.buffer = buffer again').throw(DOMException, 'InvalidStateError');
+
+            // But setting to null is ok.
+            should(function() {
+              source.buffer = null;
+            }, 'source.buffer = null after setting to null').notThrow();
+
+            // Check that mono buffer can be set.
+            should(function() {
+              let monoBuffer =
+                  context.createBuffer(1, 1024, context.sampleRate);
+              let testSource = context.createBufferSource();
+              testSource.buffer = monoBuffer;
+            }, 'Setting source with mono buffer').notThrow();
+
+            // Check that stereo buffer can be set.
+            should(function() {
+              let stereoBuffer =
+                  context.createBuffer(2, 1024, context.sampleRate);
+              let testSource = context.createBufferSource();
+              testSource.buffer = stereoBuffer;
+            }, 'Setting source with stereo buffer').notThrow();
+
+            // Check buffers with more than two channels.
+            for (let i = 3; i < 10; ++i) {
+              should(function() {
+                let buffer = context.createBuffer(i, 1024, context.sampleRate);
+                let testSource = context.createBufferSource();
+                testSource.buffer = buffer;
+              }, 'Setting source with ' + i + ' channels buffer').notThrow();
+            }
+            task.done();
+          });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html
new file mode 100644
index 0000000000..abb8983cc0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html
@@ -0,0 +1,52 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      Test AudioBufferSourceNode With Looping And Duration
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+      audit.define('loop with duration', (task, should) => {
+        // Create the context
+        let context = new OfflineAudioContext(1, 4096, 48000);
+
+        // Create the sample buffer and fill the second half with 1
+        let buffer = context.createBuffer(1, 2048, context.sampleRate);
+        for (let i = 1024; i < 2048; i++) {
+          buffer.getChannelData(0)[i] = 1;
+        }
+
+        // Create the source and set its value
+        let source = context.createBufferSource();
+        source.loop = true;
+        source.loopStart = 1024 / context.sampleRate;
+        source.loopEnd = 2048 / context.sampleRate;
+        source.buffer = buffer;
+        source.connect(context.destination);
+        source.start(0, 1024 / context.sampleRate, 2048 / context.sampleRate);
+        // Expectations
+        let expected = new Float32Array(4096);
+        for (let i = 0; i < 2048; i++) {
+          expected[i] = 1;
+        }
+        // Render it!
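+        // (The source plays the looped second half of the buffer, all 1's,
+        // for the requested duration of 2048 frames and then stops, so the
+        // first 2048 rendered samples should be 1 and the remainder silent.)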
+        context.startRendering()
+            .then(function(audioBuffer) {
+              should(
+                  audioBuffer.getChannelData(0), 'audioBuffer.getChannelData')
+                  .beEqualToArray(expected);
+            })
+            .then(() => task.done());
+      });
+
+      audit.run();
+
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html
new file mode 100644
index 0000000000..b9922f61ef
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html
@@ -0,0 +1,40 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      audiobuffersource-ended.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/audiobuffersource-testing.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      let context;
+      let source;
+
+      audit.define(
+          'AudioBufferSourceNode calls its onended EventListener',
+          function(task, should) {
+            let sampleRate = 44100.0;
+            let numberOfFrames = 32;
+            context = new OfflineAudioContext(1, numberOfFrames, sampleRate);
+            source = context.createBufferSource();
+            source.buffer = createTestBuffer(context, numberOfFrames);
+            source.connect(context.destination);
+            source.onended = function() {
+              should(true, 'source.onended called').beTrue();
+              task.done();
+            };
+            source.start(0);
+            context.startRendering();
+          });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html
new file mode 100644
index 0000000000..f554304a21
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      Test Start Grain with Delayed Buffer Setting
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+      let context;
+      let source;
+      let buffer;
+      let renderedData;
+
+      let sampleRate = 44100;
+
+      let testDurationSec = 1;
+      let testDurationSamples = testDurationSec * sampleRate;
+      let startTime = 0.9 * testDurationSec;
+
+      audit.define(
+          'Test setting the source buffer after starting the grain',
+          function(task, should) {
+            context =
+                new OfflineAudioContext(1, testDurationSamples, sampleRate);
+
+            buffer = createConstantBuffer(context, testDurationSamples, 1);
+            source = context.createBufferSource();
+            source.connect(context.destination);
+
+            // Start the source BEFORE we set the buffer.  The grain offset
+            // and duration aren't important, as long as we specify some
+            // offset.
+            source.start(startTime, .1);
+            source.buffer = buffer;
+
+            // Render it!
+            context.startRendering()
+                .then(function(buffer) {
+                  checkResult(buffer, should);
+                })
+                .then(task.done.bind(task));
+          });
+
+      function checkResult(buffer, should) {
+        let success = false;
+
+        renderedData = buffer.getChannelData(0);
+
+        // Check that the rendered data is not all zeroes.  Any non-zero data
+        // means the test passed.
+        let startFrame = Math.round(startTime * sampleRate);
+        for (let k = 0; k < renderedData.length; ++k) {
+          if (renderedData[k]) {
+            success = true;
+            break;
+          }
+        }
+
+        should(success, 'Buffer was played').beTrue();
+      }
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html
new file mode 100644
index 0000000000..4e0de21e96
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<!--
+Test AudioBufferSourceNode supports 5.1 channels.
+-->
+<html>
+  <head>
+    <title>
+      audiobuffersource-multi-channels.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/mix-testing.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+      let context;
+      let expectedAudio;
+
+      audit.define('initialize', (task, should) => {
+        // Create offline audio context
+        let sampleRate = 44100.0;
+        should(() => {
+          context = new OfflineAudioContext(
+              6, sampleRate * toneLengthSeconds, sampleRate);
+        }, 'Creating context for testing').notThrow();
+        should(
+            Audit
+                .loadFileFromUrl('resources/audiobuffersource-multi-channels-expected.wav')
+                .then(arrayBuffer => {
+                  context.decodeAudioData(arrayBuffer).then(audioBuffer => {
+                    expectedAudio = audioBuffer;
+                    task.done();
+                  }).catch(error => {
+                    assert_unreached("Could not decode audio data due to " + error.message);
+                  })
+                })
+            , 'Fetching expected audio').beResolved();
+      });
+
+      audit.define(
+          {label: 'test', description: 'AudioBufferSource with 5.1 buffer'},
+          (task, should) => {
+            let toneBuffer =
+                createToneBuffer(context, 440, toneLengthSeconds, 6);
+
+            let source = context.createBufferSource();
+            source.buffer = toneBuffer;
+
+            source.connect(context.destination);
+            source.start(0);
+
+            context.startRendering()
+                .then(renderedAudio => {
+                  // Compute a threshold based on the maximum error, |maxUlp|,
+                  // in ULP.  This is experimentally determined.  Assuming
+                  // that the reference file is a 16-bit wav file, the max
+                  // values in the wav file are +/- 32768.
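+                  // One ULP at 16-bit full scale is therefore 1/32768, or
+                  // about 3.05e-5.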
+ let maxUlp = 1; + let threshold = maxUlp / 32768; + for (let k = 0; k < renderedAudio.numberOfChannels; ++k) { + should( + renderedAudio.getChannelData(k), + 'Rendered audio for channel ' + k) + .beCloseToArray( + expectedAudio.getChannelData(k), + {absoluteThreshold: threshold}); + } + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html new file mode 100644 index 0000000000..b5b1ec0c3d --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html @@ -0,0 +1,59 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test ABSN Outputs Silence if buffer is null + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + + <body> + <script> + const audit = Audit.createTaskRunner(); + + audit.define('ABSN with null buffer', (task, should) => { + // Create test context. Length and sampleRate are pretty arbitrary, but + // we don't need either to be very large. + const context = new OfflineAudioContext( + {numberOfChannels: 1, length: 1024, sampleRate: 8192}); + + // Just create a constant buffer for testing. Anything will do as long + // as the buffer contents are not identically zero. + const audioBuffer = + new AudioBuffer({length: 10, sampleRate: context.sampleRate}); + const audioBufferSourceNode = new AudioBufferSourceNode(context); + + audioBuffer.getChannelData(0).fill(1); + + // These two tests are mostly for the informational messages to show + // what's happening. They should never fail! + should(() => { + audioBufferSourceNode.buffer = audioBuffer; + }, 'Setting ABSN.buffer to AudioBuffer').notThrow(); + + // This is the important part. Setting the buffer to null after setting + // it to something else should cause the source to produce silence. + should(() => { + audioBufferSourceNode.buffer = null; + }, 'Setting ABSN.buffer = null').notThrow(); + + audioBufferSourceNode.start(0); + audioBufferSourceNode.connect(context.destination); + + context.startRendering() + .then(buffer => { + // Since the buffer is null, the output of the source should be + // silence. 
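+              // (A source with a null buffer acts as a single channel of
+              // silence, so every rendered sample should be exactly zero.)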
+              should(buffer.getChannelData(0), 'ABSN output')
+                  .beConstantValueOf(0);
+            })
+            .then(() => task.done());
+      });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html
new file mode 100644
index 0000000000..af1454a5a9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      Test AudioBufferSourceNode With Looping a Single-Sample Buffer
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      let sampleRate = 44100;
+      let testDurationSamples = 1000;
+
+      audit.define('one-sample-loop', function(task, should) {
+        // Create the offline context for the test.
+        let context =
+            new OfflineAudioContext(1, testDurationSamples, sampleRate);
+
+        // Create the single sample buffer
+        let buffer = createConstantBuffer(context, 1, 1);
+
+        // Create the source and connect it to the destination
+        let source = context.createBufferSource();
+        source.buffer = buffer;
+        source.loop = true;
+        source.connect(context.destination);
+        source.start();
+
+        // Render it!
+        context.startRendering()
+            .then(function(audioBuffer) {
+              should(audioBuffer.getChannelData(0), 'Rendered data')
+                  .beConstantValueOf(1);
+            })
+            .then(task.done.bind(task));
+      });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
new file mode 100644
index 0000000000..5624054e32
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      audiobuffersource-playbackrate-zero.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      // The sample rate should be a multiple of 128 so that a change to the
+      // k-rate playbackRate AudioParam is observed exactly at the beginning
+      // of a render quantum.  8192 is the lowest power of two within the
+      // valid sample rate range.
+      let sampleRate = 8192;
+
+      // The render duration in seconds, and the length in samples.
+      let renderDuration = 1.0;
+      let renderLength = renderDuration * sampleRate;
+
+      let context = new OfflineAudioContext(1, renderLength, sampleRate);
+      let audit = Audit.createTaskRunner();
+
+
+      // Task: Render the actual buffer and compare with the reference.
+ audit.define('synthesize-verify', (task, should) => { + let ramp = context.createBufferSource(); + let rampBuffer = createLinearRampBuffer(context, renderLength); + ramp.buffer = rampBuffer; + + ramp.connect(context.destination); + ramp.start(); + + // Leave the playbackRate as 1 for the first half, then change it + // to zero at the exact half. The zero playback rate should hold the + // sample value of the buffer index at the moment. (sample-and-hold) + ramp.playbackRate.setValueAtTime(1.0, 0.0); + ramp.playbackRate.setValueAtTime(0.0, renderDuration / 2); + + context.startRendering() + .then(function(renderedBuffer) { + let data = renderedBuffer.getChannelData(0); + let rampData = rampBuffer.getChannelData(0); + let half = rampData.length / 2; + let passed = true; + let i; + + for (i = 1; i < rampData.length; i++) { + if (i < half) { + // Before the half position, the actual should match with the + // original ramp data. + if (data[i] !== rampData[i]) { + passed = false; + break; + } + } else { + // From the half position, the actual value should not change. + if (data[i] !== rampData[half]) { + passed = false; + break; + } + } + } + + should(passed, 'The zero playbackRate') + .message( + 'held the sample value correctly', + 'should hold the sample value. ' + + 'Expected ' + rampData[half] + ' but got ' + data[i] + + ' at the index ' + i); + }) + .then(() => task.done()); + }); + + audit.define('subsample start with playback rate 0', (task, should) => { + let context = new OfflineAudioContext(1, renderLength, sampleRate); + let rampBuffer = new AudioBuffer( + {length: renderLength, sampleRate: context.sampleRate}); + let data = new Float32Array(renderLength); + let startValue = 5; + for (let k = 0; k < data.length; ++k) { + data[k] = k + startValue; + } + rampBuffer.copyToChannel(data, 0); + + let src = new AudioBufferSourceNode( + context, {buffer: rampBuffer, playbackRate: 0}); + + src.connect(context.destination); + + // Purposely start the source between frame boundaries + let startFrame = 27.3; + src.start(startFrame / context.sampleRate); + + context.startRendering() + .then(audioBuffer => { + let actualStartFrame = Math.ceil(startFrame); + let audio = audioBuffer.getChannelData(0); + + should( + audio.slice(0, actualStartFrame), + `output[0:${actualStartFrame - 1}]`) + .beConstantValueOf(0); + should( + audio.slice(actualStartFrame), `output[${actualStartFrame}:]`) + .beConstantValueOf(startValue); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html new file mode 100644 index 0000000000..19331954b0 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html @@ -0,0 +1,174 @@ +<!DOCTYPE html> +<html> + <head> + <title> + audiobuffersource-start.html + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + <script src="/webaudio/resources/audiobuffersource-testing.js"></script> + </head> + <body> + <script id="layout-test-code"> + let audit = Audit.createTaskRunner(); + + // The following test cases assume an AudioBuffer of length 8 whose PCM + // data 
is a linear ramp, 0, 1, 2, 3,... + + let tests = [ + + { + description: + 'start(when): implicitly play whole buffer from beginning to end', + offsetFrame: 'none', + durationFrames: 'none', + renderFrames: 16, + playbackRate: 1, + expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 0): play whole buffer from beginning to end explicitly giving offset of 0', + offsetFrame: 0, + durationFrames: 'none', + renderFrames: 16, + playbackRate: 1, + expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 0, 8_frames): play whole buffer from beginning to end explicitly giving offset of 0 and duration of 8 frames', + offsetFrame: 0, + durationFrames: 8, + renderFrames: 16, + playbackRate: 1, + expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 4_frames): play with explicit non-zero offset', + offsetFrame: 4, + durationFrames: 'none', + renderFrames: 16, + playbackRate: 1, + expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 4_frames, 4_frames): play with explicit non-zero offset and duration', + offsetFrame: 4, + durationFrames: 4, + renderFrames: 16, + playbackRate: 1, + expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 7_frames): play with explicit non-zero offset near end of buffer', + offsetFrame: 7, + durationFrames: 1, + renderFrames: 16, + playbackRate: 1, + expected: [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 8_frames): play with explicit offset at end of buffer', + offsetFrame: 8, + durationFrames: 0, + renderFrames: 16, + playbackRate: 1, + expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + { + description: + 'start(when, 9_frames): play with explicit offset past end of buffer', + offsetFrame: 8, + durationFrames: 0, + renderFrames: 16, + playbackRate: 1, + expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + // When the duration exceeds the buffer, just play to the end of the + // buffer. (This is different from the case when we're looping, which is + // tested in loop-comprehensive.) + { + description: + 'start(when, 0, 15_frames): play with whole buffer, with long duration (clipped)', + offsetFrame: 0, + durationFrames: 15, + renderFrames: 16, + playbackRate: 1, + expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0] + }, + + // Enable test when AudioBufferSourceNode hack is fixed: + // https://bugs.webkit.org/show_bug.cgi?id=77224 { description: + // "start(when, 3_frames, 3_frames): play a middle section with explicit + // offset and duration", + // offsetFrame: 3, durationFrames: 3, renderFrames: 16, playbackRate: + // 1, expected: [4,5,6,7,0,0,0,0,0,0,0,0,0,0,0,0] }, + + ]; + + let sampleRate = 44100; + let buffer; + let bufferFrameLength = 8; + let testSpacingFrames = 32; + let testSpacingSeconds = testSpacingFrames / sampleRate; + let totalRenderLengthFrames = tests.length * testSpacingFrames; + + function runLoopTest(context, testNumber, test) { + let source = context.createBufferSource(); + + source.buffer = buffer; + source.playbackRate.value = test.playbackRate; + + source.connect(context.destination); + + // Render each test one after the other, spaced apart by + // testSpacingSeconds. 
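+        // (testSpacingFrames = 32 leaves room for each test's 16 rendered
+        // frames plus silence before the next test begins.)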
+ let startTime = testNumber * testSpacingSeconds; + + if (test.offsetFrame == 'none' && test.durationFrames == 'none') { + source.start(startTime); + } else if (test.durationFrames == 'none') { + let offset = test.offsetFrame / context.sampleRate; + source.start(startTime, offset); + } else { + let offset = test.offsetFrame / context.sampleRate; + let duration = test.durationFrames / context.sampleRate; + source.start(startTime, offset, duration); + } + } + + audit.define( + 'Tests AudioBufferSourceNode start()', function(task, should) { + // Create offline audio context. + let context = + new OfflineAudioContext(1, totalRenderLengthFrames, sampleRate); + buffer = createTestBuffer(context, bufferFrameLength); + + for (let i = 0; i < tests.length; ++i) + runLoopTest(context, i, tests[i]); + + context.startRendering().then(function(audioBuffer) { + checkAllTests(audioBuffer, should); + task.done(); + }); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html new file mode 100644 index 0000000000..20ef4a1c63 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html @@ -0,0 +1,101 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test Onended Event Listener + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script id="layout-test-code"> + let sampleRate = 44100; + let renderLengthSeconds = 1; + let renderLengthFrames = renderLengthSeconds * sampleRate; + + // Length of the source buffer. Anything less than the render length is + // fine. + let sourceBufferLengthFrames = renderLengthFrames / 8; + // When to stop the oscillator. Anything less than the render time is + // fine. + let stopTime = renderLengthSeconds / 8; + + let audit = Audit.createTaskRunner(); + + audit.define('absn-set-onended', (task, should) => { + // Test that the onended event for an AudioBufferSourceNode is fired + // when it is set directly. + let context = + new OfflineAudioContext(1, renderLengthFrames, sampleRate); + let buffer = context.createBuffer( + 1, sourceBufferLengthFrames, context.sampleRate); + let source = context.createBufferSource(); + source.buffer = buffer; + source.connect(context.destination); + source.onended = function(e) { + should( + true, 'AudioBufferSource.onended called when ended set directly') + .beEqualTo(true); + }; + source.start(); + context.startRendering().then(() => task.done()); + }); + + audit.define('absn-add-listener', (task, should) => { + // Test that the onended event for an AudioBufferSourceNode is fired + // when addEventListener is used to set the handler. 
+ let context = + new OfflineAudioContext(1, renderLengthFrames, sampleRate); + let buffer = context.createBuffer( + 1, sourceBufferLengthFrames, context.sampleRate); + let source = context.createBufferSource(); + source.buffer = buffer; + source.connect(context.destination); + source.addEventListener('ended', function(e) { + should( + true, + 'AudioBufferSource.onended called when using addEventListener') + .beEqualTo(true); + }); + source.start(); + context.startRendering().then(() => task.done()); + }); + + audit.define('osc-set-onended', (task, should) => { + // Test that the onended event for an OscillatorNode is fired when it is + // set directly. + let context = + new OfflineAudioContext(1, renderLengthFrames, sampleRate); + let source = context.createOscillator(); + source.connect(context.destination); + source.onended = function(e) { + should(true, 'Oscillator.onended called when ended set directly') + .beEqualTo(true); + }; + source.start(); + source.stop(stopTime); + context.startRendering().then(() => task.done()); + }); + + audit.define('osc-add-listener', (task, should) => { + // Test that the onended event for an OscillatorNode is fired when + // addEventListener is used to set the handler. + let context = + new OfflineAudioContext(1, renderLengthFrames, sampleRate); + let source = context.createOscillator(); + source.connect(context.destination); + source.addEventListener('ended', function(e) { + should(true, 'Oscillator.onended called when using addEventListener') + .beEqualTo(true); + }); + source.start(); + source.stop(stopTime); + context.startRendering().then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html new file mode 100644 index 0000000000..3ac9c05938 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html @@ -0,0 +1,74 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test Scheduled Sources with Huge Time Limits + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + <script src="/webaudio/resources/audioparam-testing.js"></script> + </head> + <body> + <script id="layout-test-code"> + let sampleRate = 48000; + let renderFrames = 1000; + + let audit = Audit.createTaskRunner(); + + audit.define('buffersource: huge stop time', (task, should) => { + // We only need to generate a small number of frames for this test. + let context = new OfflineAudioContext(1, renderFrames, sampleRate); + let src = context.createBufferSource(); + + // Constant source of amplitude 1, looping. + src.buffer = createConstantBuffer(context, 1, 1); + src.loop = true; + + // Create the graph and go! + let endTime = 1e300; + src.connect(context.destination); + src.start(); + src.stop(endTime); + + context.startRendering() + .then(function(resultBuffer) { + let result = resultBuffer.getChannelData(0); + should( + result, 'Output from AudioBufferSource.stop(' + endTime + ')') + .beConstantValueOf(1); + }) + .then(() => task.done()); + }); + + + audit.define('oscillator: huge stop time', (task, should) => { + // We only need to generate a small number of frames for this test. 
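+        // (A stop time of 1e300 seconds lies far beyond the rendered range,
+        // so the oscillator must still be playing when rendering ends.)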
+        let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+        let src = context.createOscillator();
+
+        // Create the graph and go!
+        let endTime = 1e300;
+        src.connect(context.destination);
+        src.start();
+        src.stop(endTime);
+
+        context.startRendering()
+            .then(function(resultBuffer) {
+              let result = resultBuffer.getChannelData(0);
+              // The buffer should not be empty.  Just find the max and verify
+              // that it's not zero.
+              let max = Math.max.apply(null, result);
+              should(
+                  max, 'Peak amplitude from oscillator.stop(' + endTime + ')')
+                  .beGreaterThan(0);
+            })
+            .then(() => task.done());
+      });
+
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html
new file mode 100644
index 0000000000..c181ceb8e0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html
@@ -0,0 +1,101 @@
+<!doctype html>
+<html>
+  <head>
+    <title>Test Extrapolation at end of AudioBuffer in an AudioBufferSourceNode</title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script>
+      let audit = Audit.createTaskRunner();
+
+      const sampleRate = 48000;
+
+      // For testing we only need a few render quanta.
+      const renderSamples = 512;
+
+      // Sample rate for our buffers.  This is the lowest sample rate that is
+      // required to be supported.
+      const bufferRate = 8000;
+
+      // Number of samples in each AudioBuffer; this is fairly arbitrary but
+      // should be less than a render quantum.
+      const bufferLength = 30;
+
+      // Frequency of the sine wave for testing.
+      const frequency = 440;
+
+      audit.define(
+          {
+            label: 'interpolate',
+            description: 'Interpolation of AudioBuffers to context sample rate'
+          },
+          (task, should) => {
+            // The first channel is for the interpolated signal, and the
+            // second channel is for the reference signal from an oscillator.
+            let context = new OfflineAudioContext({
+              numberOfChannels: 2,
+              length: renderSamples,
+              sampleRate: sampleRate
+            });
+
+            let merger = new ChannelMergerNode(
+                context, {numberOfInputs: context.destination.channelCount});
+            merger.connect(context.destination);
+
+            // Create a set of AudioBuffers which are samples from a pure sine
+            // wave with frequency |frequency|.
+            const nBuffers = Math.floor(context.length / bufferLength);
+            const omega = 2 * Math.PI * frequency / bufferRate;
+
+            let frameNumber = 0;
+            let startTime = 0;
+
+            for (let k = 0; k < nBuffers; ++k) {
+              // let buffer = context.createBuffer(1, bufferLength,
+              //     bufferRate);
+              let buffer = new AudioBuffer(
+                  {length: bufferLength, sampleRate: bufferRate});
+              let data = buffer.getChannelData(0);
+              for (let n = 0; n < bufferLength; ++n) {
+                data[n] = Math.sin(omega * frameNumber);
+                ++frameNumber;
+              }
+              // Create a source using this buffer and start it at the end of
+              // the previous buffer.
+              let src = new AudioBufferSourceNode(context, {buffer: buffer});
+
+              src.connect(merger, 0, 0);
+              src.start(startTime);
+              startTime += buffer.duration;
+            }
+
+            // Create the reference sine signal using an oscillator.
+ let osc = new OscillatorNode( + context, {type: 'sine', frequency: frequency}); + osc.connect(merger, 0, 1); + osc.start(0); + + context.startRendering() + .then(audioBuffer => { + let actual = audioBuffer.getChannelData(0); + let expected = audioBuffer.getChannelData(1); + + should(actual, 'Interpolated sine wave') + .beCloseToArray(expected, {absoluteThreshold: 9.0348e-2}); + + // Compute SNR between them. + let snr = 10 * Math.log10(computeSNR(actual, expected)); + + should(snr, `SNR (${snr.toPrecision(4)} dB)`) + .beGreaterThanOrEqualTo(37.17); + }) + .then(() => task.done()); + }); + + audit.run(); + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html new file mode 100644 index 0000000000..c1c3203451 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html @@ -0,0 +1,116 @@ +<!DOCTYPE html> +<html> + <head> + <title> + Test Constructor: AudioBufferSource + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + <script src="/webaudio/resources/audionodeoptions.js"></script> + </head> + <body> + <script id="layout-test-code"> + let context; + + let audit = Audit.createTaskRunner(); + + audit.define('initialize', (task, should) => { + context = initializeContext(should); + task.done(); + }); + + audit.define('invalid constructor', (task, should) => { + testInvalidConstructor(should, 'AudioBufferSourceNode', context); + task.done(); + }); + + audit.define('default constructor', (task, should) => { + let prefix = 'node0'; + let node = + testDefaultConstructor(should, 'AudioBufferSourceNode', context, { + prefix: prefix, + numberOfInputs: 0, + numberOfOutputs: 1, + channelCount: 2, + channelCountMode: 'max', + channelInterpretation: 'speakers' + }); + + testDefaultAttributes(should, node, prefix, [ + {name: 'buffer', value: null}, + {name: 'detune', value: 0}, + {name: 'loop', value: false}, + {name: 'loopEnd', value: 0.0}, + {name: 'loopStart', value: 0.0}, + {name: 'playbackRate', value: 1.0}, + ]); + + task.done(); + }); + + audit.define('nullable buffer', (task, should) => { + let node; + let options = {buffer: null}; + + should( + () => { + node = new AudioBufferSourceNode(context, options); + }, + 'node1 = new AudioBufferSourceNode(c, ' + JSON.stringify(options)) + .notThrow(); + + should(node.buffer, 'node1.buffer').beEqualTo(null); + + task.done(); + }); + + audit.define('constructor options', (task, should) => { + let node; + let buffer = context.createBuffer(2, 1000, context.sampleRate); + + let options = { + buffer: buffer, + detune: .5, + loop: true, + loopEnd: (buffer.length / 2) / context.sampleRate, + loopStart: 5 / context.sampleRate, + playbackRate: .75 + }; + + let message = 'node = new AudioBufferSourceNode(c, ' + + JSON.stringify(options) + ')'; + + should(() => { + node = new AudioBufferSourceNode(context, options); + }, message).notThrow(); + + // Use the factory method to create an equivalent node and compare the + // results from the constructor against this node. 
+      let factoryNode = context.createBufferSource();
+      factoryNode.buffer = options.buffer;
+      factoryNode.detune.value = options.detune;
+      factoryNode.loop = options.loop;
+      factoryNode.loopEnd = options.loopEnd;
+      factoryNode.loopStart = options.loopStart;
+      factoryNode.playbackRate.value = options.playbackRate;
+
+      should(node.buffer === buffer, 'node2.buffer === buffer')
+          .beEqualTo(true);
+      should(node.detune.value, 'node2.detune.value')
+          .beEqualTo(factoryNode.detune.value);
+      should(node.loop, 'node2.loop').beEqualTo(factoryNode.loop);
+      should(node.loopEnd, 'node2.loopEnd').beEqualTo(factoryNode.loopEnd);
+      should(node.loopStart, 'node2.loopStart')
+          .beEqualTo(factoryNode.loopStart);
+      should(node.playbackRate.value, 'node2.playbackRate.value')
+          .beEqualTo(factoryNode.playbackRate.value);
+
+      task.done();
+    });
+
+    audit.run();
+  </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html
new file mode 100644
index 0000000000..37c4462add
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html
@@ -0,0 +1,121 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      note-grain-on-play.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/note-grain-on-testing.js"></script>
+  </head>
+  <body>
+    <div id="description"></div>
+    <div id="console"></div>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      // To test noteGrainOn, a single ramp signal is created.
+      // Various sections of the ramp are rendered by noteGrainOn() at
+      // different times, and we verify that the actual output
+      // consists of the correct section of the ramp at the correct
+      // time.
+
+      let linearRampBuffer;
+
+      // Array of the grain offset used for each ramp played.
+      let grainOffsetTime = [];
+
+      // Verify the received signal is a ramp from the correct section
+      // of our ramp signal.
+      function verifyGrain(renderedData, startFrame, endFrame, grainIndex) {
+        let grainOffsetFrame =
+            timeToSampleFrame(grainOffsetTime[grainIndex], sampleRate);
+        let grainFrameLength = endFrame - startFrame;
+        let ramp = linearRampBuffer.getChannelData(0);
+        let isCorrect = true;
+
+        let expected;
+        let actual;
+        let frame;
+
+        for (let k = 0; k < grainFrameLength; ++k) {
+          if (renderedData[startFrame + k] != ramp[grainOffsetFrame + k]) {
+            expected = ramp[grainOffsetFrame + k];
+            actual = renderedData[startFrame + k];
+            frame = startFrame + k;
+            isCorrect = false;
+            break;
+          }
+        }
+        return {
+          verified: isCorrect,
+          expected: expected,
+          actual: actual,
+          frame: frame
+        };
+      }
+
+      function checkResult(buffer, should) {
+        let renderedData = buffer.getChannelData(0);
+        let nSamples = renderedData.length;
+
+        // Number of grains that we found that have incorrect data.
+        let invalidGrainDataCount = 0;
+
+        let startEndFrames = findStartAndEndSamples(renderedData);
+
+        // Verify the start and stop times.  Not strictly needed for
+        // this test, but it's useful to know if the ramp data
+        // appears to be incorrect.
+        verifyStartAndEndFrames(startEndFrames, should);
+
+        // Loop through each of the rendered grains and check that
+        // each grain contains our expected ramp.
+        for (let k = 0; k < startEndFrames.start.length; ++k) {
+          // Verify that the rendered data matches the expected
+          // section of our ramp signal.
+          let result = verifyGrain(
+              renderedData, startEndFrames.start[k], startEndFrames.end[k], k);
+          should(result.verified, 'Pulse ' + k + ' contained the expected data')
+              .beTrue();
+          if (!result.verified) {
+            // Count it so the summary check below reflects failures.
+            ++invalidGrainDataCount;
+          }
+        }
+        should(
+            invalidGrainDataCount,
+            'Number of grains that did not contain the expected data')
+            .beEqualTo(0);
+      }
+
+      audit.define(
+          {
+            label: 'note-grain-on-play',
+            description: 'Test noteGrainOn offset rendering'
+          },
+          function(task, should) {
+            // Create offline audio context.
+            context =
+                new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
+
+            // Create a linear ramp for testing noteGrainOn.
+            linearRampBuffer = createSignalBuffer(context, function(k) {
+              // Want the ramp to start
+              // with 1, not 0.
+              return k + 1;
+            });
+
+            let grainInfo =
+                playAllGrains(context, linearRampBuffer, numberOfTests);
+
+            grainOffsetTime = grainInfo.grainOffsetTimes;
+
+            context.startRendering().then(function(audioBuffer) {
+              checkResult(audioBuffer, should);
+              task.done();
+            });
+          });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html
new file mode 100644
index 0000000000..0db297b42c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <title>
+      note-grain-on-timing.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/note-grain-on-testing.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      let squarePulseBuffer;
+
+      function checkResult(buffer, should) {
+        let renderedData = buffer.getChannelData(0);
+        let nSamples = renderedData.length;
+        let startEndFrames = findStartAndEndSamples(renderedData);
+
+        verifyStartAndEndFrames(startEndFrames, should);
+      }
+
+      audit.define('Test timing of noteGrainOn', function(task, should) {
+        // Create offline audio context.
+        context =
+            new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
+
+        squarePulseBuffer = createSignalBuffer(context, function(k) {
+          return 1;
+        });
+
+        playAllGrains(context, squarePulseBuffer, numberOfTests);
+
+        context.startRendering().then(function(audioBuffer) {
+          checkResult(audioBuffer, should);
+          task.done();
+        });
+      });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav
Binary files differ
new file mode 100644
index 0000000000..ab9d5fe5a9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html
new file mode 100644
index 0000000000..5fafd024ee
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html>
+<!--
+Tests that we are able to schedule a series of notes to play back with
+sample accuracy.  We use an impulse so we can tell exactly where the
+rendering is happening.
+-->
+<html>
+  <head>
+    <title>
+      sample-accurate-scheduling.html
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+    <script src="/webaudio/resources/buffer-loader.js"></script>
+  </head>
+  <body>
+    <script id="layout-test-code">
+      let audit = Audit.createTaskRunner();
+
+      let sampleRate = 44100.0;
+      let lengthInSeconds = 4;
+
+      let context = 0;
+      let bufferLoader = 0;
+      let impulse;
+
+      // See if we can render at exactly these sample offsets.
+      let sampleOffsets = [0, 3, 512, 517, 1000, 1005, 20000, 21234, 37590];
+
+      function createImpulse() {
+        // An impulse has a value of 1 at time 0, and is otherwise 0.
+        impulse = context.createBuffer(2, 512, sampleRate);
+        let sampleDataL = impulse.getChannelData(0);
+        let sampleDataR = impulse.getChannelData(1);
+        sampleDataL[0] = 1.0;
+        sampleDataR[0] = 1.0;
+      }
+
+      function playNote(time) {
+        let bufferSource = context.createBufferSource();
+        bufferSource.buffer = impulse;
+        bufferSource.connect(context.destination);
+        bufferSource.start(time);
+      }
+
+      function checkSampleAccuracy(buffer, should) {
+        let bufferDataL = buffer.getChannelData(0);
+        let bufferDataR = buffer.getChannelData(1);
+
+        let impulseCount = 0;
+        let badOffsetCount = 0;
+
+        // Left and right channels must be the same.
+        should(bufferDataL, 'Content of left and right channels match and')
+            .beEqualToArray(bufferDataR);
+
+        // Go through every sample and make sure it's 0, except at positions
+        // in sampleOffsets.
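+        // (Each scheduled note is an impulse, so a correctly scheduled note
+        // appears as a single non-zero sample exactly at its start frame.)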
+        for (let i = 0; i < buffer.length; ++i) {
+          if (bufferDataL[i] != 0) {
+            // Make sure this index is in sampleOffsets
+            let found = false;
+            for (let j = 0; j < sampleOffsets.length; ++j) {
+              if (sampleOffsets[j] == i) {
+                found = true;
+                break;
+              }
+            }
+            ++impulseCount;
+            should(found, 'Non-zero sample found at sample offset ' + i)
+                .beTrue();
+            if (!found) {
+              ++badOffsetCount;
+            }
+          }
+        }
+
+        should(impulseCount, 'Number of impulses found')
+            .beEqualTo(sampleOffsets.length);
+
+        if (impulseCount == sampleOffsets.length) {
+          should(badOffsetCount, 'bad offset').beEqualTo(0);
+        }
+      }
+
+      audit.define(
+          {label: 'test', description: 'Test sample-accurate scheduling'},
+          function(task, should) {
+
+            // Create offline audio context.
+            context = new OfflineAudioContext(
+                2, sampleRate * lengthInSeconds, sampleRate);
+            createImpulse();
+
+            for (let i = 0; i < sampleOffsets.length; ++i) {
+              let timeInSeconds = sampleOffsets[i] / sampleRate;
+              playNote(timeInSeconds);
+            }
+
+            context.startRendering().then(function(buffer) {
+              checkSampleAccuracy(buffer, should);
+              task.done();
+            });
+          });
+
+      audit.run();
+    </script>
+  </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html
new file mode 100644
index 0000000000..3700bfa8ce
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html
@@ -0,0 +1,133 @@
+<!doctype html>
+<html>
+  <head>
+    <title>
+      Test Sub-Sample Accurate Stitching of ABSNs
+    </title>
+    <script src="/resources/testharness.js"></script>
+    <script src="/resources/testharnessreport.js"></script>
+    <script src="/webaudio/resources/audit-util.js"></script>
+    <script src="/webaudio/resources/audit.js"></script>
+  </head>
+  <body>
+    <script>
+      let audit = Audit.createTaskRunner();
+
+      audit.define(
+          {
+            label: 'buffer-stitching-1',
+            description: 'Subsample buffer stitching, same rates'
+          },
+          (task, should) => {
+            const sampleRate = 44100;
+            const bufferRate = 44100;
+            const bufferLength = 30;
+
+            // Experimentally determined thresholds.  DO NOT relax them too
+            // far from these values just to make the tests pass.
+            const errorThreshold = 9.0957e-5;
+            const snrThreshold = 85.580;
+
+            // Informative message
+            should(sampleRate, 'Test 1: context.sampleRate')
+                .beEqualTo(sampleRate);
+            testBufferStitching(sampleRate, bufferRate, bufferLength)
+                .then(resultBuffer => {
+                  const actual = resultBuffer.getChannelData(0);
+                  const expected = resultBuffer.getChannelData(1);
+                  should(
+                      actual,
+                      `Stitched sine-wave buffers at sample rate ${bufferRate}`)
+                      .beCloseToArray(
+                          expected, {absoluteThreshold: errorThreshold});
+                  const SNR = 10 * Math.log10(computeSNR(actual, expected));
+                  should(SNR, `SNR (${SNR} dB)`)
+                      .beGreaterThanOrEqualTo(snrThreshold);
+                })
+                .then(() => task.done());
+          });
+
+      audit.define(
+          {
+            label: 'buffer-stitching-2',
+            description: 'Subsample buffer stitching, different rates'
+          },
+          (task, should) => {
+            const sampleRate = 44100;
+            const bufferRate = 43800;
+            const bufferLength = 30;
+
+            // Experimentally determined thresholds.  DO NOT relax them too
+            // far from these values just to make the tests pass.
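+            // (These thresholds are looser than in buffer-stitching-1
+            // because the 43800 Hz buffers must be resampled to the 44100 Hz
+            // context rate, which introduces interpolation error.)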
+ const errorThreshold = 3.8986e-3; + const snrThreshold = 65.737; + + // Informative message + should(sampleRate, 'Test 2: context.sampleRate') + .beEqualTo(sampleRate); + testBufferStitching(sampleRate, bufferRate, bufferLength) + .then(resultBuffer => { + const actual = resultBuffer.getChannelData(0); + const expected = resultBuffer.getChannelData(1); + should( + actual, + `Stitched sine-wave buffers at sample rate ${bufferRate}`) + .beCloseToArray( + expected, {absoluteThreshold: errorThreshold}); + const SNR = 10 * Math.log10(computeSNR(actual, expected)); + should(SNR, `SNR (${SNR} dB)`) + .beGreaterThanOrEqualTo(snrThreshold); + }) + .then(() => task.done()); + }); + + audit.run(); + + // Create graph to test stitching of consecutive ABSNs. The context rate + // is |sampleRate|, and the buffers have a fixed length of |bufferLength| + // and rate of |bufferRate|. The |bufferRate| should not be too different + // from |sampleRate| because of interpolation of the buffer to the context + // rate. + function testBufferStitching(sampleRate, bufferRate, bufferLength) { + // The context for testing. Channel 0 contains the output from + // stitching all the buffers together, and channel 1 contains the + // expected output. + const context = new OfflineAudioContext( + {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate}); + + const merger = new ChannelMergerNode( + context, {numberOfInputs: context.destination.channelCount}); + + merger.connect(context.destination); + + // The reference is a sine wave at 440 Hz. + const ref = new OscillatorNode(context, {frequency: 440, type: 'sine'}); + ref.connect(merger, 0, 1); + ref.start(); + + // The test signal is a bunch of short AudioBufferSources containing + // bits of a sine wave. + let waveSignal = new Float32Array(context.length); + const omega = 2 * Math.PI / bufferRate * ref.frequency.value; + for (let k = 0; k < context.length; ++k) { + waveSignal[k] = Math.sin(omega * k); + } + + // Slice the sine wave into many little buffers to be assigned to ABSNs + // that are started at the appropriate times to produce a final sine + // wave. + for (let k = 0; k < context.length; k += bufferLength) { + const buffer = + new AudioBuffer({length: bufferLength, sampleRate: bufferRate}); + buffer.copyToChannel(waveSignal.slice(k, k + bufferLength), 0); + + const src = new AudioBufferSourceNode(context, {buffer: buffer}); + src.connect(merger, 0, 0); + src.start(k / bufferRate); + } + + return context.startRendering(); + } + </script> + </body> +</html> diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html new file mode 100644 index 0000000000..8c627f90f2 --- /dev/null +++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html @@ -0,0 +1,423 @@ +<!doctype html> +<html> + <head> + <title> + Test Sub-Sample Accurate Scheduling for ABSN + </title> + <script src="/resources/testharness.js"></script> + <script src="/resources/testharnessreport.js"></script> + <script src="/webaudio/resources/audit-util.js"></script> + <script src="/webaudio/resources/audit.js"></script> + </head> + <body> + <script> + // Power of two so there's no roundoff converting from integer frames to + // time. 
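+      // (With 32768 = 2^15, frame / sampleRate is exactly representable as a
+      // binary double, so a time computed from an integer frame maps back to
+      // that frame with no rounding error.)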
+      let sampleRate = 32768;
+
+      let audit = Audit.createTaskRunner();
+
+      audit.define('sub-sample accurate start', (task, should) => {
+        // There are two channels, one for each source.  Only need to
+        // render a few quanta for this test.
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // Use a simple linear ramp for the sources with integer steps
+        // starting at 1 to make it easy to verify that we have
+        // sub-sample accurate starts.  The ramp starts at 1 so we can
+        // easily tell when the source starts.
+        let rampBuffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        let r = rampBuffer.getChannelData(0);
+        for (let k = 0; k < r.length; ++k) {
+          r[k] = k + 1;
+        }
+
+        const src0 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+        const src1 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+
+        // Frames where the sources should start.  These are pretty
+        // arbitrary, but one should be close to an integer and the other
+        // should be close to the next integer.  We do this to catch the
+        // case where the start frame is rounded.  Rounding is incorrect.
+        const startFrame = 33;
+        const startFrame0 = startFrame + 0.1;
+        const startFrame1 = startFrame + 0.9;
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+
+        src0.start(startFrame0 / context.sampleRate);
+        src1.start(startFrame1 / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              const output0 = audioBuffer.getChannelData(0);
+              const output1 = audioBuffer.getChannelData(1);
+
+              // Compute the expected output by interpolating the ramp
+              // buffer of the sources if they started at the given frame.
+              const ramp = rampBuffer.getChannelData(0);
+              const expected0 = interpolateRamp(ramp, startFrame0);
+              const expected1 = interpolateRamp(ramp, startFrame1);
+
+              // Verify output0 has the correct values
+
+              // For information only
+              should(startFrame0, 'src0 start frame').beEqualTo(startFrame0);
+
+              // Output must be zero before the source start frame, and it
+              // must be interpolated correctly after the start frame.  The
+              // absoluteThreshold below is currently set for Chrome, which
+              // does linear interpolation.  This needs to be updated
+              // eventually if other browsers do not use linear
+              // interpolation.
+              should(
+                  output0.slice(0, startFrame + 1), `output0[0:${startFrame}]`)
+                  .beConstantValueOf(0);
+              should(
+                  output0.slice(startFrame + 1, expected0.length),
+                  `output0[${startFrame + 1}:${expected0.length - 1}]`)
+                  .beCloseToArray(
+                      expected0.slice(startFrame + 1), {absoluteThreshold: 0});
+
+              // Verify output1 has the correct values.  Same approach as
+              // for output0.
+              should(startFrame1, 'src1 start frame').beEqualTo(startFrame1);
+
+              should(
+                  output1.slice(0, startFrame + 1), `output1[0:${startFrame}]`)
+                  .beConstantValueOf(0);
+              should(
+                  output1.slice(startFrame + 1, expected1.length),
+                  `output1[${startFrame + 1}:${expected1.length - 1}]`)
+                  .beCloseToArray(
+                      expected1.slice(startFrame + 1), {absoluteThreshold: 0});
+            })
+            .then(() => task.done());
+      });
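+
+      // [Editorial sketch, not part of the original test] For a ramp
+      // buffer r[k] = k + 1 and a fractional start frame, linear
+      // interpolation (the same assumption interpolateRamp makes below)
+      // predicts the first nonzero output sample:
+      function expectedFirstSample(startFrame, ramp) {
+        const firstFrame = Math.floor(startFrame) + 1;
+        const position = firstFrame - startFrame;  // in (0, 1)
+        return ramp[0] + position * (ramp[1] - ramp[0]);
+      }
+      // e.g. expectedFirstSample(33.1, [1, 2]) is 1.9 and
+      // expectedFirstSample(33.9, [1, 2]) is 1.1, up to floating-point
+      // rounding.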
+
+      audit.define('sub-sample accurate stop', (task, should) => {
+        // There are three channels, one for each source.  Only need to
+        // render a few quanta for this test.
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 3, length: 128, sampleRate: sampleRate});
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+        let buffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        buffer.getChannelData(0).fill(1);
+
+        const src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+        const src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+        const src2 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+        // Frames where the sources should stop.  These are pretty
+        // arbitrary, but one should be an integer, one should be close to
+        // an integer, and the other should be close to the next integer.
+        // This is to catch the case where the end frame is rounded.
+        // Rounding is incorrect.
+        const endFrame = 33;
+        const endFrame1 = endFrame + 0.1;
+        const endFrame2 = endFrame + 0.9;
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+        src2.connect(merger, 0, 2);
+
+        src0.start(0);
+        src1.start(0);
+        src2.start(0);
+        src0.stop(endFrame / context.sampleRate);
+        src1.stop(endFrame1 / context.sampleRate);
+        src2.stop(endFrame2 / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              let actual0 = audioBuffer.getChannelData(0);
+              let actual1 = audioBuffer.getChannelData(1);
+              let actual2 = audioBuffer.getChannelData(2);
+
+              // Just verify that we stopped at the right time.
+
+              // This is the case where the end frame is an integer.  Since
+              // the first output ends on an exact frame, the output must
+              // be zero at that frame number.  We print the end frame for
+              // information only; it makes interpretation of the rest
+              // easier.
+              should(endFrame - 1, 'src0 end frame')
+                  .beEqualTo(endFrame - 1);
+              should(actual0[endFrame - 1], `output0[${endFrame - 1}]`)
+                  .notBeEqualTo(0);
+              should(actual0.slice(endFrame), `output0[${endFrame}:]`)
+                  .beConstantValueOf(0);
+
+              // The case where the end frame is just a little above an
+              // integer.  The output must not be zero just before the end
+              // and must be zero after.
+              should(endFrame1, 'src1 end frame')
+                  .beEqualTo(endFrame1);
+              should(actual1[endFrame], `output1[${endFrame}]`)
+                  .notBeEqualTo(0);
+              should(actual1.slice(endFrame + 1), `output1[${endFrame + 1}:]`)
+                  .beConstantValueOf(0);
+
+              // The case where the end frame is just a little below an
+              // integer.  The output must not be zero just before the end
+              // and must be zero after.
+              should(endFrame2, 'src2 end frame')
+                  .beEqualTo(endFrame2);
+              should(actual2[endFrame], `output2[${endFrame}]`)
+                  .notBeEqualTo(0);
+              should(actual2.slice(endFrame + 1), `output2[${endFrame + 1}:]`)
+                  .beConstantValueOf(0);
+            })
+            .then(() => task.done());
+      });
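+
+      // [Editorial sketch, not part of the original test] The stop
+      // assertions above all follow one rule: a stop strictly inside a
+      // frame still renders that frame, while a stop exactly on a frame
+      // boundary does not.  As an illustrative helper:
+      function lastNonzeroFrame(stopFrame) {
+        return Number.isInteger(stopFrame) ? stopFrame - 1 :
+                                             Math.floor(stopFrame);
+      }
+      // lastNonzeroFrame(33) === 32, lastNonzeroFrame(33.1) === 33, and
+      // lastNonzeroFrame(33.9) === 33, matching src0, src1, and src2.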
+
+      audit.define('sub-sample-grain', (task, should) => {
+        let context = new OfflineAudioContext(
+            {numberOfChannels: 2, length: 128, sampleRate: sampleRate});
+
+        let merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+
+        merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+        let buffer = new AudioBuffer(
+            {length: context.length, sampleRate: context.sampleRate});
+        buffer.getChannelData(0).fill(1);
+
+        let src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+        let src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+        src0.connect(merger, 0, 0);
+        src1.connect(merger, 0, 1);
+
+        // Start short grains.
+        const src0StartGrain = 3.1;
+        const src0EndGrain = 37.2;
+        src0.start(
+            src0StartGrain / context.sampleRate, 0,
+            (src0EndGrain - src0StartGrain) / context.sampleRate);
+
+        const src1StartGrain = 5.8;
+        const src1EndGrain = 43.9;
+        src1.start(
+            src1StartGrain / context.sampleRate, 0,
+            (src1EndGrain - src1StartGrain) / context.sampleRate);
+
+        context.startRendering()
+            .then(audioBuffer => {
+              let output0 = audioBuffer.getChannelData(0);
+              let output1 = audioBuffer.getChannelData(1);
+
+              // Verify that the actual output matches the expected grain
+              // for each source; verifyGrain computes the expected output
+              // internally.
+              verifyGrain(should, output0, {
+                startGrain: src0StartGrain,
+                endGrain: src0EndGrain,
+                sourceName: 'src0',
+                outputName: 'output0'
+              });
+
+              verifyGrain(should, output1, {
+                startGrain: src1StartGrain,
+                endGrain: src1EndGrain,
+                sourceName: 'src1',
+                outputName: 'output1'
+              });
+            })
+            .then(() => task.done());
+      });
+
+      audit.define(
+          'sub-sample accurate start with playbackRate', (task, should) => {
+            // There are two channels, one for each source.  Only need to
+            // render a few quanta for this test.
+            let context = new OfflineAudioContext(
+                {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+            let merger = new ChannelMergerNode(
+                context, {numberOfInputs: context.destination.channelCount});
+
+            merger.connect(context.destination);
+
+            // Use a simple linear ramp for the sources with integer steps
+            // starting at 1 to make it easy to verify that we have
+            // sub-sample accurate starts.  The ramp starts at 1 so we can
+            // easily tell when the source starts.
+            let buffer = new AudioBuffer(
+                {length: context.length, sampleRate: context.sampleRate});
+            let r = buffer.getChannelData(0);
+            for (let k = 0; k < r.length; ++k) {
+              r[k] = k + 1;
+            }
+
+            // Two sources with different playback rates
+            const src0 = new AudioBufferSourceNode(
+                context, {buffer: buffer, playbackRate: .25});
+            const src1 = new AudioBufferSourceNode(
+                context, {buffer: buffer, playbackRate: 4});
+
+            // Frame where the sources start.  Pretty arbitrary, but it
+            // should not be an integer.
+            const startFrame = 17.8;
+
+            src0.connect(merger, 0, 0);
+            src1.connect(merger, 0, 1);
+
+            src0.start(startFrame / context.sampleRate);
+            src1.start(startFrame / context.sampleRate);
+
+            context.startRendering()
+                .then(audioBuffer => {
+                  const output0 = audioBuffer.getChannelData(0);
+                  const output1 = audioBuffer.getChannelData(1);
+
+                  const frameBefore = Math.floor(startFrame);
+                  const frameAfter = frameBefore + 1;
+
+                  // Informative message so we know what the following
+                  // output indices really mean.
+                  should(startFrame, 'Source start frame')
+                      .beEqualTo(startFrame);
+
+                  // Verify the output.
+
+                  // With a startFrame of 17.8, the first output is at
+                  // frame 18, but the actual start is at 17.8.  So we
+                  // would interpolate the output 0.2 of the way between
+                  // 17.8 and 18, for an output of 1.2 for our ramp.  But
+                  // the playback rate is 0.25, so we're really only 1/4
+                  // as far along as we think, so the output is 0.2 * 0.25
+                  // of the way between 1 and 2, or 1.05.
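+
+                  // [Editorial worked example, not part of the original
+                  // test] The same arithmetic, spelled out with the ramp
+                  // values r[0] = 1 and r[1] = 2 (approximate, up to
+                  // floating-point rounding):
+                  const exampleFraction = 18 - 17.8;  // ~0.2
+                  const exampleSrc0Sample =
+                      1 + exampleFraction * 0.25 * (2 - 1);  // ~1.05
+                  const exampleSrc1Sample =
+                      1 + exampleFraction * 4 * (2 - 1);     // ~1.8
+                  // The expectations below compute these same values from
+                  // the buffer and each source's playbackRate.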
+
+                  const ramp0 = buffer.getChannelData(0)[0];
+                  const ramp1 = buffer.getChannelData(0)[1];
+
+                  const src0Output = ramp0 +
+                      (ramp1 - ramp0) * (frameAfter - startFrame) *
+                          src0.playbackRate.value;
+
+                  let playbackMessage =
+                      `With playbackRate ${src0.playbackRate.value}:`;
+
+                  should(
+                      output0[frameBefore],
+                      `${playbackMessage} output0[${frameBefore}]`)
+                      .beEqualTo(0);
+                  should(
+                      output0[frameAfter],
+                      `${playbackMessage} output0[${frameAfter}]`)
+                      .beCloseTo(src0Output, {threshold: 4.542e-8});
+
+                  const src1Output = ramp0 +
+                      (ramp1 - ramp0) * (frameAfter - startFrame) *
+                          src1.playbackRate.value;
+
+                  playbackMessage =
+                      `With playbackRate ${src1.playbackRate.value}:`;
+
+                  should(
+                      output1[frameBefore],
+                      `${playbackMessage} output1[${frameBefore}]`)
+                      .beEqualTo(0);
+                  should(
+                      output1[frameAfter],
+                      `${playbackMessage} output1[${frameAfter}]`)
+                      .beCloseTo(src1Output, {threshold: 4.542e-8});
+                })
+                .then(() => task.done());
+          });
+
+      audit.run();
+
+      // Given an input ramp in |rampBuffer|, interpolate the signal
+      // assuming this ramp is used for an ABSN that starts at frame
+      // |startFrame|, which is not necessarily an integer.  For
+      // simplicity we just use linear interpolation here.  The
+      // interpolation is not part of the spec, but this should be pretty
+      // close to whatever interpolation is being done.
+      function interpolateRamp(rampBuffer, startFrame) {
+        // |start| is the last zero sample before the ABSN actually starts.
+        const start = Math.floor(startFrame);
+        // One less than the rampBuffer length because we can't linearly
+        // interpolate the last frame.
+        let result = new Float32Array(rampBuffer.length - 1);
+
+        for (let k = 0; k <= start; ++k) {
+          result[k] = 0;
+        }
+
+        // Now start linear interpolation.
+        let frame = startFrame;
+        let index = 1;
+        for (let k = start + 1; k < result.length; ++k) {
+          let s0 = rampBuffer[index];
+          let s1 = rampBuffer[index - 1];
+          // |frame - k| is the (negative) fractional frame offset, so this
+          // interpolates between s1 and s0.
+          let delta = frame - k;
+          let s = s1 - delta * (s0 - s1);
+          result[k] = s;
+          ++frame;
+          ++index;
+        }
+
+        return result;
+      }
+
+      function verifyGrain(should, output, options) {
+        let {startGrain, endGrain, sourceName, outputName} = options;
+        let expected = new Float32Array(output.length);
+        // Compute the expected output for the grain and verify that the
+        // actual output matches.
+        expected.fill(1);
+        for (let k = 0; k <= Math.floor(startGrain); ++k) {
+          expected[k] = 0;
+        }
+        for (let k = Math.ceil(endGrain); k < expected.length; ++k) {
+          expected[k] = 0;
+        }
+
+        should(startGrain, `${sourceName} grain start`).beEqualTo(startGrain);
+        should(endGrain - startGrain, `${sourceName} grain duration`)
+            .beEqualTo(endGrain - startGrain);
+        should(endGrain, `${sourceName} grain end`).beEqualTo(endGrain);
+        should(output, outputName).beEqualToArray(expected);
+        should(
+            output[Math.floor(startGrain)],
+            `${outputName}[${Math.floor(startGrain)}]`)
+            .beEqualTo(0);
+        should(
+            output[1 + Math.floor(startGrain)],
+            `${outputName}[${1 + Math.floor(startGrain)}]`)
+            .notBeEqualTo(0);
+        should(
+            output[Math.floor(endGrain)],
+            `${outputName}[${Math.floor(endGrain)}]`)
+            .notBeEqualTo(0);
+        should(
+            output[1 + Math.floor(endGrain)],
+            `${outputName}[${1 + Math.floor(endGrain)}]`)
+            .beEqualTo(0);
+      }
+    </script>
+  </body>
+</html>