author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 09:22:09 +0000
commit     43a97878ce14b72f0981164f87f2e35e14151312
tree       620249daf56c0258faa40cbdcf9cfba06de2a846  /testing/web-platform/tests/webaudio/resources
parent     Initial commit.
Adding upstream version 110.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/webaudio/resources')
21 files changed, 4798 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/resources/4ch-440.wav b/testing/web-platform/tests/webaudio/resources/4ch-440.wav Binary files differnew file mode 100644 index 0000000000..85dc1ea904 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/4ch-440.wav diff --git a/testing/web-platform/tests/webaudio/resources/audio-param.js b/testing/web-platform/tests/webaudio/resources/audio-param.js new file mode 100644 index 0000000000..bc33fe8a21 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audio-param.js @@ -0,0 +1,44 @@ +// Define functions that implement the formulas for AudioParam automations. + +// AudioParam linearRamp value at time t for a linear ramp between (t0, v0) and +// (t1, v1). It is assumed that t0 <= t. Results are undefined otherwise. +function audioParamLinearRamp(t, v0, t0, v1, t1) { + if (t >= t1) + return v1; + return (v0 + (v1 - v0) * (t - t0) / (t1 - t0)) +} + +// AudioParam exponentialRamp value at time t for an exponential ramp between +// (t0, v0) and (t1, v1). It is assumed that t0 <= t. Results are undefined +// otherwise. +function audioParamExponentialRamp(t, v0, t0, v1, t1) { + if (t >= t1) + return v1; + return v0 * Math.pow(v1 / v0, (t - t0) / (t1 - t0)); +} + +// AudioParam setTarget value at time t for a setTarget curve starting at (t0, +// v0) with a final value of vFainal and a time constant of timeConstant. It is +// assumed that t0 <= t. Results are undefined otherwise. +function audioParamSetTarget(t, v0, t0, vFinal, timeConstant) { + return vFinal + (v0 - vFinal) * Math.exp(-(t - t0) / timeConstant); +} + +// AudioParam setValueCurve value at time t for a setValueCurve starting at time +// t0 with curve, curve, and duration duration. The sample rate is sampleRate. +// It is assumed that t0 <= t. +function audioParamSetValueCurve(t, curve, t0, duration) { + if (t > t0 + duration) + return curve[curve.length - 1]; + + let curvePointsPerSecond = (curve.length - 1) / duration; + + let virtualIndex = (t - t0) * curvePointsPerSecond; + let index = Math.floor(virtualIndex); + + let delta = virtualIndex - index; + + let c0 = curve[index]; + let c1 = curve[Math.min(index + 1, curve.length - 1)]; + return c0 + (c1 - c0) * delta; +} diff --git a/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js b/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js new file mode 100644 index 0000000000..2233641914 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js @@ -0,0 +1,102 @@ +function createTestBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(1, sampleFrameLength, context.sampleRate); + let channelData = audioBuffer.getChannelData(0); + + // Create a simple linear ramp starting at zero, with each value in the buffer + // equal to its index position. + for (let i = 0; i < sampleFrameLength; ++i) + channelData[i] = i; + + return audioBuffer; +} + +function checkSingleTest(renderedBuffer, i, should) { + let renderedData = renderedBuffer.getChannelData(0); + let offsetFrame = i * testSpacingFrames; + + let test = tests[i]; + let expected = test.expected; + let description; + + if (test.description) { + description = test.description; + } else { + // No description given, so create a basic one from the given test + // parameters. 
+ description = + 'loop from ' + test.loopStartFrame + ' -> ' + test.loopEndFrame; + if (test.offsetFrame) + description += ' with offset ' + test.offsetFrame; + if (test.playbackRate && test.playbackRate != 1) + description += ' with playbackRate of ' + test.playbackRate; + } + + let framesToTest; + + if (test.renderFrames) + framesToTest = test.renderFrames; + else if (test.durationFrames) + framesToTest = test.durationFrames; + + // Verify that the output matches + let prefix = 'Case ' + i + ': '; + should( + renderedData.slice(offsetFrame, offsetFrame + framesToTest), + prefix + description) + .beEqualToArray(expected); + + // Verify that we get all zeroes after the buffer (or duration) has passed. + should( + renderedData.slice( + offsetFrame + framesToTest, offsetFrame + testSpacingFrames), + prefix + description + ': tail') + .beConstantValueOf(0); +} + +function checkAllTests(renderedBuffer, should) { + for (let i = 0; i < tests.length; ++i) + checkSingleTest(renderedBuffer, i, should); +} + + +// Create the actual result by modulating playbackRate or detune AudioParam of +// ABSN. |modTarget| is a string of AudioParam name, |modOffset| is the offset +// (anchor) point of modulation, and |modRange| is the range of modulation. +// +// createSawtoothWithModulation(context, 'detune', 440, 1200); +// +// The above will perform a modulation on detune within the range of +// [1200, -1200] around the sawtooth waveform on 440Hz. +function createSawtoothWithModulation(context, modTarget, modOffset, modRange) { + let lfo = context.createOscillator(); + let amp = context.createGain(); + + // Create a sawtooth generator with the signal range of [0, 1]. + let phasor = context.createBufferSource(); + let phasorBuffer = context.createBuffer(1, sampleRate, sampleRate); + let phasorArray = phasorBuffer.getChannelData(0); + let phase = 0, phaseStep = 1 / sampleRate; + for (let i = 0; i < phasorArray.length; i++) { + phasorArray[i] = phase % 1.0; + phase += phaseStep; + } + phasor.buffer = phasorBuffer; + phasor.loop = true; + + // 1Hz for audible (human-perceivable) parameter modulation by LFO. + lfo.frequency.value = 1.0; + + amp.gain.value = modRange; + phasor.playbackRate.value = modOffset; + + // The oscillator output should be amplified accordingly to drive the + // modulation within the desired range. + lfo.connect(amp); + amp.connect(phasor[modTarget]); + + phasor.connect(context.destination); + + lfo.start(); + phasor.start(); +} diff --git a/testing/web-platform/tests/webaudio/resources/audionodeoptions.js b/testing/web-platform/tests/webaudio/resources/audionodeoptions.js new file mode 100644 index 0000000000..3b7867cabf --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audionodeoptions.js @@ -0,0 +1,292 @@ +// Test that constructor for the node with name |nodeName| handles the +// various possible values for channelCount, channelCountMode, and +// channelInterpretation. + +// The |should| parameter is the test function from new |Audit|. 
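// Usage sketch (illustrative only; not part of the upstream helpers). GainNode
// is chosen because it places no constraints on channelCount, channelCountMode
// or channelInterpretation, so no expectedNodeOptions argument is needed. The
// constrainedOptions object below only shows the assumed shape that a test for
// a constrained node would pass; its values are invented for illustration.
function exampleTestAudioNodeOptionsUsage(should) {
  let context = initializeContext(should);  // defined later in this file

  // Unconstrained node: all valid count/mode/interpretation values must work.
  testAudioNodeOptions(should, context, 'GainNode');

  // Shape of expectedNodeOptions for a hypothetical node whose channelCount
  // and channelCountMode are fixed and throw NotSupportedError when changed:
  let constrainedOptions = {
    channelCount:
        {value: 2, isFixed: true, exceptionType: 'NotSupportedError'},
    channelCountMode:
        {value: 'explicit', isFixed: true, exceptionType: 'NotSupportedError'},
    additionalOptions: {}
  };
  // testAudioNodeOptions(should, context, '<SomeConstrainedNode>',
  //                      constrainedOptions);
  return constrainedOptions;
}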
+function testAudioNodeOptions(should, context, nodeName, expectedNodeOptions) { + if (expectedNodeOptions === undefined) + expectedNodeOptions = {}; + let node; + + // Test that we can set channelCount and that errors are thrown for + // invalid values + let testChannelCount = 17; + if (expectedNodeOptions.channelCount) { + testChannelCount = expectedNodeOptions.channelCount.value; + } + should( + () => { + node = new window[nodeName]( + context, Object.assign({}, expectedNodeOptions.additionalOptions, { + channelCount: testChannelCount + })); + }, + 'new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})') + .notThrow(); + should(node.channelCount, 'node.channelCount').beEqualTo(testChannelCount); + + if (expectedNodeOptions.channelCount && + expectedNodeOptions.channelCount.isFixed) { + // The channel count is fixed. Verify that we throw an error if + // we try to change it. Arbitrarily set the count to be one more + // than the expected value. + testChannelCount = expectedNodeOptions.channelCount.value + 1; + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelCount: testChannelCount})); + }, + 'new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})') + .throw(DOMException, + expectedNodeOptions.channelCount.exceptionType); + // And test that setting it to the fixed value does not throw. + testChannelCount = expectedNodeOptions.channelCount.value; + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelCount: testChannelCount})); + node.channelCount = testChannelCount; + }, + '(new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})).channelCount = ' + testChannelCount) + .notThrow(); + } else { + // The channel count is not fixed. Try to set the count to invalid + // values and make sure an error is thrown. + [0, 99].forEach(testValue => { + should(() => { + node = new window[nodeName]( + context, Object.assign({}, expectedNodeOptions.additionalOptions, { + channelCount: testValue + })); + }, `new ${nodeName}(c, {channelCount: ${testValue}})`) + .throw(DOMException, 'NotSupportedError'); + }); + } + + // Test channelCountMode + let testChannelCountMode = 'max'; + if (expectedNodeOptions.channelCountMode) { + testChannelCountMode = expectedNodeOptions.channelCountMode.value; + } + should( + () => { + node = new window[nodeName]( + context, Object.assign({}, expectedNodeOptions.additionalOptions, { + channelCountMode: testChannelCountMode + })); + }, + 'new ' + nodeName + '(c, {channelCountMode: "' + testChannelCountMode + + '"}') + .notThrow(); + should(node.channelCountMode, 'node.channelCountMode') + .beEqualTo(testChannelCountMode); + + if (expectedNodeOptions.channelCountMode && + expectedNodeOptions.channelCountMode.isFixed) { + // Channel count mode is fixed. Test setting to something else throws. + ['max', 'clamped-max', 'explicit'].forEach(testValue => { + if (testValue !== expectedNodeOptions.channelCountMode.value) { + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelCountMode: testValue})); + }, + `new ${nodeName}(c, {channelCountMode: "${testValue}"})`) + .throw(DOMException, + expectedNodeOptions.channelCountMode.exceptionType); + } else { + // Test that explicitly setting the the fixed value is allowed. 
+ should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelCountMode: testValue})); + node.channelCountMode = testValue; + }, + `(new ${nodeName}(c, {channelCountMode: "${testValue}"})).channelCountMode = "${testValue}"`) + .notThrow(); + } + }); + } else { + // Mode is not fixed. Verify that we can set the mode to all valid + // values, and that we throw for invalid values. + + let testValues = ['max', 'clamped-max', 'explicit']; + + testValues.forEach(testValue => { + should(() => { + node = new window[nodeName]( + context, Object.assign({}, expectedNodeOptions.additionalOptions, { + channelCountMode: testValue + })); + }, `new ${nodeName}(c, {channelCountMode: "${testValue}"})`).notThrow(); + should( + node.channelCountMode, 'node.channelCountMode after valid setter') + .beEqualTo(testValue); + + }); + + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelCountMode: 'foobar'})); + }, + 'new ' + nodeName + '(c, {channelCountMode: "foobar"}') + .throw(TypeError); + should(node.channelCountMode, 'node.channelCountMode after invalid setter') + .beEqualTo(testValues[testValues.length - 1]); + } + + // Test channelInterpretation + if (expectedNodeOptions.channelInterpretation && + expectedNodeOptions.channelInterpretation.isFixed) { + // The channel interpretation is fixed. Verify that we throw an + // error if we try to change it. + ['speakers', 'discrete'].forEach(testValue => { + if (testValue !== expectedNodeOptions.channelInterpretation.value) { + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionOptions, + {channelInterpretation: testValue})); + }, + `new ${nodeName}(c, {channelInterpretation: "${testValue}"})`) + .throw(DOMException, + expectedNodeOptions.channelCountMode.exceptionType); + } else { + // Check that assigning the fixed value is OK. + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionOptions, + {channelInterpretation: testValue})); + node.channelInterpretation = testValue; + }, + `(new ${nodeName}(c, {channelInterpretation: "${testValue}"})).channelInterpretation = "${testValue}"`) + .notThrow(); + } + }); + } else { + // Channel interpretation is not fixed. Verify that we can set it + // to all possible values. 
+ should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelInterpretation: 'speakers'})); + }, + 'new ' + nodeName + '(c, {channelInterpretation: "speakers"})') + .notThrow(); + should(node.channelInterpretation, 'node.channelInterpretation') + .beEqualTo('speakers'); + + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelInterpretation: 'discrete'})); + }, + 'new ' + nodeName + '(c, {channelInterpretation: "discrete"})') + .notThrow(); + should(node.channelInterpretation, 'node.channelInterpretation') + .beEqualTo('discrete'); + + should( + () => { + node = new window[nodeName]( + context, + Object.assign( + {}, expectedNodeOptions.additionalOptions, + {channelInterpretation: 'foobar'})); + }, + 'new ' + nodeName + '(c, {channelInterpretation: "foobar"})') + .throw(TypeError); + should( + node.channelInterpretation, + 'node.channelInterpretation after invalid setter') + .beEqualTo('discrete'); + } +} + +function initializeContext(should) { + let c; + should(() => { + c = new OfflineAudioContext(1, 1, 48000); + }, 'context = new OfflineAudioContext(...)').notThrow(); + + return c; +} + +function testInvalidConstructor(should, name, context) { + should(() => { + new window[name](); + }, 'new ' + name + '()').throw(TypeError); + should(() => { + new window[name](1); + }, 'new ' + name + '(1)').throw(TypeError); + should(() => { + new window[name](context, 42); + }, 'new ' + name + '(context, 42)').throw(TypeError); +} + +function testDefaultConstructor(should, name, context, options) { + let node; + + let message = options.prefix + ' = new ' + name + '(context'; + if (options.constructorOptions) + message += ', ' + JSON.stringify(options.constructorOptions); + message += ')' + + should(() => { + node = new window[name](context, options.constructorOptions); + }, message).notThrow(); + + should(node instanceof window[name], options.prefix + ' instanceof ' + name) + .beEqualTo(true); + should(node.numberOfInputs, options.prefix + '.numberOfInputs') + .beEqualTo(options.numberOfInputs); + should(node.numberOfOutputs, options.prefix + '.numberOfOutputs') + .beEqualTo(options.numberOfOutputs); + should(node.channelCount, options.prefix + '.channelCount') + .beEqualTo(options.channelCount); + should(node.channelCountMode, options.prefix + '.channelCountMode') + .beEqualTo(options.channelCountMode); + should(node.channelInterpretation, options.prefix + '.channelInterpretation') + .beEqualTo(options.channelInterpretation); + + return node; +} + +function testDefaultAttributes(should, node, prefix, items) { + items.forEach((item) => { + let attr = node[item.name]; + if (attr instanceof AudioParam) { + should(attr.value, prefix + '.' + item.name + '.value') + .beEqualTo(item.value); + } else { + should(attr, prefix + '.' + item.name).beEqualTo(item.value); + } + }); +} diff --git a/testing/web-platform/tests/webaudio/resources/audioparam-testing.js b/testing/web-platform/tests/webaudio/resources/audioparam-testing.js new file mode 100644 index 0000000000..bc90ddbef8 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audioparam-testing.js @@ -0,0 +1,554 @@ +(function(global) { + + // Information about the starting/ending times and starting/ending values for + // each time interval. + let timeValueInfo; + + // The difference between starting values between each time interval. 
+ let startingValueDelta; + + // For any automation function that has an end or target value, the end value + // is based the starting value of the time interval. The starting value will + // be increased or decreased by |startEndValueChange|. We choose half of + // |startingValueDelta| so that the ending value will be distinct from the + // starting value for next time interval. This allows us to detect where the + // ramp begins and ends. + let startEndValueChange; + + // Default threshold to use for detecting discontinuities that should appear + // at each time interval. + let discontinuityThreshold; + + // Time interval between value changes. It is best if 1 / numberOfTests is + // not close to timeInterval. + let timeIntervalInternal = .03; + + let context; + + // Make sure we render long enough to capture all of our test data. + function renderLength(numberOfTests) { + return timeToSampleFrame((numberOfTests + 1) * timeInterval, sampleRate); + } + + // Create a constant reference signal with the given |value|. Basically the + // same as |createConstantBuffer|, but with the parameters to match the other + // create functions. The |endValue| is ignored. + function createConstantArray( + startTime, endTime, value, endValue, sampleRate) { + let startFrame = timeToSampleFrame(startTime, sampleRate); + let endFrame = timeToSampleFrame(endTime, sampleRate); + let length = endFrame - startFrame; + + let buffer = createConstantBuffer(context, length, value); + + return buffer.getChannelData(0); + } + + function getStartEndFrames(startTime, endTime, sampleRate) { + // Start frame is the ceiling of the start time because the ramp starts at + // or after the sample frame. End frame is the ceiling because it's the + // exclusive ending frame of the automation. + let startFrame = Math.ceil(startTime * sampleRate); + let endFrame = Math.ceil(endTime * sampleRate); + + return {startFrame: startFrame, endFrame: endFrame}; + } + + // Create a linear ramp starting at |startValue| and ending at |endValue|. The + // ramp starts at time |startTime| and ends at |endTime|. (The start and end + // times are only used to compute how many samples to return.) + function createLinearRampArray( + startTime, endTime, startValue, endValue, sampleRate) { + let frameInfo = getStartEndFrames(startTime, endTime, sampleRate); + let startFrame = frameInfo.startFrame; + let endFrame = frameInfo.endFrame; + let length = endFrame - startFrame; + let array = new Array(length); + + let step = Math.fround( + (endValue - startValue) / (endTime - startTime) / sampleRate); + let start = Math.fround( + startValue + + (endValue - startValue) * (startFrame / sampleRate - startTime) / + (endTime - startTime)); + + let slope = (endValue - startValue) / (endTime - startTime); + + // v(t) = v0 + (v1 - v0)*(t-t0)/(t1-t0) + for (k = 0; k < length; ++k) { + // array[k] = Math.fround(start + k * step); + let t = (startFrame + k) / sampleRate; + array[k] = startValue + slope * (t - startTime); + } + + return array; + } + + // Create an exponential ramp starting at |startValue| and ending at + // |endValue|. The ramp starts at time |startTime| and ends at |endTime|. + // (The start and end times are only used to compute how many samples to + // return.) 
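  // Illustrative sketch (assumes audio-param.js from this directory is also
  // loaded, which defines audioParamExponentialRamp): the reference arrays
  // built below should agree, frame by frame, with the closed-form automation
  // formulas. The times, values, and sample rate are arbitrary inputs chosen
  // only for illustration.
  function exampleCompareRampAgainstFormula() {
    let v0 = 1.0, v1 = 0.5, t0 = 0.0, t1 = 0.03, rate = 44100;
    let reference = createExponentialRampArray(t0, t1, v0, v1, rate);
    let frames = getStartEndFrames(t0, t1, rate);
    let maxDiff = 0;
    for (let k = 0; k < reference.length; ++k) {
      let t = (frames.startFrame + k) / rate;
      let formula = audioParamExponentialRamp(t, v0, t0, v1, t1);
      maxDiff = Math.max(maxDiff, Math.abs(reference[k] - formula));
    }
    // maxDiff is expected to be tiny, limited by the single-precision
    // rounding (Math.fround) used when building the reference array.
    return maxDiff;
  }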
+ function createExponentialRampArray( + startTime, endTime, startValue, endValue, sampleRate) { + let deltaTime = endTime - startTime; + + let frameInfo = getStartEndFrames(startTime, endTime, sampleRate); + let startFrame = frameInfo.startFrame; + let endFrame = frameInfo.endFrame; + let length = endFrame - startFrame; + let array = new Array(length); + + let ratio = endValue / startValue; + + // v(t) = v0*(v1/v0)^((t-t0)/(t1-t0)) + for (let k = 0; k < length; ++k) { + let t = Math.fround((startFrame + k) / sampleRate); + array[k] = Math.fround( + startValue * Math.pow(ratio, (t - startTime) / deltaTime)); + } + + return array; + } + + function discreteTimeConstantForSampleRate(timeConstant, sampleRate) { + return 1 - Math.exp(-1 / (sampleRate * timeConstant)); + } + + // Create a signal that starts at |startValue| and exponentially approaches + // the target value of |targetValue|, using a time constant of |timeConstant|. + // The ramp starts at time |startTime| and ends at |endTime|. (The start and + // end times are only used to compute how many samples to return.) + function createExponentialApproachArray( + startTime, endTime, startValue, targetValue, sampleRate, timeConstant) { + let startFrameFloat = startTime * sampleRate; + let frameInfo = getStartEndFrames(startTime, endTime, sampleRate); + let startFrame = frameInfo.startFrame; + let endFrame = frameInfo.endFrame; + let length = Math.floor(endFrame - startFrame); + let array = new Array(length); + let c = discreteTimeConstantForSampleRate(timeConstant, sampleRate); + + let delta = startValue - targetValue; + + // v(t) = v1 + (v0 - v1) * exp(-(t-t0)/tau) + for (let k = 0; k < length; ++k) { + let t = (startFrame + k) / sampleRate; + let value = + targetValue + delta * Math.exp(-(t - startTime) / timeConstant); + array[k] = value; + } + + return array; + } + + // Create a sine wave of the specified duration. + function createReferenceSineArray( + startTime, endTime, startValue, endValue, sampleRate) { + // Ignore |startValue| and |endValue| for the sine wave. + let curve = createSineWaveArray( + endTime - startTime, freqHz, sineAmplitude, sampleRate); + // Sample the curve appropriately. + let frameInfo = getStartEndFrames(startTime, endTime, sampleRate); + let startFrame = frameInfo.startFrame; + let endFrame = frameInfo.endFrame; + let length = Math.floor(endFrame - startFrame); + let array = new Array(length); + + // v(t) = linearly interpolate between V[k] and V[k + 1] where k = + // floor((N-1)/duration*(t - t0)) + let f = (length - 1) / (endTime - startTime); + + for (let k = 0; k < length; ++k) { + let t = (startFrame + k) / sampleRate; + let indexFloat = f * (t - startTime); + let index = Math.floor(indexFloat); + if (index + 1 < length) { + let v0 = curve[index]; + let v1 = curve[index + 1]; + array[k] = v0 + (v1 - v0) * (indexFloat - index); + } else { + array[k] = curve[length - 1]; + } + } + + return array; + } + + // Create a sine wave of the given frequency and amplitude. The sine wave is + // offset by half the amplitude so that result is always positive. 
+ function createSineWaveArray(durationSeconds, freqHz, amplitude, sampleRate) { + let length = timeToSampleFrame(durationSeconds, sampleRate); + let signal = new Float32Array(length); + let omega = 2 * Math.PI * freqHz / sampleRate; + let halfAmplitude = amplitude / 2; + + for (let k = 0; k < length; ++k) { + signal[k] = halfAmplitude + halfAmplitude * Math.sin(omega * k); + } + + return signal; + } + + // Return the difference between the starting value and the ending value for + // time interval |timeIntervalIndex|. We alternate between an end value that + // is above or below the starting value. + function endValueDelta(timeIntervalIndex) { + if (timeIntervalIndex & 1) { + return -startEndValueChange; + } else { + return startEndValueChange; + } + } + + // Relative error metric + function relativeErrorMetric(actual, expected) { + return (actual - expected) / Math.abs(expected); + } + + // Difference metric + function differenceErrorMetric(actual, expected) { + return actual - expected; + } + + // Return the difference between the starting value at |timeIntervalIndex| and + // the starting value at the next time interval. Since we started at a large + // initial value, we decrease the value at each time interval. + function valueUpdate(timeIntervalIndex) { + return -startingValueDelta; + } + + // Compare a section of the rendered data against our expected signal. + function comparePartialSignals( + should, rendered, expectedFunction, startTime, endTime, valueInfo, + sampleRate, errorMetric) { + let startSample = timeToSampleFrame(startTime, sampleRate); + let expected = expectedFunction( + startTime, endTime, valueInfo.startValue, valueInfo.endValue, + sampleRate, timeConstant); + + let n = expected.length; + let maxError = -1; + let maxErrorIndex = -1; + + for (let k = 0; k < n; ++k) { + // Make sure we don't pass these tests because a NaN has been generated in + // either the + // rendered data or the reference data. + if (!isValidNumber(rendered[startSample + k])) { + maxError = Infinity; + maxErrorIndex = startSample + k; + should( + isValidNumber(rendered[startSample + k]), + 'NaN or infinity for rendered data at ' + maxErrorIndex) + .beTrue(); + break; + } + if (!isValidNumber(expected[k])) { + maxError = Infinity; + maxErrorIndex = startSample + k; + should( + isValidNumber(expected[k]), + 'NaN or infinity for rendered data at ' + maxErrorIndex) + .beTrue(); + break; + } + let error = Math.abs(errorMetric(rendered[startSample + k], expected[k])); + if (error > maxError) { + maxError = error; + maxErrorIndex = k; + } + } + + return {maxError: maxError, index: maxErrorIndex, expected: expected}; + } + + // Find the discontinuities in the data and compare the locations of the + // discontinuities with the times that define the time intervals. There is a + // discontinuity if the difference between successive samples exceeds the + // threshold. + function verifyDiscontinuities(should, values, times, threshold) { + let n = values.length; + let success = true; + let badLocations = 0; + let breaks = []; + + // Find discontinuities. + for (let k = 1; k < n; ++k) { + if (Math.abs(values[k] - values[k - 1]) > threshold) { + breaks.push(k); + } + } + + let testCount; + + // If there are numberOfTests intervals, there are only numberOfTests - 1 + // internal interval boundaries. Hence the maximum number of discontinuties + // we expect to find is numberOfTests - 1. If we find more than that, we + // have no reference to compare against. 
We also assume that the actual + // discontinuities are close to the expected ones. + // + // This is just a sanity check when something goes really wrong. For + // example, if the threshold is too low, every sample frame looks like a + // discontinuity. + if (breaks.length >= numberOfTests) { + testCount = numberOfTests - 1; + should(breaks.length, 'Number of discontinuities') + .beLessThan(numberOfTests); + success = false; + } else { + testCount = breaks.length; + } + + // Compare the location of each discontinuity with the end time of each + // interval. (There is no discontinuity at the start of the signal.) + for (let k = 0; k < testCount; ++k) { + let expectedSampleFrame = timeToSampleFrame(times[k + 1], sampleRate); + if (breaks[k] != expectedSampleFrame) { + success = false; + ++badLocations; + should(breaks[k], 'Discontinuity at index') + .beEqualTo(expectedSampleFrame); + } + } + + if (badLocations) { + should(badLocations, 'Number of discontinuites at incorrect locations') + .beEqualTo(0); + success = false; + } else { + should( + breaks.length + 1, + 'Number of tests started and ended at the correct time') + .beEqualTo(numberOfTests); + } + + return success; + } + + // Compare the rendered data with the expected data. + // + // testName - string describing the test + // + // maxError - maximum allowed difference between the rendered data and the + // expected data + // + // rendererdData - array containing the rendered (actual) data + // + // expectedFunction - function to compute the expected data + // + // timeValueInfo - array containing information about the start and end times + // and the start and end values of each interval. + // + // breakThreshold - threshold to use for determining discontinuities. + function compareSignals( + should, testName, maxError, renderedData, expectedFunction, timeValueInfo, + breakThreshold, errorMetric) { + let success = true; + let failedTestCount = 0; + let times = timeValueInfo.times; + let values = timeValueInfo.values; + let n = values.length; + let expectedSignal = []; + + success = + verifyDiscontinuities(should, renderedData, times, breakThreshold); + + for (let k = 0; k < n; ++k) { + let result = comparePartialSignals( + should, renderedData, expectedFunction, times[k], times[k + 1], + values[k], sampleRate, errorMetric); + + expectedSignal = + expectedSignal.concat(Array.prototype.slice.call(result.expected)); + + should( + result.maxError, + 'Max error for test ' + k + ' at offset ' + + (result.index + timeToSampleFrame(times[k], sampleRate))) + .beLessThanOrEqualTo(maxError); + } + + should( + failedTestCount, + 'Number of failed tests with an acceptable relative tolerance of ' + + maxError) + .beEqualTo(0); + } + + // Create a function to test the rendered data with the reference data. + // + // testName - string describing the test + // + // error - max allowed error between rendered data and the reference data. + // + // referenceFunction - function that generates the reference data to be + // compared with the rendered data. + // + // jumpThreshold - optional parameter that specifies the threshold to use for + // detecting discontinuities. If not specified, defaults to + // discontinuityThreshold. 
+ // + function checkResultFunction( + task, should, testName, error, referenceFunction, jumpThreshold, + errorMetric) { + return function(event) { + let buffer = event.renderedBuffer; + renderedData = buffer.getChannelData(0); + + let threshold; + + if (!jumpThreshold) { + threshold = discontinuityThreshold; + } else { + threshold = jumpThreshold; + } + + compareSignals( + should, testName, error, renderedData, referenceFunction, + timeValueInfo, threshold, errorMetric); + task.done(); + } + } + + // Run all the automation tests. + // + // numberOfTests - number of tests (time intervals) to run. + // + // initialValue - The initial value of the first time interval. + // + // setValueFunction - function that sets the specified value at the start of a + // time interval. + // + // automationFunction - function that sets the end value for the time + // interval. It specifies how the value approaches the end value. + // + // An object is returned containing an array of start times for each time + // interval, and an array giving the start and end values for the interval. + function doAutomation( + numberOfTests, initialValue, setValueFunction, automationFunction) { + let timeInfo = [0]; + let valueInfo = []; + let value = initialValue; + + for (let k = 0; k < numberOfTests; ++k) { + let startTime = k * timeInterval; + let endTime = (k + 1) * timeInterval; + let endValue = value + endValueDelta(k); + + // Set the value at the start of the time interval. + setValueFunction(value, startTime); + + // Specify the end or target value, and how we should approach it. + automationFunction(endValue, startTime, endTime); + + // Keep track of the start times, and the start and end values for each + // time interval. + timeInfo.push(endTime); + valueInfo.push({startValue: value, endValue: endValue}); + + value += valueUpdate(k); + } + + return {times: timeInfo, values: valueInfo}; + } + + // Create the audio graph for the test and then run the test. + // + // numberOfTests - number of time intervals (tests) to run. + // + // initialValue - the initial value of the gain at time 0. + // + // setValueFunction - function to set the value at the beginning of each time + // interval. + // + // automationFunction - the AudioParamTimeline automation function + // + // testName - string indicating the test that is being run. + // + // maxError - maximum allowed error between the rendered data and the + // reference data + // + // referenceFunction - function that generates the reference data to be + // compared against the rendered data. + // + // jumpThreshold - optional parameter that specifies the threshold to use for + // detecting discontinuities. If not specified, defaults to + // discontinuityThreshold. + // + function createAudioGraphAndTest( + task, should, numberOfTests, initialValue, setValueFunction, + automationFunction, testName, maxError, referenceFunction, jumpThreshold, + errorMetric) { + // Create offline audio context. + context = + new OfflineAudioContext(2, renderLength(numberOfTests), sampleRate); + let constantBuffer = + createConstantBuffer(context, renderLength(numberOfTests), 1); + + // We use an AudioGainNode here simply as a convenient way to test the + // AudioParam automation, since it's easy to pass a constant value through + // the node, automate the .gain attribute and observe the resulting values. 
+ + gainNode = context.createGain(); + + let bufferSource = context.createBufferSource(); + bufferSource.buffer = constantBuffer; + bufferSource.connect(gainNode); + gainNode.connect(context.destination); + + // Set up default values for the parameters that control how the automation + // test values progress for each time interval. + startingValueDelta = initialValue / numberOfTests; + startEndValueChange = startingValueDelta / 2; + discontinuityThreshold = startEndValueChange / 2; + + // Run the automation tests. + timeValueInfo = doAutomation( + numberOfTests, initialValue, setValueFunction, automationFunction); + bufferSource.start(0); + + context.oncomplete = checkResultFunction( + task, should, testName, maxError, referenceFunction, jumpThreshold, + errorMetric || relativeErrorMetric); + context.startRendering(); + } + + // Export local references to global scope. All the new objects in this file + // must be exported through this if it is to be used in the actual test HTML + // page. + let exports = { + 'sampleRate': 44100, + 'gainNode': null, + 'timeInterval': timeIntervalInternal, + + // Some suitable time constant so that we can see a significant change over + // a timeInterval. This is only needed by setTargetAtTime() which needs a + // time constant. + 'timeConstant': timeIntervalInternal / 3, + + 'renderLength': renderLength, + 'createConstantArray': createConstantArray, + 'getStartEndFrames': getStartEndFrames, + 'createLinearRampArray': createLinearRampArray, + 'createExponentialRampArray': createExponentialRampArray, + 'discreteTimeConstantForSampleRate': discreteTimeConstantForSampleRate, + 'createExponentialApproachArray': createExponentialApproachArray, + 'createReferenceSineArray': createReferenceSineArray, + 'createSineWaveArray': createSineWaveArray, + 'endValueDelta': endValueDelta, + 'relativeErrorMetric': relativeErrorMetric, + 'differenceErrorMetric': differenceErrorMetric, + 'valueUpdate': valueUpdate, + 'comparePartialSignals': comparePartialSignals, + 'verifyDiscontinuities': verifyDiscontinuities, + 'compareSignals': compareSignals, + 'checkResultFunction': checkResultFunction, + 'doAutomation': doAutomation, + 'createAudioGraphAndTest': createAudioGraphAndTest + }; + + for (let reference in exports) { + global[reference] = exports[reference]; + } + +})(window); diff --git a/testing/web-platform/tests/webaudio/resources/audit-util.js b/testing/web-platform/tests/webaudio/resources/audit-util.js new file mode 100644 index 0000000000..a4dea79658 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audit-util.js @@ -0,0 +1,195 @@ +// Copyright 2016 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + + +/** + * @fileOverview This file includes legacy utility functions for the layout + * test. + */ + +// How many frames in a WebAudio render quantum. +let RENDER_QUANTUM_FRAMES = 128; + +// Compare two arrays (commonly extracted from buffer.getChannelData()) with +// constraints: +// options.thresholdSNR: Minimum allowed SNR between the actual and expected +// signal. The default value is 10000. +// options.thresholdDiffULP: Maximum allowed difference between the actual +// and expected signal in ULP(Unit in the last place). The default is 0. +// options.thresholdDiffCount: Maximum allowed number of sample differences +// which exceeds the threshold. The default is 0. 
+// options.bitDepth: The expected result is assumed to come from an audio +// file with this number of bits of precision. The default is 16. +function compareBuffersWithConstraints(should, actual, expected, options) { + if (!options) + options = {}; + + // Only print out the message if the lengths are different; the + // expectation is that they are the same, so don't clutter up the + // output. + if (actual.length !== expected.length) { + should( + actual.length === expected.length, + 'Length of actual and expected buffers should match') + .beTrue(); + } + + let maxError = -1; + let diffCount = 0; + let errorPosition = -1; + let thresholdSNR = (options.thresholdSNR || 10000); + + let thresholdDiffULP = (options.thresholdDiffULP || 0); + let thresholdDiffCount = (options.thresholdDiffCount || 0); + + // By default, the bit depth is 16. + let bitDepth = (options.bitDepth || 16); + let scaleFactor = Math.pow(2, bitDepth - 1); + + let noisePower = 0, signalPower = 0; + + for (let i = 0; i < actual.length; i++) { + let diff = actual[i] - expected[i]; + noisePower += diff * diff; + signalPower += expected[i] * expected[i]; + + if (Math.abs(diff) > maxError) { + maxError = Math.abs(diff); + errorPosition = i; + } + + // The reference file is a 16-bit WAV file, so we will almost never get + // an exact match between it and the actual floating-point result. + if (Math.abs(diff) > scaleFactor) + diffCount++; + } + + let snr = 10 * Math.log10(signalPower / noisePower); + let maxErrorULP = maxError * scaleFactor; + + should(snr, 'SNR').beGreaterThanOrEqualTo(thresholdSNR); + + should( + maxErrorULP, + options.prefix + ': Maximum difference (in ulp units (' + bitDepth + + '-bits))') + .beLessThanOrEqualTo(thresholdDiffULP); + + should(diffCount, options.prefix + ': Number of differences between results') + .beLessThanOrEqualTo(thresholdDiffCount); +} + +// Create an impulse in a buffer of length sampleFrameLength +function createImpulseBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(1, sampleFrameLength, context.sampleRate); + let n = audioBuffer.length; + let dataL = audioBuffer.getChannelData(0); + + for (let k = 0; k < n; ++k) { + dataL[k] = 0; + } + dataL[0] = 1; + + return audioBuffer; +} + +// Create a buffer of the given length with a linear ramp having values 0 <= x < +// 1. +function createLinearRampBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(1, sampleFrameLength, context.sampleRate); + let n = audioBuffer.length; + let dataL = audioBuffer.getChannelData(0); + + for (let i = 0; i < n; ++i) + dataL[i] = i / n; + + return audioBuffer; +} + +// Create an AudioBuffer of length |sampleFrameLength| having a constant value +// |constantValue|. If |constantValue| is a number, the buffer has one channel +// filled with that value. If |constantValue| is an array, the buffer is created +// wit a number of channels equal to the length of the array, and channel k is +// filled with the k'th element of the |constantValue| array. 
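// Usage sketch (illustrative; the context and values are arbitrary): passing a
// number to createConstantBuffer gives a mono buffer, while passing an array
// gives one channel per element, each channel filled with that element's value.
function exampleCreateConstantBufferUsage(context) {
  // One channel, 128 frames, every sample equal to 0.5.
  let mono = createConstantBuffer(context, RENDER_QUANTUM_FRAMES, 0.5);

  // Three channels, 128 frames: channel 0 is all 1s, channel 1 all 2s,
  // channel 2 all 3s.
  let threeChannel =
      createConstantBuffer(context, RENDER_QUANTUM_FRAMES, [1, 2, 3]);

  return {mono: mono, threeChannel: threeChannel};
}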
+function createConstantBuffer(context, sampleFrameLength, constantValue) { + let channels; + let values; + + if (typeof constantValue === 'number') { + channels = 1; + values = [constantValue]; + } else { + channels = constantValue.length; + values = constantValue; + } + + let audioBuffer = + context.createBuffer(channels, sampleFrameLength, context.sampleRate); + let n = audioBuffer.length; + + for (let c = 0; c < channels; ++c) { + let data = audioBuffer.getChannelData(c); + for (let i = 0; i < n; ++i) + data[i] = values[c]; + } + + return audioBuffer; +} + +// Create a stereo impulse in a buffer of length sampleFrameLength +function createStereoImpulseBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(2, sampleFrameLength, context.sampleRate); + let n = audioBuffer.length; + let dataL = audioBuffer.getChannelData(0); + let dataR = audioBuffer.getChannelData(1); + + for (let k = 0; k < n; ++k) { + dataL[k] = 0; + dataR[k] = 0; + } + dataL[0] = 1; + dataR[0] = 1; + + return audioBuffer; +} + +// Convert time (in seconds) to sample frames. +function timeToSampleFrame(time, sampleRate) { + return Math.floor(0.5 + time * sampleRate); +} + +// Compute the number of sample frames consumed by noteGrainOn with +// the specified |grainOffset|, |duration|, and |sampleRate|. +function grainLengthInSampleFrames(grainOffset, duration, sampleRate) { + let startFrame = timeToSampleFrame(grainOffset, sampleRate); + let endFrame = timeToSampleFrame(grainOffset + duration, sampleRate); + + return endFrame - startFrame; +} + +// True if the number is not an infinity or NaN +function isValidNumber(x) { + return !isNaN(x) && (x != Infinity) && (x != -Infinity); +} + +// Compute the (linear) signal-to-noise ratio between |actual| and +// |expected|. The result is NOT in dB! If the |actual| and +// |expected| have different lengths, the shorter length is used. +function computeSNR(actual, expected) { + let signalPower = 0; + let noisePower = 0; + + let length = Math.min(actual.length, expected.length); + + for (let k = 0; k < length; ++k) { + let diff = actual[k] - expected[k]; + signalPower += expected[k] * expected[k]; + noisePower += diff * diff; + } + + return signalPower / noisePower; +} diff --git a/testing/web-platform/tests/webaudio/resources/audit.js b/testing/web-platform/tests/webaudio/resources/audit.js new file mode 100644 index 0000000000..ed0078b9c5 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/audit.js @@ -0,0 +1,1447 @@ +// Copyright 2016 The Chromium Authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// See https://github.com/web-platform-tests/wpt/issues/12781 for information on +// the purpose of audit.js, and why testharness.js does not suffice. + +/** + * @fileOverview WebAudio layout test utility library. Built around W3C's + * testharness.js. Includes asynchronous test task manager, + * assertion utilities. + * @dependency testharness.js + */ + + +(function() { + + 'use strict'; + + // Selected methods from testharness.js. + let testharnessProperties = [ + 'test', 'async_test', 'promise_test', 'promise_rejects_js', 'generate_tests', + 'setup', 'done', 'assert_true', 'assert_false' + ]; + + // Check if testharness.js is properly loaded. Throw otherwise. + for (let name in testharnessProperties) { + if (!self.hasOwnProperty(testharnessProperties[name])) + throw new Error('Cannot proceed. 
testharness.js is not loaded.'); + } +})(); + + +window.Audit = (function() { + + 'use strict'; + + // NOTE: Moving this method (or any other code above) will change the location + // of 'CONSOLE ERROR...' message in the expected text files. + function _logError(message) { + console.error('[audit.js] ' + message); + } + + function _logPassed(message) { + test(function(arg) { + assert_true(true); + }, message); + } + + function _logFailed(message, detail) { + test(function() { + assert_true(false, detail); + }, message); + } + + function _throwException(message) { + throw new Error(message); + } + + // TODO(hongchan): remove this hack after confirming all the tests are + // finished correctly. (crbug.com/708817) + const _testharnessDone = window.done; + window.done = () => { + _throwException('Do NOT call done() method from the test code.'); + }; + + // Generate a descriptive string from a target value in various types. + function _generateDescription(target, options) { + let targetString; + + switch (typeof target) { + case 'object': + // Handle Arrays. + if (target instanceof Array || target instanceof Float32Array || + target instanceof Float64Array || target instanceof Uint8Array) { + let arrayElements = target.length < options.numberOfArrayElements ? + String(target) : + String(target.slice(0, options.numberOfArrayElements)) + '...'; + targetString = '[' + arrayElements + ']'; + } else if (target === null) { + targetString = String(target); + } else { + targetString = '' + String(target).split(/[\s\]]/)[1]; + } + break; + case 'function': + if (Error.isPrototypeOf(target)) { + targetString = "EcmaScript error " + target.name; + } else { + targetString = String(target); + } + break; + default: + targetString = String(target); + break; + } + + return targetString; + } + + // Return a string suitable for printing one failed element in + // |beCloseToArray|. + function _formatFailureEntry(index, actual, expected, abserr, threshold) { + return '\t[' + index + ']\t' + actual.toExponential(16) + '\t' + + expected.toExponential(16) + '\t' + abserr.toExponential(16) + '\t' + + (abserr / Math.abs(expected)).toExponential(16) + '\t' + + threshold.toExponential(16); + } + + // Compute the error threshold criterion for |beCloseToArray| + function _closeToThreshold(abserr, relerr, expected) { + return Math.max(abserr, relerr * Math.abs(expected)); + } + + /** + * @class Should + * @description Assertion subtask for the Audit task. + * @param {Task} parentTask Associated Task object. + * @param {Any} actual Target value to be tested. + * @param {String} actualDescription String description of the test target. + */ + class Should { + constructor(parentTask, actual, actualDescription) { + this._task = parentTask; + + this._actual = actual; + this._actualDescription = (actualDescription || null); + this._expected = null; + this._expectedDescription = null; + + this._detail = ''; + // If true and the test failed, print the actual value at the + // end of the message. + this._printActualForFailure = true; + + this._result = null; + + /** + * @param {Number} numberOfErrors Number of errors to be printed. + * @param {Number} numberOfArrayElements Number of array elements to be + * printed in the test log. + * @param {Boolean} verbose Verbose output from the assertion. 
+ */ + this._options = { + numberOfErrors: 4, + numberOfArrayElements: 16, + verbose: false + }; + } + + _processArguments(args) { + if (args.length === 0) + return; + + if (args.length > 0) + this._expected = args[0]; + + if (typeof args[1] === 'string') { + // case 1: (expected, description, options) + this._expectedDescription = args[1]; + Object.assign(this._options, args[2]); + } else if (typeof args[1] === 'object') { + // case 2: (expected, options) + Object.assign(this._options, args[1]); + } + } + + _buildResultText() { + if (this._result === null) + _throwException('Illegal invocation: the assertion is not finished.'); + + let actualString = _generateDescription(this._actual, this._options); + + // Use generated text when the description is not provided. + if (!this._actualDescription) + this._actualDescription = actualString; + + if (!this._expectedDescription) { + this._expectedDescription = + _generateDescription(this._expected, this._options); + } + + // For the assertion with a single operand. + this._detail = + this._detail.replace(/\$\{actual\}/g, this._actualDescription); + + // If there is a second operand (i.e. expected value), we have to build + // the string for it as well. + this._detail = + this._detail.replace(/\$\{expected\}/g, this._expectedDescription); + + // If there is any property in |_options|, replace the property name + // with the value. + for (let name in this._options) { + if (name === 'numberOfErrors' || name === 'numberOfArrayElements' || + name === 'verbose') { + continue; + } + + // The RegExp key string contains special character. Take care of it. + let re = '\$\{' + name + '\}'; + re = re.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, '\\$1'); + this._detail = this._detail.replace( + new RegExp(re, 'g'), _generateDescription(this._options[name])); + } + + // If the test failed, add the actual value at the end. + if (this._result === false && this._printActualForFailure === true) { + this._detail += ' Got ' + actualString + '.'; + } + } + + _finalize() { + if (this._result) { + _logPassed(' ' + this._detail); + } else { + _logFailed('X ' + this._detail); + } + + // This assertion is finished, so update the parent task accordingly. + this._task.update(this); + + // TODO(hongchan): configurable 'detail' message. + } + + _assert(condition, passDetail, failDetail) { + this._result = Boolean(condition); + this._detail = this._result ? passDetail : failDetail; + this._buildResultText(); + this._finalize(); + + return this._result; + } + + get result() { + return this._result; + } + + get detail() { + return this._detail; + } + + /** + * should() assertions. + * + * @example All the assertions can have 1, 2 or 3 arguments: + * should().doAssert(expected); + * should().doAssert(expected, options); + * should().doAssert(expected, expectedDescription, options); + * + * @param {Any} expected Expected value of the assertion. + * @param {String} expectedDescription Description of expected value. + * @param {Object} options Options for assertion. + * @param {Number} options.numberOfErrors Number of errors to be printed. + * (if applicable) + * @param {Number} options.numberOfArrayElements Number of array elements + * to be printed. (if + * applicable) + * @notes Some assertions can have additional options for their specific + * testing. + */ + + /** + * Check if |actual| exists. + * + * @example + * should({}, 'An empty object').exist(); + * @result + * "PASS An empty object does exist." 
+ */ + exist() { + return this._assert( + this._actual !== null && this._actual !== undefined, + '${actual} does exist.', '${actual} does not exist.'); + } + + /** + * Check if |actual| operation wrapped in a function throws an exception + * with a expected error type correctly. |expected| is optional. If it is an + * instance of DOMException, then the description (second argument) can be + * provided to be more strict about the expected exception type. |expected| + * also can be other generic error types such as TypeError, RangeError or + * etc. + * + * @example + * should(() => { let a = b; }, 'A bad code').throw(); + * should(() => { new SomeConstructor(); }, 'A bad construction') + * .throw(DOMException, 'NotSupportedError'); + * should(() => { let c = d; }, 'Assigning d to c') + * .throw(ReferenceError); + * should(() => { let e = f; }, 'Assigning e to f') + * .throw(ReferenceError, { omitErrorMessage: true }); + * + * @result + * "PASS A bad code threw an exception of ReferenceError: b is not + * defined." + * "PASS A bad construction threw DOMException:NotSupportedError." + * "PASS Assigning d to c threw ReferenceError: d is not defined." + * "PASS Assigning e to f threw ReferenceError: [error message + * omitted]." + */ + throw() { + this._processArguments(arguments); + this._printActualForFailure = false; + + let didThrowCorrectly = false; + let passDetail, failDetail; + + try { + // This should throw. + this._actual(); + // Catch did not happen, so the test is failed. + failDetail = '${actual} did not throw an exception.'; + } catch (error) { + let errorMessage = this._options.omitErrorMessage ? + ': [error message omitted]' : + ': "' + error.message + '"'; + if (this._expected === null || this._expected === undefined) { + // The expected error type was not given. + didThrowCorrectly = true; + passDetail = '${actual} threw ' + error.name + errorMessage + '.'; + } else if (this._expected === DOMException && + this._expectedDescription !== undefined) { + // Handles DOMException with an expected exception name. + if (this._expectedDescription === error.name) { + didThrowCorrectly = true; + passDetail = '${actual} threw ${expected}' + errorMessage + '.'; + } else { + didThrowCorrectly = false; + failDetail = + '${actual} threw "' + error.name + '" instead of ${expected}.'; + } + } else if (this._expected == error.constructor) { + // Handler other error types. + didThrowCorrectly = true; + passDetail = '${actual} threw ' + error.name + errorMessage + '.'; + } else { + didThrowCorrectly = false; + failDetail = + '${actual} threw "' + error.name + '" instead of ${expected}.'; + } + } + + return this._assert(didThrowCorrectly, passDetail, failDetail); + } + + /** + * Check if |actual| operation wrapped in a function does not throws an + * exception correctly. + * + * @example + * should(() => { let foo = 'bar'; }, 'let foo = "bar"').notThrow(); + * + * @result + * "PASS let foo = "bar" did not throw an exception." + */ + notThrow() { + this._printActualForFailure = false; + + let didThrowCorrectly = false; + let passDetail, failDetail; + + try { + this._actual(); + passDetail = '${actual} did not throw an exception.'; + } catch (error) { + didThrowCorrectly = true; + failDetail = '${actual} incorrectly threw ' + error.name + ': "' + + error.message + '".'; + } + + return this._assert(!didThrowCorrectly, passDetail, failDetail); + } + + /** + * Check if |actual| promise is resolved correctly. 
Note that the returned + * result from promise object will be passed to the following then() + * function. + * + * @example + * should('My promise', promise).beResolve().then((result) => { + * log(result); + * }); + * + * @result + * "PASS My promise resolved correctly." + * "FAIL X My promise rejected *INCORRECTLY* with _ERROR_." + */ + beResolved() { + return this._actual.then( + function(result) { + this._assert(true, '${actual} resolved correctly.', null); + return result; + }.bind(this), + function(error) { + this._assert( + false, null, + '${actual} rejected incorrectly with ' + error + '.'); + }.bind(this)); + } + + /** + * Check if |actual| promise is rejected correctly. + * + * @example + * should('My promise', promise).beRejected().then(nextStuff); + * + * @result + * "PASS My promise rejected correctly (with _ERROR_)." + * "FAIL X My promise resolved *INCORRECTLY*." + */ + beRejected() { + return this._actual.then( + function() { + this._assert(false, null, '${actual} resolved incorrectly.'); + }.bind(this), + function(error) { + this._assert( + true, '${actual} rejected correctly with ' + error + '.', null); + }.bind(this)); + } + + /** + * Check if |actual| promise is rejected correctly. + * + * @example + * should(promise, 'My promise').beRejectedWith('_ERROR_').then(); + * + * @result + * "PASS My promise rejected correctly with _ERROR_." + * "FAIL X My promise rejected correctly but got _ACTUAL_ERROR instead of + * _EXPECTED_ERROR_." + * "FAIL X My promise resolved incorrectly." + */ + beRejectedWith() { + this._processArguments(arguments); + + return this._actual.then( + function() { + this._assert(false, null, '${actual} resolved incorrectly.'); + }.bind(this), + function(error) { + if (this._expected !== error.name) { + this._assert( + false, null, + '${actual} rejected correctly but got ' + error.name + + ' instead of ' + this._expected + '.'); + } else { + this._assert( + true, + '${actual} rejected correctly with ' + this._expected + '.', + null); + } + }.bind(this)); + } + + /** + * Check if |actual| is a boolean true. + * + * @example + * should(3 < 5, '3 < 5').beTrue(); + * + * @result + * "PASS 3 < 5 is true." + */ + beTrue() { + return this._assert( + this._actual === true, '${actual} is true.', + '${actual} is not true.'); + } + + /** + * Check if |actual| is a boolean false. + * + * @example + * should(3 > 5, '3 > 5').beFalse(); + * + * @result + * "PASS 3 > 5 is false." + */ + beFalse() { + return this._assert( + this._actual === false, '${actual} is false.', + '${actual} is not false.'); + } + + /** + * Check if |actual| is strictly equal to |expected|. (no type coercion) + * + * @example + * should(1).beEqualTo(1); + * + * @result + * "PASS 1 is equal to 1." + */ + beEqualTo() { + this._processArguments(arguments); + return this._assert( + this._actual === this._expected, '${actual} is equal to ${expected}.', + '${actual} is not equal to ${expected}.'); + } + + /** + * Check if |actual| is not equal to |expected|. + * + * @example + * should(1).notBeEqualTo(2); + * + * @result + * "PASS 1 is not equal to 2." 
+ */ + notBeEqualTo() { + this._processArguments(arguments); + return this._assert( + this._actual !== this._expected, + '${actual} is not equal to ${expected}.', + '${actual} should not be equal to ${expected}.'); + } + + /** + * check if |actual| is NaN + * + * @example + * should(NaN).beNaN(); + * + * @result + * "PASS NaN is NaN" + * + */ + beNaN() { + this._processArguments(arguments); + return this._assert( + isNaN(this._actual), + '${actual} is NaN.', + '${actual} is not NaN but should be.'); + } + + /** + * check if |actual| is NOT NaN + * + * @example + * should(42).notBeNaN(); + * + * @result + * "PASS 42 is not NaN" + * + */ + notBeNaN() { + this._processArguments(arguments); + return this._assert( + !isNaN(this._actual), + '${actual} is not NaN.', + '${actual} is NaN but should not be.'); + } + + /** + * Check if |actual| is greater than |expected|. + * + * @example + * should(2).beGreaterThanOrEqualTo(2); + * + * @result + * "PASS 2 is greater than or equal to 2." + */ + beGreaterThan() { + this._processArguments(arguments); + return this._assert( + this._actual > this._expected, + '${actual} is greater than ${expected}.', + '${actual} is not greater than ${expected}.'); + } + + /** + * Check if |actual| is greater than or equal to |expected|. + * + * @example + * should(2).beGreaterThan(1); + * + * @result + * "PASS 2 is greater than 1." + */ + beGreaterThanOrEqualTo() { + this._processArguments(arguments); + return this._assert( + this._actual >= this._expected, + '${actual} is greater than or equal to ${expected}.', + '${actual} is not greater than or equal to ${expected}.'); + } + + /** + * Check if |actual| is less than |expected|. + * + * @example + * should(1).beLessThan(2); + * + * @result + * "PASS 1 is less than 2." + */ + beLessThan() { + this._processArguments(arguments); + return this._assert( + this._actual < this._expected, '${actual} is less than ${expected}.', + '${actual} is not less than ${expected}.'); + } + + /** + * Check if |actual| is less than or equal to |expected|. + * + * @example + * should(1).beLessThanOrEqualTo(1); + * + * @result + * "PASS 1 is less than or equal to 1." + */ + beLessThanOrEqualTo() { + this._processArguments(arguments); + return this._assert( + this._actual <= this._expected, + '${actual} is less than or equal to ${expected}.', + '${actual} is not less than or equal to ${expected}.'); + } + + /** + * Check if |actual| array is filled with a constant |expected| value. + * + * @example + * should([1, 1, 1]).beConstantValueOf(1); + * + * @result + * "PASS [1,1,1] contains only the constant 1." 
+ */ + beConstantValueOf() { + this._processArguments(arguments); + this._printActualForFailure = false; + + let passed = true; + let passDetail, failDetail; + let errors = {}; + + let actual = this._actual; + let expected = this._expected; + for (let index = 0; index < actual.length; ++index) { + if (actual[index] !== expected) + errors[index] = actual[index]; + } + + let numberOfErrors = Object.keys(errors).length; + passed = numberOfErrors === 0; + + if (passed) { + passDetail = '${actual} contains only the constant ${expected}.'; + } else { + let counter = 0; + failDetail = + '${actual}: Expected ${expected} for all values but found ' + + numberOfErrors + ' unexpected values: '; + failDetail += '\n\tIndex\tActual'; + for (let errorIndex in errors) { + failDetail += '\n\t[' + errorIndex + ']' + + '\t' + errors[errorIndex]; + if (++counter >= this._options.numberOfErrors) { + failDetail += + '\n\t...and ' + (numberOfErrors - counter) + ' more errors.'; + break; + } + } + } + + return this._assert(passed, passDetail, failDetail); + } + + /** + * Check if |actual| array is not filled with a constant |expected| value. + * + * @example + * should([1, 0, 1]).notBeConstantValueOf(1); + * should([0, 0, 0]).notBeConstantValueOf(0); + * + * @result + * "PASS [1,0,1] is not constantly 1 (contains 1 different value)." + * "FAIL X [0,0,0] should have contain at least one value different + * from 0." + */ + notBeConstantValueOf() { + this._processArguments(arguments); + this._printActualForFailure = false; + + let passed = true; + let passDetail; + let failDetail; + let differences = {}; + + let actual = this._actual; + let expected = this._expected; + for (let index = 0; index < actual.length; ++index) { + if (actual[index] !== expected) + differences[index] = actual[index]; + } + + let numberOfDifferences = Object.keys(differences).length; + passed = numberOfDifferences > 0; + + if (passed) { + let valueString = numberOfDifferences > 1 ? 'values' : 'value'; + passDetail = '${actual} is not constantly ${expected} (contains ' + + numberOfDifferences + ' different ' + valueString + ').'; + } else { + failDetail = '${actual} should have contain at least one value ' + + 'different from ${expected}.'; + } + + return this._assert(passed, passDetail, failDetail); + } + + /** + * Check if |actual| array is identical to |expected| array element-wise. + * + * @example + * should([1, 2, 3]).beEqualToArray([1, 2, 3]); + * + * @result + * "[1,2,3] is identical to the array [1,2,3]." 
+ */ + beEqualToArray() { + this._processArguments(arguments); + this._printActualForFailure = false; + + let passed = true; + let passDetail, failDetail; + let errorIndices = []; + + if (this._actual.length !== this._expected.length) { + passed = false; + failDetail = 'The array length does not match.'; + return this._assert(passed, passDetail, failDetail); + } + + let actual = this._actual; + let expected = this._expected; + for (let index = 0; index < actual.length; ++index) { + if (actual[index] !== expected[index]) + errorIndices.push(index); + } + + passed = errorIndices.length === 0; + + if (passed) { + passDetail = '${actual} is identical to the array ${expected}.'; + } else { + let counter = 0; + failDetail = + '${actual} expected to be equal to the array ${expected} ' + + 'but differs in ' + errorIndices.length + ' places:' + + '\n\tIndex\tActual\t\t\tExpected'; + for (let index of errorIndices) { + failDetail += '\n\t[' + index + ']' + + '\t' + this._actual[index].toExponential(16) + '\t' + + this._expected[index].toExponential(16); + if (++counter >= this._options.numberOfErrors) { + failDetail += '\n\t...and ' + (errorIndices.length - counter) + + ' more errors.'; + break; + } + } + } + + return this._assert(passed, passDetail, failDetail); + } + + /** + * Check if |actual| array contains only the values in |expected| in the + * order of values in |expected|. + * + * @example + * Should([1, 1, 3, 3, 2], 'My random array').containValues([1, 3, 2]); + * + * @result + * "PASS [1,1,3,3,2] contains all the expected values in the correct + * order: [1,3,2]. + */ + containValues() { + this._processArguments(arguments); + this._printActualForFailure = false; + + let passed = true; + let indexedActual = []; + let firstErrorIndex = null; + + // Collect the unique value sequence from the actual. + for (let i = 0, prev = null; i < this._actual.length; i++) { + if (this._actual[i] !== prev) { + indexedActual.push({index: i, value: this._actual[i]}); + prev = this._actual[i]; + } + } + + // Compare against the expected sequence. + let failMessage = + '${actual} expected to have the value sequence of ${expected} but ' + + 'got '; + if (this._expected.length === indexedActual.length) { + for (let j = 0; j < this._expected.length; j++) { + if (this._expected[j] !== indexedActual[j].value) { + firstErrorIndex = indexedActual[j].index; + passed = false; + failMessage += this._actual[firstErrorIndex] + ' at index ' + + firstErrorIndex + '.'; + break; + } + } + } else { + passed = false; + let indexedValues = indexedActual.map(x => x.value); + failMessage += `${indexedActual.length} values, [${ + indexedValues}], instead of ${this._expected.length}.`; + } + + return this._assert( + passed, + '${actual} contains all the expected values in the correct order: ' + + '${expected}.', + failMessage); + } + + /** + * Check if |actual| array does not have any glitches. Note that |threshold| + * is not optional and is to define the desired threshold value. + * + * @example + * should([0.5, 0.5, 0.55, 0.5, 0.45, 0.5]).notGlitch(0.06); + * + * @result + * "PASS [0.5,0.5,0.55,0.5,0.45,0.5] has no glitch above the threshold + * of 0.06." 
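+     *
+     * (Editor's note, not part of the upstream file: a "glitch" here means a
+     * pair of successive samples whose absolute difference reaches the
+     * threshold, i.e. |actual[i] - actual[i - 1]| >= threshold; the example
+     * above passes because every step stays below 0.06.)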
+     *
+     */
+    notGlitch() {
+      this._processArguments(arguments);
+      this._printActualForFailure = false;
+
+      let passed = true;
+      let passDetail, failDetail;
+
+      let actual = this._actual;
+      let expected = this._expected;
+      // Start at the second sample; each sample is compared against its
+      // immediate predecessor.
+      for (let index = 1; index < actual.length; ++index) {
+        let diff = Math.abs(actual[index - 1] - actual[index]);
+        if (diff >= expected) {
+          passed = false;
+          failDetail = '${actual} has a glitch at index ' + index +
+              ' of size ' + diff + '.';
+        }
+      }
+
+      passDetail =
+          '${actual} has no glitch above the threshold of ${expected}.';
+
+      return this._assert(passed, passDetail, failDetail);
+    }
+
+    /**
+     * Check if |actual| is close to |expected| using the given relative error
+     * |threshold|.
+     *
+     * @example
+     *   should(2.3).beCloseTo(2, { threshold: 0.3 });
+     *
+     * @result
+     *   "PASS 2.3 is 2 within an error of 0.3."
+     * @param {Object} options Options for assertion.
+     * @param {Number} options.threshold Threshold value for the comparison.
+     */
+    beCloseTo() {
+      this._processArguments(arguments);
+
+      // The threshold is relative except when |expected| is zero, in which
+      // case it is absolute.
+      let absExpected = this._expected ? Math.abs(this._expected) : 1;
+      let error = Math.abs(this._actual - this._expected) / absExpected;
+
+      return this._assert(
+          error <= this._options.threshold,
+          '${actual} is ${expected} within an error of ${threshold}.',
+          '${actual} is not close to ${expected} within a relative error of ' +
+              '${threshold} (RelErr=' + error + ').');
+    }
+
+    /**
+     * Check if |target| array is close to |expected| array element-wise within
+     * a certain error bound given by the |options|.
+     *
+     * The error criterion is:
+     *   abs(actual[k] - expected[k]) < max(absErr, relErr * abs(expected))
+     *
+     * If nothing is given for |options|, then absErr = relErr = 0. If
+     * absErr = 0, then the error criterion is a relative error. A non-zero
+     * absErr value produces a mix intended to handle the case where the
+     * expected value is 0, allowing the target value to differ by absErr from
+     * the expected.
+     *
+     * @param {Number} options.absoluteThreshold Absolute threshold.
+     * @param {Number} options.relativeThreshold Relative threshold.
+     */
+    beCloseToArray() {
+      this._processArguments(arguments);
+      this._printActualForFailure = false;
+
+      let passed = true;
+      let passDetail, failDetail;
+
+      // Parsing options.
+      let absErrorThreshold = (this._options.absoluteThreshold || 0);
+      let relErrorThreshold = (this._options.relativeThreshold || 0);
+
+      // A collection of all of the values that violate the error criterion.
+      // This holds the absolute difference between the target element and the
+      // expected element.
+      let errors = {};
+
+      // Keep track of the max absolute error found.
+      let maxAbsError = -Infinity, maxAbsErrorIndex = -1;
+
+      // Keep track of the max relative error found, ignoring cases where the
+      // relative error is Infinity because the expected value is 0.
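+      // Editor's illustrative note (not in the upstream file): with a
+      // hypothetical absoluteThreshold of 1e-5 and relativeThreshold of 1e-3,
+      // an expected value of 2 tolerates |actual - expected| up to
+      // max(1e-5, 1e-3 * 2) = 2e-3, while an expected value of 0 falls back
+      // to the absolute bound of 1e-5.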
+ let maxRelError = -Infinity, maxRelErrorIndex = -1; + + let actual = this._actual; + let expected = this._expected; + + for (let index = 0; index < expected.length; ++index) { + let diff = Math.abs(actual[index] - expected[index]); + let absExpected = Math.abs(expected[index]); + let relError = diff / absExpected; + + if (diff > + Math.max(absErrorThreshold, relErrorThreshold * absExpected)) { + if (diff > maxAbsError) { + maxAbsErrorIndex = index; + maxAbsError = diff; + } + + if (!isNaN(relError) && relError > maxRelError) { + maxRelErrorIndex = index; + maxRelError = relError; + } + + errors[index] = diff; + } + } + + let numberOfErrors = Object.keys(errors).length; + let maxAllowedErrorDetail = JSON.stringify({ + absoluteThreshold: absErrorThreshold, + relativeThreshold: relErrorThreshold + }); + + if (numberOfErrors === 0) { + // The assertion was successful. + passDetail = '${actual} equals ${expected} with an element-wise ' + + 'tolerance of ' + maxAllowedErrorDetail + '.'; + } else { + // Failed. Prepare the detailed failure log. + passed = false; + failDetail = '${actual} does not equal ${expected} with an ' + + 'element-wise tolerance of ' + maxAllowedErrorDetail + '.\n'; + + // Print out actual, expected, absolute error, and relative error. + let counter = 0; + failDetail += '\tIndex\tActual\t\t\tExpected\t\tAbsError' + + '\t\tRelError\t\tTest threshold'; + let printedIndices = []; + for (let index in errors) { + failDetail += + '\n' + + _formatFailureEntry( + index, actual[index], expected[index], errors[index], + _closeToThreshold( + absErrorThreshold, relErrorThreshold, expected[index])); + + printedIndices.push(index); + if (++counter > this._options.numberOfErrors) { + failDetail += + '\n\t...and ' + (numberOfErrors - counter) + ' more errors.'; + break; + } + } + + // Finalize the error log: print out the location of both the maxAbs + // error and the maxRel error so we can adjust thresholds appropriately + // in the test. + failDetail += '\n' + + '\tMax AbsError of ' + maxAbsError.toExponential(16) + + ' at index of ' + maxAbsErrorIndex + '.\n'; + if (printedIndices.find(element => { + return element == maxAbsErrorIndex; + }) === undefined) { + // Print an entry for this index if we haven't already. + failDetail += + _formatFailureEntry( + maxAbsErrorIndex, actual[maxAbsErrorIndex], + expected[maxAbsErrorIndex], errors[maxAbsErrorIndex], + _closeToThreshold( + absErrorThreshold, relErrorThreshold, + expected[maxAbsErrorIndex])) + + '\n'; + } + failDetail += '\tMax RelError of ' + maxRelError.toExponential(16) + + ' at index of ' + maxRelErrorIndex + '.\n'; + if (printedIndices.find(element => { + return element == maxRelErrorIndex; + }) === undefined) { + // Print an entry for this index if we haven't already. + failDetail += + _formatFailureEntry( + maxRelErrorIndex, actual[maxRelErrorIndex], + expected[maxRelErrorIndex], errors[maxRelErrorIndex], + _closeToThreshold( + absErrorThreshold, relErrorThreshold, + expected[maxRelErrorIndex])) + + '\n'; + } + } + + return this._assert(passed, passDetail, failDetail); + } + + /** + * A temporary escape hat for printing an in-task message. The description + * for the |actual| is required to get the message printed properly. + * + * TODO(hongchan): remove this method when the transition from the old Audit + * to the new Audit is completed. + * @example + * should(true, 'The message is').message('truthful!', 'false!'); + * + * @result + * "PASS The message is truthful!" 
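+     *   "FAIL X The message is false!"
+     *   (The FAIL line is an editor-added illustration of the falsy-|actual|
+     *   case; it is not part of the upstream file.)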
+ */ + message(passDetail, failDetail) { + return this._assert( + this._actual, '${actual} ' + passDetail, '${actual} ' + failDetail); + } + + /** + * Check if |expected| property is truly owned by |actual| object. + * + * @example + * should(BaseAudioContext.prototype, + * 'BaseAudioContext.prototype').haveOwnProperty('createGain'); + * + * @result + * "PASS BaseAudioContext.prototype has an own property of + * 'createGain'." + */ + haveOwnProperty() { + this._processArguments(arguments); + + return this._assert( + this._actual.hasOwnProperty(this._expected), + '${actual} has an own property of "${expected}".', + '${actual} does not own the property of "${expected}".'); + } + + + /** + * Check if |expected| property is not owned by |actual| object. + * + * @example + * should(BaseAudioContext.prototype, + * 'BaseAudioContext.prototype') + * .notHaveOwnProperty('startRendering'); + * + * @result + * "PASS BaseAudioContext.prototype does not have an own property of + * 'startRendering'." + */ + notHaveOwnProperty() { + this._processArguments(arguments); + + return this._assert( + !this._actual.hasOwnProperty(this._expected), + '${actual} does not have an own property of "${expected}".', + '${actual} has an own the property of "${expected}".') + } + + + /** + * Check if an object is inherited from a class. This looks up the entire + * prototype chain of a given object and tries to find a match. + * + * @example + * should(sourceNode, 'A buffer source node') + * .inheritFrom('AudioScheduledSourceNode'); + * + * @result + * "PASS A buffer source node inherits from 'AudioScheduledSourceNode'." + */ + inheritFrom() { + this._processArguments(arguments); + + let prototypes = []; + let currentPrototype = Object.getPrototypeOf(this._actual); + while (currentPrototype) { + prototypes.push(currentPrototype.constructor.name); + currentPrototype = Object.getPrototypeOf(currentPrototype); + } + + return this._assert( + prototypes.includes(this._expected), + '${actual} inherits from "${expected}".', + '${actual} does not inherit from "${expected}".'); + } + } + + + // Task Class state enum. + const TaskState = {PENDING: 0, STARTED: 1, FINISHED: 2}; + + + /** + * @class Task + * @description WebAudio testing task. Managed by TaskRunner. + */ + class Task { + /** + * Task constructor. + * @param {Object} taskRunner Reference of associated task runner. + * @param {String||Object} taskLabel Task label if a string is given. This + * parameter can be a dictionary with the + * following fields. + * @param {String} taskLabel.label Task label. + * @param {String} taskLabel.description Description of task. + * @param {Function} taskFunction Task function to be performed. + * @return {Object} Task object. + */ + constructor(taskRunner, taskLabel, taskFunction) { + this._taskRunner = taskRunner; + this._taskFunction = taskFunction; + + if (typeof taskLabel === 'string') { + this._label = taskLabel; + this._description = null; + } else if (typeof taskLabel === 'object') { + if (typeof taskLabel.label !== 'string') { + _throwException('Task.constructor:: task label must be string.'); + } + this._label = taskLabel.label; + this._description = (typeof taskLabel.description === 'string') ? 
+ taskLabel.description : + null; + } else { + _throwException( + 'Task.constructor:: task label must be a string or ' + + 'a dictionary.'); + } + + this._state = TaskState.PENDING; + this._result = true; + + this._totalAssertions = 0; + this._failedAssertions = 0; + } + + get label() { + return this._label; + } + + get state() { + return this._state; + } + + get result() { + return this._result; + } + + // Start the assertion chain. + should(actual, actualDescription) { + // If no argument is given, we cannot proceed. Halt. + if (arguments.length === 0) + _throwException('Task.should:: requires at least 1 argument.'); + + return new Should(this, actual, actualDescription); + } + + // Run this task. |this| task will be passed into the user-supplied test + // task function. + run(harnessTest) { + this._state = TaskState.STARTED; + this._harnessTest = harnessTest; + // Print out the task entry with label and description. + _logPassed( + '> [' + this._label + '] ' + + (this._description ? this._description : '')); + + return new Promise((resolve, reject) => { + this._resolve = resolve; + this._reject = reject; + let result = this._taskFunction(this, this.should.bind(this)); + if (result && typeof result.then === "function") { + result.then(() => this.done()).catch(reject); + } + }); + } + + // Update the task success based on the individual assertion/test inside. + update(subTask) { + // After one of tests fails within a task, the result is irreversible. + if (subTask.result === false) { + this._result = false; + this._failedAssertions++; + } + + this._totalAssertions++; + } + + // Finish the current task and start the next one if available. + done() { + assert_equals(this._state, TaskState.STARTED) + this._state = TaskState.FINISHED; + + let message = '< [' + this._label + '] '; + + if (this._result) { + message += 'All assertions passed. (total ' + this._totalAssertions + + ' assertions)'; + _logPassed(message); + } else { + message += this._failedAssertions + ' out of ' + this._totalAssertions + + ' assertions were failed.' + _logFailed(message); + } + + this._resolve(); + } + + // Runs |subTask| |time| milliseconds later. |setTimeout| is not allowed in + // WPT linter, so a thin wrapper around the harness's |step_timeout| is + // used here. Returns a Promise which is resolved after |subTask| runs. + timeout(subTask, time) { + return new Promise(resolve => { + this._harnessTest.step_timeout(() => { + let result = subTask(); + if (result && typeof result.then === "function") { + // Chain rejection directly to the harness test Promise, to report + // the rejection against the subtest even when the caller of + // timeout does not handle the rejection. + result.then(resolve, this._reject()); + } else { + resolve(); + } + }, time); + }); + } + + isPassed() { + return this._state === TaskState.FINISHED && this._result; + } + + toString() { + return '"' + this._label + '": ' + this._description; + } + } + + + /** + * @class TaskRunner + * @description WebAudio testing task runner. Manages tasks. + */ + class TaskRunner { + constructor() { + this._tasks = {}; + this._taskSequence = []; + + // Configure testharness.js for the async operation. + setup(new Function(), {explicit_done: true}); + } + + _finish() { + let numberOfFailures = 0; + for (let taskIndex in this._taskSequence) { + let task = this._tasks[this._taskSequence[taskIndex]]; + numberOfFailures += task.result ? 
0 : 1; + } + + let prefix = '# AUDIT TASK RUNNER FINISHED: '; + if (numberOfFailures > 0) { + _logFailed( + prefix + numberOfFailures + ' out of ' + this._taskSequence.length + + ' tasks were failed.'); + } else { + _logPassed( + prefix + this._taskSequence.length + ' tasks ran successfully.'); + } + + return Promise.resolve(); + } + + // |taskLabel| can be either a string or a dictionary. See Task constructor + // for the detail. If |taskFunction| returns a thenable, then the task + // is considered complete when the thenable is fulfilled; otherwise the + // task must be completed with an explicit call to |task.done()|. + define(taskLabel, taskFunction) { + let task = new Task(this, taskLabel, taskFunction); + if (this._tasks.hasOwnProperty(task.label)) { + _throwException('Audit.define:: Duplicate task definition.'); + return; + } + this._tasks[task.label] = task; + this._taskSequence.push(task.label); + } + + // Start running all the tasks scheduled. Multiple task names can be passed + // to execute them sequentially. Zero argument will perform all defined + // tasks in the order of definition. + run() { + // Display the beginning of the test suite. + _logPassed('# AUDIT TASK RUNNER STARTED.'); + + // If the argument is specified, override the default task sequence with + // the specified one. + if (arguments.length > 0) { + this._taskSequence = []; + for (let i = 0; i < arguments.length; i++) { + let taskLabel = arguments[i]; + if (!this._tasks.hasOwnProperty(taskLabel)) { + _throwException('Audit.run:: undefined task.'); + } else if (this._taskSequence.includes(taskLabel)) { + _throwException('Audit.run:: duplicate task request.'); + } else { + this._taskSequence.push(taskLabel); + } + } + } + + if (this._taskSequence.length === 0) { + _throwException('Audit.run:: no task to run.'); + return; + } + + for (let taskIndex in this._taskSequence) { + let task = this._tasks[this._taskSequence[taskIndex]]; + // Some tests assume that tasks run in sequence, which is provided by + // promise_test(). + promise_test((t) => task.run(t), `Executing "${task.label}"`); + } + + // Schedule a summary report on completion. + promise_test(() => this._finish(), "Audit report"); + + // From testharness.js. The harness now need not wait for more subtests + // to be added. + _testharnessDone(); + } + } + + /** + * Load file from a given URL and pass ArrayBuffer to the following promise. + * @param {String} fileUrl file URL. + * @return {Promise} + * + * @example + * Audit.loadFileFromUrl('resources/my-sound.ogg').then((response) => { + * audioContext.decodeAudioData(response).then((audioBuffer) => { + * // Do something with AudioBuffer. + * }); + * }); + */ + function loadFileFromUrl(fileUrl) { + return new Promise((resolve, reject) => { + let xhr = new XMLHttpRequest(); + xhr.open('GET', fileUrl, true); + xhr.responseType = 'arraybuffer'; + + xhr.onload = () => { + // |status = 0| is a workaround for the run_web_test.py server. We are + // speculating the server quits the transaction prematurely without + // completing the request. + if (xhr.status === 200 || xhr.status === 0) { + resolve(xhr.response); + } else { + let errorMessage = 'loadFile: Request failed when loading ' + + fileUrl + '. ' + xhr.statusText + '. 
(status = ' + xhr.status + + ')'; + if (reject) { + reject(errorMessage); + } else { + new Error(errorMessage); + } + } + }; + + xhr.onerror = (event) => { + let errorMessage = + 'loadFile: Network failure when loading ' + fileUrl + '.'; + if (reject) { + reject(errorMessage); + } else { + new Error(errorMessage); + } + }; + + xhr.send(); + }); + } + + /** + * @class Audit + * @description A WebAudio layout test task manager. + * @example + * let audit = Audit.createTaskRunner(); + * audit.define('first-task', function (task, should) { + * should(someValue).beEqualTo(someValue); + * task.done(); + * }); + * audit.run(); + */ + return { + + /** + * Creates an instance of Audit task runner. + * @param {Object} options Options for task runner. + * @param {Boolean} options.requireResultFile True if the test suite + * requires explicit text + * comparison with the expected + * result file. + */ + createTaskRunner: function(options) { + if (options && options.requireResultFile == true) { + _logError( + 'this test requires the explicit comparison with the ' + + 'expected result when it runs with run_web_tests.py.'); + } + + return new TaskRunner(); + }, + + /** + * Load file from a given URL and pass ArrayBuffer to the following promise. + * See |loadFileFromUrl| method for the detail. + */ + loadFileFromUrl: loadFileFromUrl + + }; + +})(); diff --git a/testing/web-platform/tests/webaudio/resources/biquad-filters.js b/testing/web-platform/tests/webaudio/resources/biquad-filters.js new file mode 100644 index 0000000000..467436326a --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/biquad-filters.js @@ -0,0 +1,376 @@ +// A biquad filter has a z-transform of +// H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2) +// +// The formulas for the various filters were taken from +// http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt. + + +// Lowpass filter. +function createLowpassFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + + if (freq == 1) { + // The formula below works, except for roundoff. When freq = 1, + // the filter is just a wire, so hardwire the coefficients. + b0 = 1; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else { + let theta = Math.PI * freq; + let alpha = Math.sin(theta) / (2 * Math.pow(10, q / 20)); + let cosw = Math.cos(theta); + let beta = (1 - cosw) / 2; + + b0 = beta; + b1 = 2 * beta; + b2 = beta; + a0 = 1 + alpha; + a1 = -2 * cosw; + a2 = 1 - alpha; + } + + return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); +} + +function createHighpassFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + + if (freq == 1) { + // The filter is 0 + b0 = 0; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else if (freq == 0) { + // The filter is 1. Computation of coefficients below is ok, but + // there's a pole at 1 and a zero at 1, so round-off could make + // the filter unstable. 
+ b0 = 1; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else { + let theta = Math.PI * freq; + let alpha = Math.sin(theta) / (2 * Math.pow(10, q / 20)); + let cosw = Math.cos(theta); + let beta = (1 + cosw) / 2; + + b0 = beta; + b1 = -2 * beta; + b2 = beta; + a0 = 1 + alpha; + a1 = -2 * cosw; + a2 = 1 - alpha; + } + + return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); +} + +function normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2) { + let scale = 1 / a0; + + return { + b0: b0 * scale, + b1: b1 * scale, + b2: b2 * scale, + a1: a1 * scale, + a2: a2 * scale + }; +} + +function createBandpassFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + if (freq > 0 && freq < 1) { + let w0 = Math.PI * freq; + if (q > 0) { + let alpha = Math.sin(w0) / (2 * q); + let k = Math.cos(w0); + + b0 = alpha; + b1 = 0; + b2 = -alpha; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0, and frequency is not 0 or 1. The above formula has a + // divide by zero problem. The limit of the z-transform as q + // approaches 0 is 1, so set the filter that way. + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } + } else { + // When freq = 0 or 1, the z-transform is identically 0, + // independent of q. + coef = { b0: 0, b1: 0, b2: 0, a1: 0, a2: 0 } + } + + return coef; +} + +function createLowShelfFilter(freq, q, gain) { + // q not used + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + let S = 1; + let A = Math.pow(10, gain / 40); + + if (freq == 1) { + // The filter is just a constant gain + coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0}; + } else if (freq == 0) { + // The filter is 1 + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } else { + let w0 = Math.PI * freq; + let alpha = 1 / 2 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2); + let k = Math.cos(w0); + let k2 = 2 * Math.sqrt(A) * alpha; + let Ap1 = A + 1; + let Am1 = A - 1; + + b0 = A * (Ap1 - Am1 * k + k2); + b1 = 2 * A * (Am1 - Ap1 * k); + b2 = A * (Ap1 - Am1 * k - k2); + a0 = Ap1 + Am1 * k + k2; + a1 = -2 * (Am1 + Ap1 * k); + a2 = Ap1 + Am1 * k - k2; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } + + return coef; +} + +function createHighShelfFilter(freq, q, gain) { + // q not used + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + let A = Math.pow(10, gain / 40); + + if (freq == 1) { + // When freq = 1, the z-transform is 1 + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } else if (freq > 0) { + let w0 = Math.PI * freq; + let S = 1; + let alpha = 0.5 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2); + let k = Math.cos(w0); + let k2 = 2 * Math.sqrt(A) * alpha; + let Ap1 = A + 1; + let Am1 = A - 1; + + b0 = A * (Ap1 + Am1 * k + k2); + b1 = -2 * A * (Am1 + Ap1 * k); + b2 = A * (Ap1 + Am1 * k - k2); + a0 = Ap1 - Am1 * k + k2; + a1 = 2 * (Am1 - Ap1 * k); + a2 = Ap1 - Am1 * k - k2; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When freq = 0, the filter is just a gain + coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0}; + } + + return coef; +} + +function createPeakingFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + let A = Math.pow(10, gain / 40); + + if (freq > 0 && freq < 1) { + if (q > 0) { + let w0 = Math.PI * freq; + let alpha = Math.sin(w0) / (2 * q); + let k = Math.cos(w0); + + b0 = 1 + alpha * A; + b1 = -2 * k; + b2 = 1 - alpha * A; + a0 = 1 + 
alpha / A; + a1 = -2 * k; + a2 = 1 - alpha / A; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0, we have a divide by zero problem in the formulas + // above. But if we look at the z-transform, we see that the + // limit as q approaches 0 is A^2. + coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0}; + } + } else { + // freq = 0 or 1, the z-transform is 1 + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } + + return coef; +} + +function createNotchFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + if (freq > 0 && freq < 1) { + if (q > 0) { + let w0 = Math.PI * freq; + let alpha = Math.sin(w0) / (2 * q); + let k = Math.cos(w0); + + b0 = 1; + b1 = -2 * k; + b2 = 1; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When q = 0, we get a divide by zero above. The limit of the + // z-transform as q approaches 0 is 0, so set the coefficients + // appropriately. + coef = {b0: 0, b1: 0, b2: 0, a1: 0, a2: 0}; + } + } else { + // When freq = 0 or 1, the z-transform is 1 + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } + + return coef; +} + +function createAllpassFilter(freq, q, gain) { + let b0; + let b1; + let b2; + let a0; + let a1; + let a2; + let coef; + + if (freq > 0 && freq < 1) { + if (q > 0) { + let w0 = Math.PI * freq; + let alpha = Math.sin(w0) / (2 * q); + let k = Math.cos(w0); + + b0 = 1 - alpha; + b1 = -2 * k; + b2 = 1 + alpha; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0 + coef = {b0: -1, b1: 0, b2: 0, a1: 0, a2: 0}; + } + } else { + coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0}; + } + + return coef; +} + +function filterData(filterCoef, signal, len) { + let y = new Array(len); + let b0 = filterCoef.b0; + let b1 = filterCoef.b1; + let b2 = filterCoef.b2; + let a1 = filterCoef.a1; + let a2 = filterCoef.a2; + + // Prime the pump. (Assumes the signal has length >= 2!) + y[0] = b0 * signal[0]; + y[1] = b0 * signal[1] + b1 * signal[0] - a1 * y[0]; + + // Filter all of the signal that we have. + for (let k = 2; k < Math.min(signal.length, len); ++k) { + y[k] = b0 * signal[k] + b1 * signal[k - 1] + b2 * signal[k - 2] - + a1 * y[k - 1] - a2 * y[k - 2]; + } + + // If we need to filter more, but don't have any signal left, + // assume the signal is zero. + for (let k = signal.length; k < len; ++k) { + y[k] = -a1 * y[k - 1] - a2 * y[k - 2]; + } + + return y; +} + +// Map the filter type name to a function that computes the filter coefficents +// for the given filter type. 
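+// Editor's illustrative sketch (not part of the upstream file); it assumes a
+// hypothetical AudioContext |context| and shows how createFilter() (defined
+// below) and filterData() (defined above) are typically combined to build a
+// reference response:
+//
+//   let nyquist = context.sampleRate / 2;
+//   let coef = createFilter('lowpass', 1000 / nyquist, 1, 0);
+//   let impulse = new Array(128).fill(0);
+//   impulse[0] = 1;
+//   let reference = filterData(coef, impulse, 128);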
+let filterCreatorFunction = { + 'lowpass': createLowpassFilter, + 'highpass': createHighpassFilter, + 'bandpass': createBandpassFilter, + 'lowshelf': createLowShelfFilter, + 'highshelf': createHighShelfFilter, + 'peaking': createPeakingFilter, + 'notch': createNotchFilter, + 'allpass': createAllpassFilter +}; + +let filterTypeName = { + 'lowpass': 'Lowpass filter', + 'highpass': 'Highpass filter', + 'bandpass': 'Bandpass filter', + 'lowshelf': 'Lowshelf filter', + 'highshelf': 'Highshelf filter', + 'peaking': 'Peaking filter', + 'notch': 'Notch filter', + 'allpass': 'Allpass filter' +}; + +function createFilter(filterType, freq, q, gain) { + return filterCreatorFunction[filterType](freq, q, gain); +} diff --git a/testing/web-platform/tests/webaudio/resources/biquad-testing.js b/testing/web-platform/tests/webaudio/resources/biquad-testing.js new file mode 100644 index 0000000000..7f90a1f72b --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/biquad-testing.js @@ -0,0 +1,172 @@ +// Globals, to make testing and debugging easier. +let context; +let filter; +let signal; +let renderedBuffer; +let renderedData; + +// Use a power of two to eliminate round-off in converting frame to time +let sampleRate = 32768; +let pulseLengthFrames = .1 * sampleRate; + +// Maximum allowed error for the test to succeed. Experimentally determined. +let maxAllowedError = 5.9e-8; + +// This must be large enough so that the filtered result is essentially zero. +// See comments for createTestAndRun. This must be a whole number of frames. +let timeStep = Math.ceil(.1 * sampleRate) / sampleRate; + +// Maximum number of filters we can process (mostly for setting the +// render length correctly.) +let maxFilters = 5; + +// How long to render. Must be long enough for all of the filters we +// want to test. +let renderLengthSeconds = timeStep * (maxFilters + 1); + +let renderLengthSamples = Math.round(renderLengthSeconds * sampleRate); + +// Number of filters that will be processed. +let nFilters; + +function createImpulseBuffer(context, length) { + let impulse = context.createBuffer(1, length, context.sampleRate); + let data = impulse.getChannelData(0); + for (let k = 1; k < data.length; ++k) { + data[k] = 0; + } + data[0] = 1; + + return impulse; +} + + +function createTestAndRun(context, filterType, testParameters) { + // To test the filters, we apply a signal (an impulse) to each of + // the specified filters, with each signal starting at a different + // time. The output of the filters is summed together at the + // output. Thus for filter k, the signal input to the filter + // starts at time k * timeStep. For this to work well, timeStep + // must be large enough for the output of each filter to have + // decayed to zero with timeStep seconds. That way the filter + // outputs don't interfere with each other. + + let filterParameters = testParameters.filterParameters; + nFilters = Math.min(filterParameters.length, maxFilters); + + signal = new Array(nFilters); + filter = new Array(nFilters); + + impulse = createImpulseBuffer(context, pulseLengthFrames); + + // Create all of the signal sources and filters that we need. + for (let k = 0; k < nFilters; ++k) { + signal[k] = context.createBufferSource(); + signal[k].buffer = impulse; + + filter[k] = context.createBiquadFilter(); + filter[k].type = filterType; + filter[k].frequency.value = + context.sampleRate / 2 * filterParameters[k].cutoff; + filter[k].detune.value = (filterParameters[k].detune === undefined) ? 
+ 0 : + filterParameters[k].detune; + filter[k].Q.value = filterParameters[k].q; + filter[k].gain.value = filterParameters[k].gain; + + signal[k].connect(filter[k]); + filter[k].connect(context.destination); + + signal[k].start(timeStep * k); + } + + return context.startRendering().then(buffer => { + checkFilterResponse(buffer, filterType, testParameters); + }); +} + +function addSignal(dest, src, destOffset) { + // Add src to dest at the given dest offset. + for (let k = destOffset, j = 0; k < dest.length, j < src.length; ++k, ++j) { + dest[k] += src[j]; + } +} + +function generateReference(filterType, filterParameters) { + let result = new Array(renderLengthSamples); + let data = new Array(renderLengthSamples); + // Initialize the result array and data. + for (let k = 0; k < result.length; ++k) { + result[k] = 0; + data[k] = 0; + } + // Make data an impulse. + data[0] = 1; + + for (let k = 0; k < nFilters; ++k) { + // Filter an impulse + let detune = (filterParameters[k].detune === undefined) ? + 0 : + filterParameters[k].detune; + let frequency = filterParameters[k].cutoff * + Math.pow(2, detune / 1200); // Apply detune, converting from Cents. + + let filterCoef = createFilter( + filterType, frequency, filterParameters[k].q, filterParameters[k].gain); + let y = filterData(filterCoef, data, renderLengthSamples); + + // Accumulate this filtered data into the final output at the desired + // offset. + addSignal(result, y, timeToSampleFrame(timeStep * k, sampleRate)); + } + + return result; +} + +function checkFilterResponse(renderedBuffer, filterType, testParameters) { + let filterParameters = testParameters.filterParameters; + let maxAllowedError = testParameters.threshold; + let should = testParameters.should; + + renderedData = renderedBuffer.getChannelData(0); + + reference = generateReference(filterType, filterParameters); + + let len = Math.min(renderedData.length, reference.length); + + let success = true; + + // Maximum error between rendered data and expected data + let maxError = 0; + + // Sample offset where the maximum error occurred. + let maxPosition = 0; + + // Number of infinities or NaNs that occurred in the rendered data. + let invalidNumberCount = 0; + + should(nFilters, 'Number of filters tested') + .beEqualTo(filterParameters.length); + + // Compare the rendered signal with our reference, keeping + // track of the maximum difference (and the offset of the max + // difference.) Check for bad numbers in the rendered output + // too. There shouldn't be any. 
+ for (let k = 0; k < len; ++k) { + let err = Math.abs(renderedData[k] - reference[k]); + if (err > maxError) { + maxError = err; + maxPosition = k; + } + if (!isValidNumber(renderedData[k])) { + ++invalidNumberCount; + } + } + + should( + invalidNumberCount, 'Number of non-finite values in the rendered output') + .beEqualTo(0); + + should(maxError, 'Max error in ' + filterTypeName[filterType] + ' response') + .beLessThanOrEqualTo(maxAllowedError); +} diff --git a/testing/web-platform/tests/webaudio/resources/convolution-testing.js b/testing/web-platform/tests/webaudio/resources/convolution-testing.js new file mode 100644 index 0000000000..c976f86c78 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/convolution-testing.js @@ -0,0 +1,168 @@ +let sampleRate = 44100.0; + +let renderLengthSeconds = 8; +let pulseLengthSeconds = 1; +let pulseLengthFrames = pulseLengthSeconds * sampleRate; + +function createSquarePulseBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(1, sampleFrameLength, context.sampleRate); + + let n = audioBuffer.length; + let data = audioBuffer.getChannelData(0); + + for (let i = 0; i < n; ++i) + data[i] = 1; + + return audioBuffer; +} + +// The triangle buffer holds the expected result of the convolution. +// It linearly ramps up from 0 to its maximum value (at the center) +// then linearly ramps down to 0. The center value corresponds to the +// point where the two square pulses overlap the most. +function createTrianglePulseBuffer(context, sampleFrameLength) { + let audioBuffer = + context.createBuffer(1, sampleFrameLength, context.sampleRate); + + let n = audioBuffer.length; + let halfLength = n / 2; + let data = audioBuffer.getChannelData(0); + + for (let i = 0; i < halfLength; ++i) + data[i] = i + 1; + + for (let i = halfLength; i < n; ++i) + data[i] = n - i - 1; + + return audioBuffer; +} + +function log10(x) { + return Math.log(x) / Math.LN10; +} + +function linearToDecibel(x) { + return 20 * log10(x); +} + +// Verify that the rendered result is very close to the reference +// triangular pulse. +function checkTriangularPulse(rendered, reference, should) { + let match = true; + let maxDelta = 0; + let valueAtMaxDelta = 0; + let maxDeltaIndex = 0; + + for (let i = 0; i < reference.length; ++i) { + let diff = rendered[i] - reference[i]; + let x = Math.abs(diff); + if (x > maxDelta) { + maxDelta = x; + valueAtMaxDelta = reference[i]; + maxDeltaIndex = i; + } + } + + // allowedDeviationFraction was determined experimentally. It + // is the threshold of the relative error at the maximum + // difference between the true triangular pulse and the + // rendered pulse. + let allowedDeviationDecibels = -124.41; + let maxDeviationDecibels = linearToDecibel(maxDelta / valueAtMaxDelta); + + should( + maxDeviationDecibels, + 'Deviation (in dB) of triangular portion of convolution') + .beLessThanOrEqualTo(allowedDeviationDecibels); + + return match; +} + +// Verify that the rendered data is close to zero for the first part +// of the tail. +function checkTail1(data, reference, breakpoint, should) { + let isZero = true; + let tail1Max = 0; + + for (let i = reference.length; i < reference.length + breakpoint; ++i) { + let mag = Math.abs(data[i]); + if (mag > tail1Max) { + tail1Max = mag; + } + } + + // Let's find the peak of the reference (even though we know a + // priori what it is). 
+ let refMax = 0; + for (let i = 0; i < reference.length; ++i) { + refMax = Math.max(refMax, Math.abs(reference[i])); + } + + // This threshold is experimentally determined by examining the + // value of tail1MaxDecibels. + let threshold1 = -129.7; + + let tail1MaxDecibels = linearToDecibel(tail1Max / refMax); + should(tail1MaxDecibels, 'Deviation in first part of tail of convolutions') + .beLessThanOrEqualTo(threshold1); + + return isZero; +} + +// Verify that the second part of the tail of the convolution is +// exactly zero. +function checkTail2(data, reference, breakpoint, should) { + let isZero = true; + let tail2Max = 0; + // For the second part of the tail, the maximum value should be + // exactly zero. + let threshold2 = 0; + for (let i = reference.length + breakpoint; i < data.length; ++i) { + if (Math.abs(data[i]) > 0) { + isZero = false; + break; + } + } + + should(isZero, 'Rendered signal after tail of convolution is silent') + .beTrue(); + + return isZero; +} + +function checkConvolvedResult(renderedBuffer, trianglePulse, should) { + let referenceData = trianglePulse.getChannelData(0); + let renderedData = renderedBuffer.getChannelData(0); + + let success = true; + + // Verify the triangular pulse is actually triangular. + + success = + success && checkTriangularPulse(renderedData, referenceData, should); + + // Make sure that portion after convolved portion is totally + // silent. But round-off prevents this from being completely + // true. At the end of the triangle, it should be close to + // zero. If we go farther out, it should be even closer and + // eventually zero. + + // For the tail of the convolution (where the result would be + // theoretically zero), we partition the tail into two + // parts. The first is the at the beginning of the tail, + // where we tolerate a small but non-zero value. The second part is + // farther along the tail where the result should be zero. + + // breakpoint is the point dividing the first two tail parts + // we're looking at. Experimentally determined. 
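+  // Editor's illustrative note (not in the upstream file): the reference
+  // triangle comes from convolving two all-ones pulses of length N, which
+  // yields a triangle of length 2N - 1; e.g. [1, 1, 1] convolved with
+  // [1, 1, 1] is [1, 2, 3, 2, 1]. Everything rendered past that triangle is
+  // the tail split by the breakpoint below.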
+ let breakpoint = 12800; + + success = + success && checkTail1(renderedData, referenceData, breakpoint, should); + + success = + success && checkTail2(renderedData, referenceData, breakpoint, should); + + should(success, 'Test signal convolved').message('correctly', 'incorrectly'); +} diff --git a/testing/web-platform/tests/webaudio/resources/delay-testing.js b/testing/web-platform/tests/webaudio/resources/delay-testing.js new file mode 100644 index 0000000000..9033da6730 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/delay-testing.js @@ -0,0 +1,66 @@ +let sampleRate = 44100.0; + +let renderLengthSeconds = 4; +let delayTimeSeconds = 0.5; +let toneLengthSeconds = 2; + +function createToneBuffer(context, frequency, numberOfCycles, sampleRate) { + let duration = numberOfCycles / frequency; + let sampleFrameLength = duration * sampleRate; + + let audioBuffer = context.createBuffer(1, sampleFrameLength, sampleRate); + + let n = audioBuffer.length; + let data = audioBuffer.getChannelData(0); + + for (let i = 0; i < n; ++i) + data[i] = Math.sin(frequency * 2.0 * Math.PI * i / sampleRate); + + return audioBuffer; +} + +function checkDelayedResult(renderedBuffer, toneBuffer, should) { + let sourceData = toneBuffer.getChannelData(0); + let renderedData = renderedBuffer.getChannelData(0); + + let delayTimeFrames = delayTimeSeconds * sampleRate; + let toneLengthFrames = toneLengthSeconds * sampleRate; + + let success = true; + + let n = renderedBuffer.length; + + for (let i = 0; i < n; ++i) { + if (i < delayTimeFrames) { + // Check that initial portion is 0 (since signal is delayed). + if (renderedData[i] != 0) { + should( + renderedData[i], 'Initial portion expected to be 0 at frame ' + i) + .beEqualTo(0); + success = false; + break; + } + } else if (i >= delayTimeFrames && i < delayTimeFrames + toneLengthFrames) { + // Make sure that the tone data is delayed by exactly the expected number + // of frames. + let j = i - delayTimeFrames; + if (renderedData[i] != sourceData[j]) { + should(renderedData[i], 'Actual data at frame ' + i) + .beEqualTo(sourceData[j]); + success = false; + break; + } + } else { + // Make sure we have silence after the delayed tone. + if (renderedData[i] != 0) { + should(renderedData[j], 'Final portion at frame ' + i).beEqualTo(0); + success = false; + break; + } + } + } + + should( + success, 'Delaying test signal by ' + delayTimeSeconds + ' sec was done') + .message('correctly', 'incorrectly') +} diff --git a/testing/web-platform/tests/webaudio/resources/distance-model-testing.js b/testing/web-platform/tests/webaudio/resources/distance-model-testing.js new file mode 100644 index 0000000000..f8a6cf940a --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/distance-model-testing.js @@ -0,0 +1,196 @@ +// Use a power of two to eliminate round-off when converting frames to time and +// vice versa. +let sampleRate = 32768; + +// How many panner nodes to create for the test. +let nodesToCreate = 100; + +// Time step when each panner node starts. Make sure it starts on a frame +// boundary. +let timeStep = Math.floor(0.001 * sampleRate) / sampleRate; + +// Make sure we render long enough to get all of our nodes. +let renderLengthSeconds = timeStep * (nodesToCreate + 1); + +// Length of an impulse signal. +let pulseLengthFrames = Math.round(timeStep * sampleRate); + +// Globals to make debugging a little easier. 
+let context; +let impulse; +let bufferSource; +let panner; +let position; +let time; + +// For the record, these distance formulas were taken from the OpenAL +// spec +// (http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.pdf), +// not the code. The Web Audio spec follows the OpenAL formulas. + +function linearDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + distance = Math.min(distance, panner.maxDistance); + let rolloff = panner.rolloffFactor; + let gain = + (1 - + rolloff * (distance - panner.refDistance) / + (panner.maxDistance - panner.refDistance)); + + return gain; +} + +function inverseDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + distance = Math.min(distance, panner.maxDistance); + let rolloff = panner.rolloffFactor; + let gain = panner.refDistance / + (panner.refDistance + rolloff * (distance - panner.refDistance)); + + return gain; +} + +function exponentialDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + distance = Math.min(distance, panner.maxDistance); + let rolloff = panner.rolloffFactor; + let gain = Math.pow(distance / panner.refDistance, -rolloff); + + return gain; +} + +// Map the distance model to the function that implements the model +let distanceModelFunction = { + 'linear': linearDistance, + 'inverse': inverseDistance, + 'exponential': exponentialDistance +}; + +function createGraph(context, distanceModel, nodeCount) { + bufferSource = new Array(nodeCount); + panner = new Array(nodeCount); + position = new Array(nodeCount); + time = new Array(nodesToCreate); + + impulse = createImpulseBuffer(context, pulseLengthFrames); + + // Create all the sources and panners. + // + // We MUST use the EQUALPOWER panning model so that we can easily + // figure out the gain introduced by the panner. + // + // We want to stay in the middle of the panning range, which means + // we want to stay on the z-axis. If we don't, then the effect of + // panning model will be much more complicated. We're not testing + // the panner, but the distance model, so we want the panner effect + // to be simple. + // + // The panners are placed at a uniform intervals between the panner + // reference distance and the panner max distance. The source is + // also started at regular intervals. + for (let k = 0; k < nodeCount; ++k) { + bufferSource[k] = context.createBufferSource(); + bufferSource[k].buffer = impulse; + + panner[k] = context.createPanner(); + panner[k].panningModel = 'equalpower'; + panner[k].distanceModel = distanceModel; + + let distanceStep = + (panner[k].maxDistance - panner[k].refDistance) / nodeCount; + position[k] = distanceStep * k + panner[k].refDistance; + panner[k].setPosition(0, 0, position[k]); + + bufferSource[k].connect(panner[k]); + panner[k].connect(context.destination); + + time[k] = k * timeStep; + bufferSource[k].start(time[k]); + } +} + +// distanceModel should be the distance model string like +// "linear", "inverse", or "exponential". +function createTestAndRun(context, distanceModel, should) { + // To test the distance models, we create a number of panners at + // uniformly spaced intervals on the z-axis. Each of these are + // started at equally spaced time intervals. After rendering the + // signals, we examine where each impulse is located and the + // attenuation of the impulse. The attenuation is compared + // against our expected attenuation. 
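+  //
+  // Editor's illustrative note (not in the upstream file): for the 'inverse'
+  // model with the default refDistance = 1 and rolloffFactor = 1, a source at
+  // distance 10 is expected at gain 1 / (1 + 1 * (10 - 1)) = 0.1, further
+  // scaled by equalPowerGain() = sqrt(1/2) for the 'equalpower' panning used
+  // here.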
+ + createGraph(context, distanceModel, nodesToCreate); + + return context.startRendering().then( + buffer => checkDistanceResult(buffer, distanceModel, should)); +} + +// The gain caused by the EQUALPOWER panning model, if we stay on the +// z axis, with the default orientations. +function equalPowerGain() { + return Math.SQRT1_2; +} + +function checkDistanceResult(renderedBuffer, model, should) { + renderedData = renderedBuffer.getChannelData(0); + + // The max allowed error between the actual gain and the expected + // value. This is determined experimentally. Set to 0 to see + // what the actual errors are. + let maxAllowedError = 2.2720e-6; + + let success = true; + + // Number of impulses we found in the rendered result. + let impulseCount = 0; + + // Maximum relative error in the gain of the impulses. + let maxError = 0; + + // Array of locations of the impulses that were not at the + // expected location. (Contains the actual and expected frame + // of the impulse.) + let impulsePositionErrors = new Array(); + + // Step through the rendered data to find all the non-zero points + // so we can find where our distance-attenuated impulses are. + // These are tested against the expected attenuations at that + // distance. + for (let k = 0; k < renderedData.length; ++k) { + if (renderedData[k] != 0) { + // Convert from string to index. + let distanceFunction = distanceModelFunction[model]; + let expected = + distanceFunction(panner[impulseCount], 0, 0, position[impulseCount]); + + // Adjust for the center-panning of the EQUALPOWER panning + // model that we're using. + expected *= equalPowerGain(); + + let error = Math.abs(renderedData[k] - expected) / Math.abs(expected); + + maxError = Math.max(maxError, Math.abs(error)); + + should(renderedData[k]).beCloseTo(expected, {threshold: maxAllowedError}); + + // Keep track of any impulses that aren't where we expect them + // to be. + let expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate); + if (k != expectedOffset) { + impulsePositionErrors.push({actual: k, expected: expectedOffset}); + } + ++impulseCount; + } + } + should(impulseCount, 'Number of impulses').beEqualTo(nodesToCreate); + + should(maxError, 'Max error in distance gains') + .beLessThanOrEqualTo(maxAllowedError); + + // Display any timing errors that we found. + if (impulsePositionErrors.length > 0) { + let actual = impulsePositionErrors.map(x => x.actual); + let expected = impulsePositionErrors.map(x => x.expected); + should(actual, 'Actual impulse positions found').beEqualToArray(expected); + } +} diff --git a/testing/web-platform/tests/webaudio/resources/merger-testing.js b/testing/web-platform/tests/webaudio/resources/merger-testing.js new file mode 100644 index 0000000000..4477ec0a1f --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/merger-testing.js @@ -0,0 +1,24 @@ +// This file is for the audiochannelmerger-* layout tests. +// Requires |audio-testing.js| to work properly. + +function testMergerInput(should, config) { + let context = new OfflineAudioContext(config.numberOfChannels, 128, 44100); + let merger = context.createChannelMerger(config.numberOfChannels); + let source = context.createBufferSource(); + source.buffer = createConstantBuffer(context, 128, config.testBufferContent); + + // Connect the output of source into the specified input of merger. 
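+  // Editor's illustrative note (not in the upstream file): a hypothetical
+  // config of {numberOfChannels: 2, testBufferContent: [1],
+  // mergerInputIndex: 1, expected: [0, 1]} routes a constant mono source into
+  // input #1 of a two-input merger, so output channel 0 stays silent and
+  // output channel 1 carries the constant.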
+ if (config.mergerInputIndex) + source.connect(merger, 0, config.mergerInputIndex); + else + source.connect(merger); + merger.connect(context.destination); + source.start(); + + return context.startRendering().then(function(buffer) { + let prefix = config.testBufferContent.length + '-channel source: '; + for (let i = 0; i < config.numberOfChannels; i++) + should(buffer.getChannelData(i), prefix + 'Channel #' + i) + .beConstantValueOf(config.expected[i]); + }); +} diff --git a/testing/web-platform/tests/webaudio/resources/mix-testing.js b/testing/web-platform/tests/webaudio/resources/mix-testing.js new file mode 100644 index 0000000000..63c8e1aca6 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/mix-testing.js @@ -0,0 +1,23 @@ +let toneLengthSeconds = 1; + +// Create a buffer with multiple channels. +// The signal frequency in each channel is the multiple of that in the first +// channel. +function createToneBuffer(context, frequency, duration, numberOfChannels) { + let sampleRate = context.sampleRate; + let sampleFrameLength = duration * sampleRate; + + let audioBuffer = + context.createBuffer(numberOfChannels, sampleFrameLength, sampleRate); + + let n = audioBuffer.length; + + for (let k = 0; k < numberOfChannels; ++k) { + let data = audioBuffer.getChannelData(k); + + for (let i = 0; i < n; ++i) + data[i] = Math.sin(frequency * (k + 1) * 2.0 * Math.PI * i / sampleRate); + } + + return audioBuffer; +} diff --git a/testing/web-platform/tests/webaudio/resources/mixing-rules.js b/testing/web-platform/tests/webaudio/resources/mixing-rules.js new file mode 100644 index 0000000000..e06a1468a3 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/mixing-rules.js @@ -0,0 +1,350 @@ +// Utilities for mixing rule testing. +// http://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing + + +/** + * Create an n-channel buffer, with all sample data zero except for a shifted + * impulse. The impulse position depends on the channel index. For example, for + * a 4-channel buffer: + * channel 0: 1 0 0 0 0 0 0 0 + * channel 1: 0 1 0 0 0 0 0 0 + * channel 2: 0 0 1 0 0 0 0 0 + * channel 3: 0 0 0 1 0 0 0 0 + * @param {AudioContext} context Associated AudioContext. + * @param {Number} numberOfChannels Number of channels of test buffer. + * @param {Number} frameLength Buffer length in frames. + * @return {AudioBuffer} + */ +function createShiftedImpulseBuffer(context, numberOfChannels, frameLength) { + let shiftedImpulseBuffer = + context.createBuffer(numberOfChannels, frameLength, context.sampleRate); + for (let channel = 0; channel < numberOfChannels; ++channel) { + let data = shiftedImpulseBuffer.getChannelData(channel); + data[channel] = 1; + } + + return shiftedImpulseBuffer; +} + +/** + * Create a string that displays the content of AudioBuffer. + * @param {AudioBuffer} audioBuffer AudioBuffer object to stringify. + * @param {Number} frameLength Number of frames to be printed. + * @param {Number} frameOffset Starting frame position for printing. + * @return {String} + */ +function stringifyBuffer(audioBuffer, frameLength, frameOffset) { + frameOffset = (frameOffset || 0); + + let stringifiedBuffer = ''; + for (let channel = 0; channel < audioBuffer.numberOfChannels; ++channel) { + let channelData = audioBuffer.getChannelData(channel); + for (let i = 0; i < frameLength; ++i) + stringifiedBuffer += channelData[i + frameOffset] + ' '; + stringifiedBuffer += '\n'; + } + + return stringifiedBuffer; +} + +/** + * Compute number of channels from the connection. 
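+ * (Editor's illustrative note, not part of the upstream file: for the
+ * connections string '128' described below, channelCountMode 'max' computes
+ * 8, 'clamped-max' with channelCount 2 computes 2, and 'explicit' simply
+ * returns channelCount.)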
+ * http://webaudio.github.io/web-audio-api/#dfn-computednumberofchannels + * @param {String} connections A string specifies the connection. For + * example, the string "128" means 3 + * connections, having 1, 2, and 8 channels + * respectively. + * @param {Number} channelCount Channel count. + * @param {String} channelCountMode Channel count mode. + * @return {Number} Computed number of channels. + */ +function computeNumberOfChannels(connections, channelCount, channelCountMode) { + if (channelCountMode == 'explicit') + return channelCount; + + // Must have at least one channel. + let computedNumberOfChannels = 1; + + // Compute "computedNumberOfChannels" based on all the connections. + for (let i = 0; i < connections.length; ++i) { + let connectionNumberOfChannels = parseInt(connections[i]); + computedNumberOfChannels = + Math.max(computedNumberOfChannels, connectionNumberOfChannels); + } + + if (channelCountMode == 'clamped-max') + computedNumberOfChannels = Math.min(computedNumberOfChannels, channelCount); + + return computedNumberOfChannels; +} + +/** + * Apply up/down-mixing (in-place summing) based on 'speaker' interpretation. + * @param {AudioBuffer} input Input audio buffer. + * @param {AudioBuffer} output Output audio buffer. + */ +function speakersSum(input, output) { + if (input.length != output.length) { + throw '[mixing-rules.js] speakerSum(): buffer lengths mismatch (input: ' + + input.length + ', output: ' + output.length + ')'; + } + + if (input.numberOfChannels === output.numberOfChannels) { + for (let channel = 0; channel < output.numberOfChannels; ++channel) { + let inputChannel = input.getChannelData(channel); + let outputChannel = output.getChannelData(channel); + for (let i = 0; i < outputChannel.length; i++) + outputChannel[i] += inputChannel[i]; + } + } else if (input.numberOfChannels < output.numberOfChannels) { + processUpMix(input, output); + } else { + processDownMix(input, output); + } +} + +/** + * In-place summing to |output| based on 'discrete' channel interpretation. + * @param {AudioBuffer} input Input audio buffer. + * @param {AudioBuffer} output Output audio buffer. + */ +function discreteSum(input, output) { + if (input.length != output.length) { + throw '[mixing-rules.js] speakerSum(): buffer lengths mismatch (input: ' + + input.length + ', output: ' + output.length + ')'; + } + + let numberOfChannels = + Math.min(input.numberOfChannels, output.numberOfChannels) + + for (let channel = 0; channel < numberOfChannels; ++channel) { + let inputChannel = input.getChannelData(channel); + let outputChannel = output.getChannelData(channel); + for (let i = 0; i < outputChannel.length; i++) + outputChannel[i] += inputChannel[i]; + } +} + +/** + * Perform up-mix by in-place summing to |output| buffer. + * @param {AudioBuffer} input Input audio buffer. + * @param {AudioBuffer} output Output audio buffer. 
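+ *
+ * (Editor's illustrative note, not part of the upstream file: for example, a
+ * mono input summed into a 5.1 output is added to the center channel only,
+ * while mono into stereo is added to both left and right, matching the
+ * branches below.)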
+ */ +function processUpMix(input, output) { + let numberOfInputChannels = input.numberOfChannels; + let numberOfOutputChannels = output.numberOfChannels; + let i, length = output.length; + + // Up-mixing: 1 -> 2, 1 -> 4 + // output.L += input + // output.R += input + // output.SL += 0 (in the case of 1 -> 4) + // output.SR += 0 (in the case of 1 -> 4) + if ((numberOfInputChannels === 1 && numberOfOutputChannels === 2) || + (numberOfInputChannels === 1 && numberOfOutputChannels === 4)) { + let inputChannel = input.getChannelData(0); + let outputChannel0 = output.getChannelData(0); + let outputChannel1 = output.getChannelData(1); + for (i = 0; i < length; i++) { + outputChannel0[i] += inputChannel[i]; + outputChannel1[i] += inputChannel[i]; + } + + return; + } + + // Up-mixing: 1 -> 5.1 + // output.L += 0 + // output.R += 0 + // output.C += input + // output.LFE += 0 + // output.SL += 0 + // output.SR += 0 + if (numberOfInputChannels == 1 && numberOfOutputChannels == 6) { + let inputChannel = input.getChannelData(0); + let outputChannel2 = output.getChannelData(2); + for (i = 0; i < length; i++) + outputChannel2[i] += inputChannel[i]; + + return; + } + + // Up-mixing: 2 -> 4, 2 -> 5.1 + // output.L += input.L + // output.R += input.R + // output.C += 0 (in the case of 2 -> 5.1) + // output.LFE += 0 (in the case of 2 -> 5.1) + // output.SL += 0 + // output.SR += 0 + if ((numberOfInputChannels === 2 && numberOfOutputChannels === 4) || + (numberOfInputChannels === 2 && numberOfOutputChannels === 6)) { + let inputChannel0 = input.getChannelData(0); + let inputChannel1 = input.getChannelData(1); + let outputChannel0 = output.getChannelData(0); + let outputChannel1 = output.getChannelData(1); + for (i = 0; i < length; i++) { + outputChannel0[i] += inputChannel0[i]; + outputChannel1[i] += inputChannel1[i]; + } + + return; + } + + // Up-mixing: 4 -> 5.1 + // output.L += input.L + // output.R += input.R + // output.C += 0 + // output.LFE += 0 + // output.SL += input.SL + // output.SR += input.SR + if (numberOfInputChannels === 4 && numberOfOutputChannels === 6) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.SL + let inputChannel3 = input.getChannelData(3); // input.SR + let outputChannel0 = output.getChannelData(0); // output.L + let outputChannel1 = output.getChannelData(1); // output.R + let outputChannel4 = output.getChannelData(4); // output.SL + let outputChannel5 = output.getChannelData(5); // output.SR + for (i = 0; i < length; i++) { + outputChannel0[i] += inputChannel0[i]; + outputChannel1[i] += inputChannel1[i]; + outputChannel4[i] += inputChannel2[i]; + outputChannel5[i] += inputChannel3[i]; + } + + return; + } + + // All other cases, fall back to the discrete sum. + discreteSum(input, output); +} + +/** + * Perform down-mix by in-place summing to |output| buffer. + * @param {AudioBuffer} input Input audio buffer. + * @param {AudioBuffer} output Output audio buffer. 
+ */ +function processDownMix(input, output) { + let numberOfInputChannels = input.numberOfChannels; + let numberOfOutputChannels = output.numberOfChannels; + let i, length = output.length; + + // Down-mixing: 2 -> 1 + // output += 0.5 * (input.L + input.R) + if (numberOfInputChannels === 2 && numberOfOutputChannels === 1) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let outputChannel0 = output.getChannelData(0); + for (i = 0; i < length; i++) + outputChannel0[i] += 0.5 * (inputChannel0[i] + inputChannel1[i]); + + return; + } + + // Down-mixing: 4 -> 1 + // output += 0.25 * (input.L + input.R + input.SL + input.SR) + if (numberOfInputChannels === 4 && numberOfOutputChannels === 1) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.SL + let inputChannel3 = input.getChannelData(3); // input.SR + let outputChannel0 = output.getChannelData(0); + for (i = 0; i < length; i++) { + outputChannel0[i] += 0.25 * + (inputChannel0[i] + inputChannel1[i] + inputChannel2[i] + + inputChannel3[i]); + } + + return; + } + + // Down-mixing: 5.1 -> 1 + // output += sqrt(1/2) * (input.L + input.R) + input.C + // + 0.5 * (input.SL + input.SR) + if (numberOfInputChannels === 6 && numberOfOutputChannels === 1) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.C + let inputChannel4 = input.getChannelData(4); // input.SL + let inputChannel5 = input.getChannelData(5); // input.SR + let outputChannel0 = output.getChannelData(0); + let scaleSqrtHalf = Math.sqrt(0.5); + for (i = 0; i < length; i++) { + outputChannel0[i] += + scaleSqrtHalf * (inputChannel0[i] + inputChannel1[i]) + + inputChannel2[i] + 0.5 * (inputChannel4[i] + inputChannel5[i]); + } + + return; + } + + // Down-mixing: 4 -> 2 + // output.L += 0.5 * (input.L + input.SL) + // output.R += 0.5 * (input.R + input.SR) + if (numberOfInputChannels == 4 && numberOfOutputChannels == 2) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.SL + let inputChannel3 = input.getChannelData(3); // input.SR + let outputChannel0 = output.getChannelData(0); // output.L + let outputChannel1 = output.getChannelData(1); // output.R + for (i = 0; i < length; i++) { + outputChannel0[i] += 0.5 * (inputChannel0[i] + inputChannel2[i]); + outputChannel1[i] += 0.5 * (inputChannel1[i] + inputChannel3[i]); + } + + return; + } + + // Down-mixing: 5.1 -> 2 + // output.L += input.L + sqrt(1/2) * (input.C + input.SL) + // output.R += input.R + sqrt(1/2) * (input.C + input.SR) + if (numberOfInputChannels == 6 && numberOfOutputChannels == 2) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.C + let inputChannel4 = input.getChannelData(4); // input.SL + let inputChannel5 = input.getChannelData(5); // input.SR + let outputChannel0 = output.getChannelData(0); // output.L + let outputChannel1 = output.getChannelData(1); // output.R + let scaleSqrtHalf = Math.sqrt(0.5); + for (i = 0; i < length; i++) { + outputChannel0[i] += inputChannel0[i] + + scaleSqrtHalf * (inputChannel2[i] + inputChannel4[i]); + outputChannel1[i] += inputChannel1[i] + + 
scaleSqrtHalf * (inputChannel2[i] + inputChannel5[i]); + } + + return; + } + + // Down-mixing: 5.1 -> 4 + // output.L += input.L + sqrt(1/2) * input.C + // output.R += input.R + sqrt(1/2) * input.C + // output.SL += input.SL + // output.SR += input.SR + if (numberOfInputChannels === 6 && numberOfOutputChannels === 4) { + let inputChannel0 = input.getChannelData(0); // input.L + let inputChannel1 = input.getChannelData(1); // input.R + let inputChannel2 = input.getChannelData(2); // input.C + let inputChannel4 = input.getChannelData(4); // input.SL + let inputChannel5 = input.getChannelData(5); // input.SR + let outputChannel0 = output.getChannelData(0); // output.L + let outputChannel1 = output.getChannelData(1); // output.R + let outputChannel2 = output.getChannelData(2); // output.SL + let outputChannel3 = output.getChannelData(3); // output.SR + let scaleSqrtHalf = Math.sqrt(0.5); + for (i = 0; i < length; i++) { + outputChannel0[i] += inputChannel0[i] + scaleSqrtHalf * inputChannel2[i]; + outputChannel1[i] += inputChannel1[i] + scaleSqrtHalf * inputChannel2[i]; + outputChannel2[i] += inputChannel4[i]; + outputChannel3[i] += inputChannel5[i]; + } + + return; + } + + // All other cases, fall back to the discrete sum. + discreteSum(input, output); +} diff --git a/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js b/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js new file mode 100644 index 0000000000..ad0631670d --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js @@ -0,0 +1,165 @@ +// Use a power of two to eliminate round-off converting from frames to time. +let sampleRate = 32768; + +// How many grains to play. +let numberOfTests = 100; + +// Duration of each grain to be played. Make a whole number of frames +let duration = Math.floor(0.01 * sampleRate) / sampleRate; + +// A little extra bit of silence between grain boundaries. Must be a whole +// number of frames. +let grainGap = Math.floor(0.005 * sampleRate) / sampleRate; + +// Time step between the start of each grain. We need to add a little +// bit of silence so we can detect grain boundaries +let timeStep = duration + grainGap; + +// Time step between the start for each grain. Must be a whole number of +// frames. +let grainOffsetStep = Math.floor(0.001 * sampleRate) / sampleRate; + +// How long to render to cover all of the grains. +let renderTime = (numberOfTests + 1) * timeStep; + +let context; +let renderedData; + +// Create a buffer containing the data that we want. The function f +// returns the desired value at sample frame k. +function createSignalBuffer(context, f) { + // Make sure the buffer has enough data for all of the possible + // grain offsets and durations. The additional 1 is for any + // round-off errors. + let signalLength = + Math.floor(1 + sampleRate * (numberOfTests * grainOffsetStep + duration)); + + let buffer = context.createBuffer(2, signalLength, sampleRate); + let data = buffer.getChannelData(0); + + for (let k = 0; k < signalLength; ++k) { + data[k] = f(k); + } + + return buffer; +} + +// From the data array, find the start and end sample frame for each +// grain. This depends on the data having 0's between grain, and +// that the grain is always strictly non-zero. +function findStartAndEndSamples(data) { + let nSamples = data.length; + + let startTime = []; + let endTime = []; + let lookForStart = true; + + // Look through the rendered data to find the start and stop + // times of each grain. 
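As an editorial aside on the boundary scan that follows: it relies on every grain being strictly non-zero and on at least one zero-valued frame separating neighbouring grains, and it reports the end index exclusively (the first zero frame after the grain). A self-contained sketch of the same scan, using made-up sample values, behaves like this:

// Standalone sketch of the grain-boundary scan (illustration only, not part of
// the test file).
function scanGrainBoundaries(data) {
  let start = [];
  let end = [];
  let lookForStart = true;
  for (let k = 0; k < data.length; ++k) {
    if (lookForStart && data[k]) {
      start.push(k);         // First non-zero frame of a grain.
      lookForStart = false;
    } else if (!lookForStart && !data[k]) {
      end.push(k);           // First zero frame after the grain (exclusive end).
      lookForStart = true;
    }
  }
  return {start: start, end: end};
}

// One grain occupying frames 2..4 inclusive:
scanGrainBoundaries([0, 0, 0.5, 1, 0.25, 0, 0]);  // {start: [2], end: [5]}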
+ for (let k = 0; k < nSamples; ++k) { + if (lookForStart) { + // Find a non-zero point and record the start. We're not + // concerned with the value in this test, only that the + // grain started here. + if (renderedData[k]) { + startTime.push(k); + lookForStart = false; + } + } else { + // Find a zero and record the end of the grain. + if (!renderedData[k]) { + endTime.push(k); + lookForStart = true; + } + } + } + + return {start: startTime, end: endTime}; +} + +function playGrain(context, source, time, offset, duration) { + let bufferSource = context.createBufferSource(); + + bufferSource.buffer = source; + bufferSource.connect(context.destination); + bufferSource.start(time, offset, duration); +} + +// Play out all grains. Returns a object containing two arrays, one +// for the start time and one for the grain offset time. +function playAllGrains(context, source, numberOfNotes) { + let startTimes = new Array(numberOfNotes); + let offsets = new Array(numberOfNotes); + + for (let k = 0; k < numberOfNotes; ++k) { + let timeOffset = k * timeStep; + let grainOffset = k * grainOffsetStep; + + playGrain(context, source, timeOffset, grainOffset, duration); + startTimes[k] = timeOffset; + offsets[k] = grainOffset; + } + + return {startTimes: startTimes, grainOffsetTimes: offsets}; +} + +// Verify that the start and end frames for each grain match our +// expected start and end frames. +function verifyStartAndEndFrames(startEndFrames, should) { + let startFrames = startEndFrames.start; + let endFrames = startEndFrames.end; + + // Count of how many grains started at the incorrect time. + let errorCountStart = 0; + + // Count of how many grains ended at the incorrect time. + let errorCountEnd = 0; + + should( + startFrames.length == endFrames.length, 'Found all grain starts and ends') + .beTrue(); + + should(startFrames.length, 'Number of start frames').beEqualTo(numberOfTests); + should(endFrames.length, 'Number of end frames').beEqualTo(numberOfTests); + + // Examine the start and stop times to see if they match our + // expectations. + for (let k = 0; k < startFrames.length; ++k) { + let expectedStart = timeToSampleFrame(k * timeStep, sampleRate); + // The end point is the duration. + let expectedEnd = expectedStart + + grainLengthInSampleFrames(k * grainOffsetStep, duration, sampleRate); + + if (startFrames[k] != expectedStart) + ++errorCountStart; + if (endFrames[k] != expectedEnd) + ++errorCountEnd; + + should([startFrames[k], endFrames[k]], 'Pulse ' + k + ' boundary') + .beEqualToArray([expectedStart, expectedEnd]); + } + + // Check that all the grains started or ended at the correct time. 
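To make the verification loop above concrete, here is a rough sketch of the boundary arithmetic it expects, assuming the shared helpers timeToSampleFrame() and grainLengthInSampleFrames() (defined in the common audit utilities, not in this diff) reduce to rounding time * sampleRate to whole frames when, as here, every grain offset is itself a whole number of frames:

// Hedged sketch of the expected grain boundaries for the first few grains.
let rate = 32768;
let durFrames = Math.floor(0.01 * rate);    // 327 frames per grain
let gapFrames = Math.floor(0.005 * rate);   // 163 frames of silence
let stepFrames = durFrames + gapFrames;     // 490 frames between grain starts

for (let k = 0; k < 3; ++k) {
  let expectedStart = k * stepFrames;            // 0, 490, 980
  let expectedEnd = expectedStart + durFrames;   // 327, 817, 1307
  console.log('grain ' + k + ': [' + expectedStart + ', ' + expectedEnd + ')');
}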
+ if (!errorCountStart) { + should( + startFrames.length, 'Number of grains that started at the correct time') + .beEqualTo(numberOfTests); + } else { + should( + errorCountStart, + 'Number of grains out of ' + numberOfTests + + 'that started at the wrong time') + .beEqualTo(0); + } + + if (!errorCountEnd) { + should(endFrames.length, 'Number of grains that ended at the correct time') + .beEqualTo(numberOfTests); + } else { + should( + errorCountEnd, + 'Number of grains out of ' + numberOfTests + + ' that ended at the wrong time') + .beEqualTo(0); + } +} diff --git a/testing/web-platform/tests/webaudio/resources/panner-formulas.js b/testing/web-platform/tests/webaudio/resources/panner-formulas.js new file mode 100644 index 0000000000..ae6f516668 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/panner-formulas.js @@ -0,0 +1,190 @@ +// For the record, these distance formulas were taken from the OpenAL +// spec +// (http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.pdf), +// not the code. The Web Audio spec follows the OpenAL formulas. + +function linearDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + let dref = Math.min(panner.refDistance, panner.maxDistance); + let dmax = Math.max(panner.refDistance, panner.maxDistance); + distance = Math.max(Math.min(distance, dmax), dref); + let rolloff = Math.max(Math.min(panner.rolloffFactor, 1), 0); + if (dref === dmax) + return 1 - rolloff; + + let gain = (1 - rolloff * (distance - dref) / (dmax - dref)); + + return gain; +} + +function inverseDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + distance = Math.max(distance, panner.refDistance); + let rolloff = panner.rolloffFactor; + let gain = panner.refDistance / + (panner.refDistance + + rolloff * (Math.max(distance, panner.refDistance) - panner.refDistance)); + + return gain; +} + +function exponentialDistance(panner, x, y, z) { + let distance = Math.sqrt(x * x + y * y + z * z); + distance = Math.max(distance, panner.refDistance); + let rolloff = panner.rolloffFactor; + let gain = Math.pow(distance / panner.refDistance, -rolloff); + + return gain; +} + +// Simple implementations of 3D vectors implemented as a 3-element array. + +// x - y +function vec3Sub(x, y) { + let z = new Float32Array(3); + z[0] = x[0] - y[0]; + z[1] = x[1] - y[1]; + z[2] = x[2] - y[2]; + + return z; +} + +// x/|x| +function vec3Normalize(x) { + let mag = Math.hypot(...x); + return x.map(function(c) { + return c / mag; + }); +} + +// x == 0? 
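The three distance models defined above only read refDistance, maxDistance and rolloffFactor from the panner argument, so a plain object is enough to compare the gains they produce. A small illustrative check, with numbers chosen arbitrarily:

// Illustration only: a stand-in object instead of a real PannerNode.
let fakePanner = {refDistance: 1, maxDistance: 10, rolloffFactor: 1};

// Source 5 units away along the x axis.
linearDistance(fakePanner, 5, 0, 0);       // 1 - 1 * (5 - 1) / (10 - 1), about 0.556
inverseDistance(fakePanner, 5, 0, 0);      // 1 / (1 + 1 * (5 - 1)) = 0.2
exponentialDistance(fakePanner, 5, 0, 0);  // Math.pow(5 / 1, -1) = 0.2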
+function vec3IsZero(x) { + return x[0] === 0 && x[1] === 0 && x[2] === 0; +} + +// Vector cross product +function vec3Cross(u, v) { + let cross = new Float32Array(3); + cross[0] = u[1] * v[2] - u[2] * v[1]; + cross[1] = u[2] * v[0] - u[0] * v[2]; + cross[2] = u[0] * v[1] - u[1] * v[0]; + return cross; +} + +// Dot product +function vec3Dot(x, y) { + return x[0] * y[0] + x[1] * y[1] + x[2] * y[2]; +} + +// a*x, for scalar a +function vec3Scale(a, x) { + return x.map(function(c) { + return a * c; + }); +} + +function calculateAzimuth(source, listener, listenerForward, listenerUp) { + let sourceListener = vec3Sub(source, listener); + + if (vec3IsZero(sourceListener)) + return 0; + + sourceListener = vec3Normalize(sourceListener); + + let listenerRight = vec3Normalize(vec3Cross(listenerForward, listenerUp)); + let listenerForwardNorm = vec3Normalize(listenerForward); + + let up = vec3Cross(listenerRight, listenerForwardNorm); + let upProjection = vec3Dot(sourceListener, up); + + let projectedSource = + vec3Normalize(vec3Sub(sourceListener, vec3Scale(upProjection, up))); + + let azimuth = + 180 / Math.PI * Math.acos(vec3Dot(projectedSource, listenerRight)); + + // Source in front or behind the listener + let frontBack = vec3Dot(projectedSource, listenerForwardNorm); + if (frontBack < 0) + azimuth = 360 - azimuth; + + // Make azimuth relative to "front" and not "right" listener vector. + if (azimuth >= 0 && azimuth <= 270) + azimuth = 90 - azimuth; + else + azimuth = 450 - azimuth; + + // We don't need elevation, so we're skipping that computation. + return azimuth; +} + +// Map our position angle to the azimuth angle (in degrees). +// +// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg. +function angleToAzimuth(angle) { + return 90 - angle * 180 / Math.PI; +} + +// The gain caused by the EQUALPOWER panning model +function equalPowerGain(azimuth, numberOfChannels) { + let halfPi = Math.PI / 2; + + if (azimuth < -90) + azimuth = -180 - azimuth; + else + azimuth = 180 - azimuth; + + if (numberOfChannels == 1) { + let panPosition = (azimuth + 90) / 180; + + let gainL = Math.cos(halfPi * panPosition); + let gainR = Math.sin(halfPi * panPosition); + + return {left: gainL, right: gainR}; + } else { + if (azimuth <= 0) { + let panPosition = (azimuth + 90) / 90; + + let gainL = Math.cos(halfPi * panPosition); + let gainR = Math.sin(halfPi * panPosition); + + return {left: gainL, right: gainR}; + } else { + let panPosition = azimuth / 90; + + let gainL = Math.cos(halfPi * panPosition); + let gainR = Math.sin(halfPi * panPosition); + + return {left: gainL, right: gainR}; + } + } +} + +function applyPanner(azimuth, srcL, srcR, numberOfChannels) { + let length = srcL.length; + let outL = new Float32Array(length); + let outR = new Float32Array(length); + + if (numberOfChannels == 1) { + for (let k = 0; k < length; ++k) { + let gains = equalPowerGain(azimuth[k], numberOfChannels); + + outL[k] = srcL[k] * gains.left; + outR[k] = srcR[k] * gains.right; + } + } else { + for (let k = 0; k < length; ++k) { + let gains = equalPowerGain(azimuth[k], numberOfChannels); + + if (azimuth[k] <= 0) { + outL[k] = srcL[k] + srcR[k] * gains.left; + outR[k] = srcR[k] * gains.right; + } else { + outL[k] = srcL[k] * gains.left; + outR[k] = srcR[k] + srcL[k] * gains.right; + } + } + } + + return {left: outL, right: outR}; +} diff --git a/testing/web-platform/tests/webaudio/resources/panner-model-testing.js b/testing/web-platform/tests/webaudio/resources/panner-model-testing.js new file mode 100644 index 
0000000000..4df3e17813 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/panner-model-testing.js @@ -0,0 +1,184 @@ +// Use a power of two to eliminate round-off when converting frames to time and +// vice versa. +let sampleRate = 32768; + +let numberOfChannels = 1; + +// Time step when each panner node starts. Make sure it starts on a frame +// boundary. +let timeStep = Math.floor(0.001 * sampleRate) / sampleRate; + +// Length of the impulse signal. +let pulseLengthFrames = Math.round(timeStep * sampleRate); + +// How many panner nodes to create for the test +let nodesToCreate = 100; + +// Be sure we render long enough for all of our nodes. +let renderLengthSeconds = timeStep * (nodesToCreate + 1); + +// These are global mostly for debugging. +let context; +let impulse; +let bufferSource; +let panner; +let position; +let time; + +let renderedBuffer; +let renderedLeft; +let renderedRight; + +function createGraph(context, nodeCount, positionSetter) { + bufferSource = new Array(nodeCount); + panner = new Array(nodeCount); + position = new Array(nodeCount); + time = new Array(nodeCount); + // Angle between panner locations. (nodeCount - 1 because we want + // to include both 0 and 180 deg. + let angleStep = Math.PI / (nodeCount - 1); + + if (numberOfChannels == 2) { + impulse = createStereoImpulseBuffer(context, pulseLengthFrames); + } else + impulse = createImpulseBuffer(context, pulseLengthFrames); + + for (let k = 0; k < nodeCount; ++k) { + bufferSource[k] = context.createBufferSource(); + bufferSource[k].buffer = impulse; + + panner[k] = context.createPanner(); + panner[k].panningModel = 'equalpower'; + panner[k].distanceModel = 'linear'; + + let angle = angleStep * k; + position[k] = {angle: angle, x: Math.cos(angle), z: Math.sin(angle)}; + positionSetter(panner[k], position[k].x, 0, position[k].z); + + bufferSource[k].connect(panner[k]); + panner[k].connect(context.destination); + + // Start the source + time[k] = k * timeStep; + bufferSource[k].start(time[k]); + } +} + +function createTestAndRun( + context, should, nodeCount, numberOfSourceChannels, positionSetter) { + numberOfChannels = numberOfSourceChannels; + + createGraph(context, nodeCount, positionSetter); + + return context.startRendering().then(buffer => checkResult(buffer, should)); +} + +// Map our position angle to the azimuth angle (in degrees). +// +// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg. +function angleToAzimuth(angle) { + return 90 - angle * 180 / Math.PI; +} + +// The gain caused by the EQUALPOWER panning model +function equalPowerGain(angle) { + let azimuth = angleToAzimuth(angle); + + if (numberOfChannels == 1) { + let panPosition = (azimuth + 90) / 180; + + let gainL = Math.cos(0.5 * Math.PI * panPosition); + let gainR = Math.sin(0.5 * Math.PI * panPosition); + + return {left: gainL, right: gainR}; + } else { + if (azimuth <= 0) { + let panPosition = (azimuth + 90) / 90; + + let gainL = 1 + Math.cos(0.5 * Math.PI * panPosition); + let gainR = Math.sin(0.5 * Math.PI * panPosition); + + return {left: gainL, right: gainR}; + } else { + let panPosition = azimuth / 90; + + let gainL = Math.cos(0.5 * Math.PI * panPosition); + let gainR = 1 + Math.sin(0.5 * Math.PI * panPosition); + + return {left: gainL, right: gainR}; + } + } +} + +function checkResult(renderedBuffer, should) { + renderedLeft = renderedBuffer.getChannelData(0); + renderedRight = renderedBuffer.getChannelData(1); + + // The max error we allow between the rendered impulse and the + // expected value. 
This value is experimentally determined. Set + // to 0 to make the test fail to see what the actual error is. + let maxAllowedError = 1.1597e-6; + + let success = true; + + // Number of impulses found in the rendered result. + let impulseCount = 0; + + // Max (relative) error and the index of the maxima for the left + // and right channels. + let maxErrorL = 0; + let maxErrorIndexL = 0; + let maxErrorR = 0; + let maxErrorIndexR = 0; + + // Number of impulses that don't match our expected locations. + let timeCount = 0; + + // Locations of where the impulses aren't at the expected locations. + let timeErrors = new Array(); + + for (let k = 0; k < renderedLeft.length; ++k) { + // We assume that the left and right channels start at the same instant. + if (renderedLeft[k] != 0 || renderedRight[k] != 0) { + // The expected gain for the left and right channels. + let pannerGain = equalPowerGain(position[impulseCount].angle); + let expectedL = pannerGain.left; + let expectedR = pannerGain.right; + + // Absolute error in the gain. + let errorL = Math.abs(renderedLeft[k] - expectedL); + let errorR = Math.abs(renderedRight[k] - expectedR); + + if (Math.abs(errorL) > maxErrorL) { + maxErrorL = Math.abs(errorL); + maxErrorIndexL = impulseCount; + } + if (Math.abs(errorR) > maxErrorR) { + maxErrorR = Math.abs(errorR); + maxErrorIndexR = impulseCount; + } + + // Keep track of the impulses that didn't show up where we + // expected them to be. + let expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate); + if (k != expectedOffset) { + timeErrors[timeCount] = {actual: k, expected: expectedOffset}; + ++timeCount; + } + ++impulseCount; + } + } + + should(impulseCount, 'Number of impulses found').beEqualTo(nodesToCreate); + + should( + timeErrors.map(x => x.actual), + 'Offsets of impulses at the wrong position') + .beEqualToArray(timeErrors.map(x => x.expected)); + + should(maxErrorL, 'Error in left channel gain values') + .beLessThanOrEqualTo(maxAllowedError); + + should(maxErrorR, 'Error in right channel gain values') + .beLessThanOrEqualTo(maxAllowedError); +} diff --git a/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav b/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav Binary files differnew file mode 100644 index 0000000000..f660c3c4b8 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav diff --git a/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js b/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js new file mode 100644 index 0000000000..0d2ea12f6d --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js @@ -0,0 +1,45 @@ +// Test that exceptions are throw for invalid values for start and +// stop. +function testStartStop(should, node, options) { + // Test non-finite values for start. 
These should all throw a TypeError + const nonFiniteValues = [NaN, Infinity, -Infinity]; + + nonFiniteValues.forEach(time => { + should(() => { + node.start(time); + }, `start(${time})`) + .throw(TypeError); + }); + + should(() => { + node.stop(); + }, 'Calling stop() before start()').throw(DOMException, 'InvalidStateError'); + + should(() => { + node.start(-1); + }, 'start(-1)').throw(RangeError); + + if (options) { + options.forEach(test => { + should(() => {node.start(...test.args)}, + 'start(' + test.args + ')').throw(test.errorType); + }); + } + + node.start(); + should(() => { + node.start(); + }, 'Calling start() twice').throw(DOMException, 'InvalidStateError'); + should(() => { + node.stop(-1); + }, 'stop(-1)').throw(RangeError); + + // Test non-finite stop times + nonFiniteValues.forEach(time => { + should(() => { + node.stop(time); + }, `stop(${time})`) + .throw(TypeError); + }); +} + diff --git a/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js b/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js new file mode 100644 index 0000000000..6ea5eb6269 --- /dev/null +++ b/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js @@ -0,0 +1,205 @@ +let StereoPannerTest = (function() { + + // Constants + let PI_OVER_TWO = Math.PI * 0.5; + + // Use a power of two to eliminate any round-off when converting frames to + // time. + let gSampleRate = 32768; + + // Time step when each panner node starts. Make sure this is on a frame boundary. + let gTimeStep = Math.floor(0.001 * gSampleRate) / gSampleRate; + + // How many panner nodes to create for the test + let gNodesToCreate = 100; + + // Total render length for all of our nodes. + let gRenderLength = gTimeStep * (gNodesToCreate + 1) + gSampleRate; + + // Calculates channel gains based on equal power panning model. + // See: http://webaudio.github.io/web-audio-api/#panning-algorithm + function getChannelGain(pan, numberOfChannels) { + // The internal panning clips the pan value between -1, 1. + pan = Math.min(Math.max(pan, -1), 1); + let gainL, gainR; + // Consider number of channels and pan value's polarity. + if (numberOfChannels == 1) { + let panRadian = (pan * 0.5 + 0.5) * PI_OVER_TWO; + gainL = Math.cos(panRadian); + gainR = Math.sin(panRadian); + } else { + let panRadian = (pan <= 0 ? pan + 1 : pan) * PI_OVER_TWO; + if (pan <= 0) { + gainL = 1 + Math.cos(panRadian); + gainR = Math.sin(panRadian); + } else { + gainL = Math.cos(panRadian); + gainR = 1 + Math.sin(panRadian); + } + } + return {gainL: gainL, gainR: gainR}; + } + + + /** + * Test implementation class. + * @param {Object} options Test options + * @param {Object} options.description Test description + * @param {Object} options.numberOfInputChannels Number of input channels + */ + function Test(should, options) { + // Primary test flag. + this.success = true; + + this.should = should; + this.context = null; + this.prefix = options.prefix; + this.numberOfInputChannels = (options.numberOfInputChannels || 1); + switch (this.numberOfInputChannels) { + case 1: + this.description = 'Test for mono input'; + break; + case 2: + this.description = 'Test for stereo input'; + break; + } + + // Onset time position of each impulse. + this.onsets = []; + + // Pan position value of each impulse. + this.panPositions = []; + + // Locations of where the impulses aren't at the expected locations. + this.errors = []; + + // The index of the current impulse being verified. 
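Since getChannelGain() above lives inside the module closure and is not exported, here is a standalone sketch of its mono branch, which maps pan values in [-1, 1] onto an angle in [0, PI/2] so that gainL^2 + gainR^2 stays at 1 (equal power):

// Editorial sketch mirroring the mono case of getChannelGain().
function monoPanGains(pan) {
  let clamped = Math.min(Math.max(pan, -1), 1);
  let panRadian = (clamped * 0.5 + 0.5) * Math.PI * 0.5;
  return {gainL: Math.cos(panRadian), gainR: Math.sin(panRadian)};
}

monoPanGains(-1);  // {gainL: 1, gainR: 0}, hard left
monoPanGains(0);   // both gains about 0.707, equal power in each channel
monoPanGains(1);   // {gainL: about 0, gainR: 1}, hard right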
+ this.impulseIndex = 0; + + // The max error we allow between the rendered impulse and the + // expected value. This value is experimentally determined. Set + // to 0 to make the test fail to see what the actual error is. + this.maxAllowedError = 1.284318e-7; + + // Max (absolute) error and the index of the maxima for the left + // and right channels. + this.maxErrorL = 0; + this.maxErrorR = 0; + this.maxErrorIndexL = 0; + this.maxErrorIndexR = 0; + + // The maximum value to use for panner pan value. The value will range from + // -panLimit to +panLimit. + this.panLimit = 1.0625; + } + + + Test.prototype.init = function() { + this.context = new OfflineAudioContext(2, gRenderLength, gSampleRate); + }; + + // Prepare an audio graph for testing. Create multiple impulse generators and + // panner nodes, then play them sequentially while varying the pan position. + Test.prototype.prepare = function() { + let impulse; + let impulseLength = Math.round(gTimeStep * gSampleRate); + let sources = []; + let panners = []; + + // Moves the pan value for each panner by pan step unit from -2 to 2. + // This is to check if the internal panning value is clipped properly. + let panStep = (2 * this.panLimit) / (gNodesToCreate - 1); + + if (this.numberOfInputChannels === 1) { + impulse = createImpulseBuffer(this.context, impulseLength); + } else { + impulse = createStereoImpulseBuffer(this.context, impulseLength); + } + + for (let i = 0; i < gNodesToCreate; i++) { + sources[i] = this.context.createBufferSource(); + panners[i] = this.context.createStereoPanner(); + sources[i].connect(panners[i]); + panners[i].connect(this.context.destination); + sources[i].buffer = impulse; + panners[i].pan.value = this.panPositions[i] = panStep * i - this.panLimit; + + // Store the onset time position of impulse. + this.onsets[i] = gTimeStep * i; + + sources[i].start(this.onsets[i]); + } + }; + + + Test.prototype.verify = function() { + let chanL = this.renderedBufferL; + let chanR = this.renderedBufferR; + for (let i = 0; i < chanL.length; i++) { + // Left and right channels must start at the same instant. + if (chanL[i] !== 0 || chanR[i] !== 0) { + // Get amount of error between actual and expected gain. + let expected = getChannelGain( + this.panPositions[this.impulseIndex], this.numberOfInputChannels); + let errorL = Math.abs(chanL[i] - expected.gainL); + let errorR = Math.abs(chanR[i] - expected.gainR); + + if (errorL > this.maxErrorL) { + this.maxErrorL = errorL; + this.maxErrorIndexL = this.impulseIndex; + } + if (errorR > this.maxErrorR) { + this.maxErrorR = errorR; + this.maxErrorIndexR = this.impulseIndex; + } + + // Keep track of the impulses that didn't show up where we expected + // them to be. 
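For reference, the onset-to-frame mapping used just below should be exact in this test, assuming timeToSampleFrame() (from the shared test helpers, not part of this diff) rounds time * sampleRate to a whole frame: gTimeStep is 32/32768 seconds, so impulse i is expected at frame 32 * i.

// Hedged sketch of the expected offsets for the first few impulses.
let onsetStepFrames = Math.floor(0.001 * 32768);   // 32 frames per onset step
for (let i = 0; i < 4; ++i) {
  console.log(Math.round(i * (onsetStepFrames / 32768) * 32768));  // 0, 32, 64, 96
}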
+ let expectedOffset = + timeToSampleFrame(this.onsets[this.impulseIndex], gSampleRate); + if (i != expectedOffset) { + this.errors.push({actual: i, expected: expectedOffset}); + } + + this.impulseIndex++; + } + } + }; + + + Test.prototype.showResult = function() { + this.should(this.impulseIndex, this.prefix + 'Number of impulses found') + .beEqualTo(gNodesToCreate); + + this.should( + this.errors.length, + this.prefix + 'Number of impulse at the wrong offset') + .beEqualTo(0); + + this.should(this.maxErrorL, this.prefix + 'Left channel error magnitude') + .beLessThanOrEqualTo(this.maxAllowedError); + + this.should(this.maxErrorR, this.prefix + 'Right channel error magnitude') + .beLessThanOrEqualTo(this.maxAllowedError); + }; + + Test.prototype.run = function() { + + this.init(); + this.prepare(); + + return this.context.startRendering().then(renderedBuffer => { + this.renderedBufferL = renderedBuffer.getChannelData(0); + this.renderedBufferR = renderedBuffer.getChannelData(1); + this.verify(); + this.showResult(); + }); + }; + + return { + create: function(should, options) { + return new Test(should, options); + } + }; + +})(); |
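For context, this is roughly how a test file consumes the helper above; the Audit task runner and the should() callback come from webaudio/resources/audit.js, which is outside this diff, and the task label here is made up for the sketch:

let audit = Audit.createTaskRunner();

audit.define('stereopanner-mono-input', (task, should) => {
  // Drive the StereoPannerTest helper defined above with a 1-channel source.
  StereoPannerTest.create(should, {prefix: 'Mono: ', numberOfInputChannels: 1})
      .run()
      .then(() => task.done());
});

audit.run();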