Diffstat (limited to 'dom/media/webaudio')
24 files changed, 117 insertions, 245 deletions
diff --git a/dom/media/webaudio/FFTBlock.cpp b/dom/media/webaudio/FFTBlock.cpp
index 6789ca0264..79fb934a00 100644
--- a/dom/media/webaudio/FFTBlock.cpp
+++ b/dom/media/webaudio/FFTBlock.cpp
@@ -30,15 +30,14 @@
 #include "FFTBlock.h"
+#include "FFVPXRuntimeLinker.h"
 
 #include <complex>
 
 namespace mozilla {
 
-typedef std::complex<double> Complex;
+FFmpegFFTFuncs FFTBlock::sFFTFuncs = {};
 
-#ifdef MOZ_LIBAV_FFT
-FFmpegRDFTFuncs FFTBlock::sRDFTFuncs;
-#endif
+using Complex = std::complex<double>;
 
 static double fdlibm_cabs(const Complex& z) {
   return fdlibm_hypot(real(z), imag(z));
@@ -51,16 +50,15 @@ static double fdlibm_carg(const Complex& z) {
 FFTBlock* FFTBlock::CreateInterpolatedBlock(const FFTBlock& block0,
                                             const FFTBlock& block1,
                                             double interp) {
-  FFTBlock* newBlock = new FFTBlock(block0.FFTSize());
+  uint32_t fftSize = block0.FFTSize();
+  FFTBlock* newBlock = new FFTBlock(fftSize, 1.0f / AssertedCast<float>(fftSize));
 
   newBlock->InterpolateFrequencyComponents(block0, block1, interp);
 
   // In the time-domain, the 2nd half of the response must be zero, to avoid
   // circular convolution aliasing...
-  int fftSize = newBlock->FFTSize();
   AlignedTArray<float> buffer(fftSize);
-  newBlock->GetInverseWithoutScaling(buffer.Elements());
-  AudioBufferInPlaceScale(buffer.Elements(), 1.0f / fftSize, fftSize / 2);
+  newBlock->GetInverse(buffer.Elements());
   PodZero(buffer.Elements() + fftSize / 2, fftSize / 2);
 
   // Put back into frequency domain.
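
A note on the FFTBlock.cpp hunk above: the explicit 1/FFTSize() post-scaling (GetInverseWithoutScaling followed by AudioBufferInPlaceScale) is replaced by a scale factor handed to the FFTBlock constructor, so GetInverse() already returns normalised samples. The sketch below only illustrates that idea with a naive O(N^2) inverse real DFT; it is not the FFTBlock implementation, and every name in it is invented for the example.

    // Naive inverse real-DFT with the normalisation folded into the transform,
    // mirroring how the patch passes 1/fftSize at construction time instead of
    // scaling the output afterwards.
    #include <cmath>
    #include <cstdio>
    #include <vector>

    std::vector<float> InverseRDFT(const std::vector<float>& re,
                                   const std::vector<float>& im, size_t n,
                                   float scale) {
      const double kPi = 3.141592653589793;
      std::vector<float> time(n, 0.0f);
      for (size_t i = 0; i < n; ++i) {
        double acc = re[0];
        if (n % 2 == 0) {
          acc += re[n / 2] * std::cos(kPi * double(i));  // Nyquist bin
        }
        for (size_t k = 1; k < (n + 1) / 2; ++k) {
          double phase = 2.0 * kPi * double(k) * double(i) / double(n);
          acc += 2.0 * (re[k] * std::cos(phase) - im[k] * std::sin(phase));
        }
        time[i] = float(acc) * scale;  // scaling applied inside the transform
      }
      return time;
    }

    int main() {
      const size_t n = 8;
      // Half-spectrum of a DC signal of amplitude 1: only bin 0 is non-zero.
      std::vector<float> re(n / 2 + 1, 0.0f), im(n / 2 + 1, 0.0f);
      re[0] = float(n);
      for (float v : InverseRDFT(re, im, n, 1.0f / float(n))) {
        std::printf("%g ", v);  // prints eight 1s
      }
      std::printf("\n");
      return 0;
    }
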
diff --git a/dom/media/webaudio/FFTBlock.h b/dom/media/webaudio/FFTBlock.h
index 6af882f9af..ecce39d7b1 100644
--- a/dom/media/webaudio/FFTBlock.h
+++ b/dom/media/webaudio/FFTBlock.h
@@ -7,31 +7,17 @@
 #ifndef FFTBlock_h_
 #define FFTBlock_h_
 
-#ifdef BUILD_ARM_NEON
-# include <cmath>
-# include "mozilla/arm.h"
-# include "dl/sp/api/omxSP.h"
-#endif
-
 #include "AlignedTArray.h"
 #include "AudioNodeEngine.h"
-#if defined(MOZ_LIBAV_FFT)
-# include "FFmpegRDFTTypes.h"
-# include "FFVPXRuntimeLinker.h"
-#else
-# include "kiss_fft/kiss_fftr.h"
-#endif
+#include "FFVPXRuntimeLinker.h"
+#include "ffvpx/tx.h"
 
 namespace mozilla {
 
 // This class defines an FFT block, loosely modeled after Blink's FFTFrame
 // class to make sharing code with Blink easy.
-// Currently it's implemented on top of KissFFT on all platforms.
 class FFTBlock final {
   union ComplexU {
-#if !defined(MOZ_LIBAV_FFT)
-    kiss_fft_cpx c;
-#endif
     float f[2];
     struct {
       float r;
@@ -41,28 +27,13 @@ class FFTBlock final {
 
  public:
   static void MainThreadInit() {
-#ifdef MOZ_LIBAV_FFT
     FFVPXRuntimeLinker::Init();
-    if (!sRDFTFuncs.init) {
-      FFVPXRuntimeLinker::GetRDFTFuncs(&sRDFTFuncs);
+    if (!sFFTFuncs.init) {
+      FFVPXRuntimeLinker::GetFFTFuncs(&sFFTFuncs);
     }
-#endif
   }
-
-  explicit FFTBlock(uint32_t aFFTSize)
-#if defined(MOZ_LIBAV_FFT)
-      : mAvRDFT(nullptr),
-        mAvIRDFT(nullptr)
-#else
-      : mKissFFT(nullptr),
-        mKissIFFT(nullptr)
-# ifdef BUILD_ARM_NEON
-        ,
-        mOmxFFT(nullptr),
-        mOmxIFFT(nullptr)
-# endif
-#endif
-  {
+  explicit FFTBlock(uint32_t aFFTSize, float aInverseScaling = 1.0f)
+      : mInverseScaling(aInverseScaling) {
     MOZ_COUNT_CTOR(FFTBlock);
     SetFFTSize(aFFTSize);
   }
@@ -83,63 +54,33 @@ class FFTBlock final {
       return;
     }
 
-#if defined(MOZ_LIBAV_FFT)
-    PodCopy(mOutputBuffer.Elements()->f, aData, mFFTSize);
-    sRDFTFuncs.calc(mAvRDFT, mOutputBuffer.Elements()->f);
-    // Recover packed Nyquist.
-    mOutputBuffer[mFFTSize / 2].r = mOutputBuffer[0].i;
-    mOutputBuffer[0].i = 0.0f;
-#else
-# ifdef BUILD_ARM_NEON
-    if (mozilla::supports_neon()) {
-      omxSP_FFTFwd_RToCCS_F32_Sfs(aData, mOutputBuffer.Elements()->f, mOmxFFT);
-    } else
-# endif
-    {
-      kiss_fftr(mKissFFT, aData, &(mOutputBuffer.Elements()->c));
-    }
+    mFn(mTxCtx, mOutputBuffer.Elements()->f, const_cast<float*>(aData),
+        2 * sizeof(float));
+#ifdef DEBUG
+    mInversePerformed = false;
 #endif
   }
 
-  // Inverse-transform internal data and store the resulting FFTSize()
-  // points in aDataOut.
-  void GetInverse(float* aDataOut) {
-    GetInverseWithoutScaling(aDataOut);
-    AudioBufferInPlaceScale(aDataOut, 1.0f / mFFTSize, mFFTSize);
-  }
-
   // Inverse-transform internal frequency data and store the resulting
   // FFTSize() points in |aDataOut|. If frequency data has not already been
   // scaled, then the output will need scaling by 1/FFTSize().
-  void GetInverseWithoutScaling(float* aDataOut) {
+  void GetInverse(float* aDataOut) {
     if (!EnsureIFFT()) {
       std::fill_n(aDataOut, mFFTSize, 0.0f);
      return;
     };
-
-#if defined(MOZ_LIBAV_FFT)
-    {
-      // Even though this function doesn't scale, the libav forward transform
-      // gives a value that needs scaling by 2 in order for things to turn out
-      // similar to how we expect from kissfft/openmax.
-      AudioBufferCopyWithScale(mOutputBuffer.Elements()->f, 2.0f, aDataOut,
-                               mFFTSize);
-      aDataOut[1] = 2.0f * mOutputBuffer[mFFTSize / 2].r; // Packed Nyquist
-      sRDFTFuncs.calc(mAvIRDFT, aDataOut);
-    }
-#else
-# ifdef BUILD_ARM_NEON
-    if (mozilla::supports_neon()) {
-      omxSP_FFTInv_CCSToR_F32_Sfs_unscaled(mOutputBuffer.Elements()->f,
-                                           aDataOut, mOmxIFFT);
-    } else
-# endif
-    {
-      kiss_fftri(mKissIFFT, &(mOutputBuffer.Elements()->c), aDataOut);
-    }
+    // When performing an inverse transform, tx overwrites the input. This
+    // asserts that forward / inverse transforms are interleaved to avoid having
+    // to keep the input around.
+    MOZ_ASSERT(!mInversePerformed);
+    mIFn(mITxCtx, aDataOut, mOutputBuffer.Elements()->f, 2 * sizeof(float));
+#ifdef DEBUG
+    mInversePerformed = true;
 #endif
   }
 
   void Multiply(const FFTBlock& aFrame) {
+    MOZ_ASSERT(!mInversePerformed);
+
     uint32_t halfSize = mFFTSize / 2;
     // DFTs are not packed.
     MOZ_ASSERT(mOutputBuffer[0].i == 0);
@@ -161,8 +102,8 @@ class FFTBlock final {
     MOZ_ASSERT(dataSize <= FFTSize());
     AlignedTArray<float> paddedData;
     paddedData.SetLength(FFTSize());
-    AudioBufferCopyWithScale(aData, 1.0f / FFTSize(), paddedData.Elements(),
-                             dataSize);
+    AudioBufferCopyWithScale(aData, 1.0f / AssertedCast<float>(FFTSize()),
+                             paddedData.Elements(), dataSize);
     PodZero(paddedData.Elements() + dataSize, mFFTSize - dataSize);
     PerformFFT(paddedData.Elements());
   }
@@ -180,46 +121,35 @@ class FFTBlock final {
   double ExtractAverageGroupDelay();
 
   uint32_t FFTSize() const { return mFFTSize; }
-  float RealData(uint32_t aIndex) const { return mOutputBuffer[aIndex].r; }
-  float& RealData(uint32_t aIndex) { return mOutputBuffer[aIndex].r; }
-  float ImagData(uint32_t aIndex) const { return mOutputBuffer[aIndex].i; }
-  float& ImagData(uint32_t aIndex) { return mOutputBuffer[aIndex].i; }
+  float RealData(uint32_t aIndex) const {
+    MOZ_ASSERT(!mInversePerformed);
+    return mOutputBuffer[aIndex].r;
+  }
+  float& RealData(uint32_t aIndex) {
+    MOZ_ASSERT(!mInversePerformed);
+    return mOutputBuffer[aIndex].r;
+  }
+  float ImagData(uint32_t aIndex) const {
+    MOZ_ASSERT(!mInversePerformed);
+    return mOutputBuffer[aIndex].i;
+  }
+  float& ImagData(uint32_t aIndex) {
+    MOZ_ASSERT(!mInversePerformed);
+    return mOutputBuffer[aIndex].i;
+  }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
     size_t amount = 0;
 
-#if defined(MOZ_LIBAV_FFT)
-    auto ComputedSizeOfContextIfSet = [this](void* aContext) -> size_t {
-      if (!aContext) {
-        return 0;
-      }
-      // RDFTContext is only forward declared in public headers, but this is
-      // an estimate based on a value of 231 seen requested from
-      // _aligned_alloc on Win64. Don't use malloc_usable_size() because the
-      // context pointer is not necessarily from malloc.
-      size_t amount = 232;
-      // Add size of allocations performed in ff_fft_init().
-      // The maximum FFT size used is 32768 = 2^15 and so revtab32 is not
-      // allocated.
-      MOZ_ASSERT(mFFTSize <= 32768);
-      amount += mFFTSize * (sizeof(uint16_t) + 2 * sizeof(float));
-
-      return amount;
-    };
+    // malloc_usable_size can't be used here because the pointer isn't
+    // necessarily from malloc. This value has been manually checked.
+    if (mTxCtx) {
+      amount += 711;
+    }
+    if (mTxCtx) {
+      amount += 711;
+    }
 
-    amount += ComputedSizeOfContextIfSet(mAvRDFT);
-    amount += ComputedSizeOfContextIfSet(mAvIRDFT);
-#else
-# ifdef BUILD_ARM_NEON
-    amount += aMallocSizeOf(mOmxFFT);
-    amount += aMallocSizeOf(mOmxIFFT);
-# endif
-# ifdef USE_SIMD
-# error kiss fft uses malloc only when USE_SIMD is not defined
-# endif
-    amount += aMallocSizeOf(mKissFFT);
-    amount += aMallocSizeOf(mKissIFFT);
-#endif
     amount += mOutputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf);
     return amount;
   }
@@ -228,119 +158,67 @@ class FFTBlock final {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
- private:
   FFTBlock(const FFTBlock& other) = delete;
   void operator=(const FFTBlock& other) = delete;
 
+ private:
   bool EnsureFFT() {
-#if defined(MOZ_LIBAV_FFT)
-    if (!mAvRDFT) {
-      if (!sRDFTFuncs.init) {
+    if (!mTxCtx) {
+      if (!sFFTFuncs.init) {
         return false;
       }
-
-      mAvRDFT = sRDFTFuncs.init(FloorLog2(mFFTSize), DFT_R2C);
-    }
-#else
-# ifdef BUILD_ARM_NEON
-    if (mozilla::supports_neon()) {
-      if (!mOmxFFT) {
-        mOmxFFT = createOmxFFT(mFFTSize);
-      }
-    } else
-# endif
-    {
-      if (!mKissFFT) {
-        mKissFFT = kiss_fftr_alloc(mFFTSize, 0, nullptr, nullptr);
-      }
+      // Forward transform is always unscaled for our purpose.
+      float scale = 1.0f;
+      int rv = sFFTFuncs.init(&mTxCtx, &mFn, AV_TX_FLOAT_RDFT, 0 /* forward */,
+                              AssertedCast<int>(mFFTSize), &scale, 0);
+      MOZ_ASSERT(!rv, "av_tx_init: invalid parameters (forward)");
+      return !rv;
     }
-#endif
     return true;
   }
-
   bool EnsureIFFT() {
-#if defined(MOZ_LIBAV_FFT)
-    if (!mAvIRDFT) {
-      if (!sRDFTFuncs.init) {
+    if (!mITxCtx) {
+      if (!sFFTFuncs.init) {
        return false;
      }
-
-      mAvIRDFT = sRDFTFuncs.init(FloorLog2(mFFTSize), IDFT_C2R);
-    }
-#else
-# ifdef BUILD_ARM_NEON
-    if (mozilla::supports_neon()) {
-      if (!mOmxIFFT) {
-        mOmxIFFT = createOmxFFT(mFFTSize);
-      }
-    } else
-# endif
-    {
-      if (!mKissIFFT) {
-        mKissIFFT = kiss_fftr_alloc(mFFTSize, 1, nullptr, nullptr);
-      }
+      int rv =
+          sFFTFuncs.init(&mITxCtx, &mIFn, AV_TX_FLOAT_RDFT, 1 /* inverse */,
+                         AssertedCast<int>(mFFTSize), &mInverseScaling, 0);
+      MOZ_ASSERT(!rv, "av_tx_init: invalid parameters (inverse)");
+      return !rv;
    }
-#endif
     return true;
   }
-
-#ifdef BUILD_ARM_NEON
-  static OMXFFTSpec_R_F32* createOmxFFT(uint32_t aFFTSize) {
-    MOZ_ASSERT((aFFTSize & (aFFTSize - 1)) == 0);
-    OMX_INT bufSize;
-    OMX_INT order = FloorLog2(aFFTSize);
-    MOZ_ASSERT(aFFTSize >> order == 1);
-    OMXResult status = omxSP_FFTGetBufSize_R_F32(order, &bufSize);
-    if (status == OMX_Sts_NoErr) {
-      OMXFFTSpec_R_F32* context =
-          static_cast<OMXFFTSpec_R_F32*>(malloc(bufSize));
-      if (omxSP_FFTInit_R_F32(context, order) != OMX_Sts_NoErr) {
-        return nullptr;
-      }
-      return context;
-    }
-    return nullptr;
-  }
-#endif
-
   void Clear() {
-#if defined(MOZ_LIBAV_FFT)
-    if (mAvRDFT) {
-      sRDFTFuncs.end(mAvRDFT);
-      mAvRDFT = nullptr;
+    if (mTxCtx) {
+      sFFTFuncs.uninit(&mTxCtx);
+      mTxCtx = nullptr;
+      mFn = nullptr;
     }
-    if (mAvIRDFT) {
-      sRDFTFuncs.end(mAvIRDFT);
-      mAvIRDFT = nullptr;
+    if (mITxCtx) {
+      sFFTFuncs.uninit(&mITxCtx);
+      mITxCtx = nullptr;
+      mIFn = nullptr;
     }
-#else
-# ifdef BUILD_ARM_NEON
-    free(mOmxFFT);
-    free(mOmxIFFT);
-    mOmxFFT = mOmxIFFT = nullptr;
-# endif
-    free(mKissFFT);
-    free(mKissIFFT);
-    mKissFFT = mKissIFFT = nullptr;
-#endif
   }
 
   void AddConstantGroupDelay(double sampleFrameDelay);
   void InterpolateFrequencyComponents(const FFTBlock& block0,
                                       const FFTBlock& block1, double interp);
 
-#if defined(MOZ_LIBAV_FFT)
-  static FFmpegRDFTFuncs sRDFTFuncs;
-  RDFTContext* mAvRDFT;
-  RDFTContext* mAvIRDFT;
-#else
-  kiss_fftr_cfg mKissFFT;
-  kiss_fftr_cfg mKissIFFT;
-# ifdef BUILD_ARM_NEON
-  OMXFFTSpec_R_F32* mOmxFFT;
-  OMXFFTSpec_R_F32* mOmxIFFT;
-# endif
-#endif
+  static FFmpegFFTFuncs sFFTFuncs;
+  // Context and function pointer for forward transform
+  AVTXContext* mTxCtx{};
+  av_tx_fn mFn{};
+  // Context and function pointer for inverse transform
+  AVTXContext* mITxCtx{};
+  av_tx_fn mIFn{};
 
   AlignedTArray<ComplexU> mOutputBuffer;
-  uint32_t mFFTSize;
+  uint32_t mFFTSize{};
+  // A scaling that is performed when doing an inverse transform. The forward
+  // transform is always unscaled.
+  float mInverseScaling;
+#ifdef DEBUG
+  bool mInversePerformed = false;
+#endif
 };
 
 } // namespace mozilla
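
For context on the new members above (mTxCtx/mFn and mITxCtx/mIFn): FFmpegFFTFuncs binds the av_tx transform API from the bundled ffvpx copy of FFmpeg's libavutil, which is why the init call takes AV_TX_FLOAT_RDFT, a scale pointer, and an inverse flag. The standalone sketch below shows the same calling pattern directly against libavutil's tx.h; linking against a system FFmpeg (5.1 or newer) instead of going through FFVPXRuntimeLinker is an assumption made purely for the example, as are the buffer names.

    // Forward and inverse real FFT through av_tx, with the forward transform
    // left unscaled and 1/N folded into the inverse, as in the patch.
    #include <cstdio>
    #include <vector>

    extern "C" {
    #include <libavutil/tx.h>
    }

    int main() {
      const int size = 512;
      float forwardScale = 1.0f;                // forward left unscaled
      float inverseScale = 1.0f / float(size);  // normalisation in the inverse

      AVTXContext* fwdCtx = nullptr;
      AVTXContext* invCtx = nullptr;
      av_tx_fn fwd = nullptr;
      av_tx_fn inv = nullptr;

      if (av_tx_init(&fwdCtx, &fwd, AV_TX_FLOAT_RDFT, 0 /* forward */, size,
                     &forwardScale, 0) < 0 ||
          av_tx_init(&invCtx, &inv, AV_TX_FLOAT_RDFT, 1 /* inverse */, size,
                     &inverseScale, 0) < 0) {
        std::fprintf(stderr, "av_tx_init failed\n");
        return 1;
      }

      std::vector<float> time(size, 1.0f);                // time-domain samples
      std::vector<float> freq(2 * (size / 2 + 1), 0.0f);  // interleaved re/im

      // Real samples in, packed complex spectrum out.
      fwd(fwdCtx, freq.data(), time.data(), 2 * sizeof(float));
      // Spectrum in, real samples out. The inverse may clobber its input,
      // which is what the mInversePerformed assertions in the patch guard.
      inv(invCtx, time.data(), freq.data(), 2 * sizeof(float));

      av_tx_uninit(&fwdCtx);
      av_tx_uninit(&invCtx);
      return 0;
    }
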
diff --git a/dom/media/webaudio/OscillatorNode.cpp b/dom/media/webaudio/OscillatorNode.cpp
index 70592f6e96..65a928f126 100644
--- a/dom/media/webaudio/OscillatorNode.cpp
+++ b/dom/media/webaudio/OscillatorNode.cpp
@@ -329,10 +329,8 @@ class OscillatorNodeEngine final : public AudioNodeEngine {
       case OscillatorType::Custom:
         ComputeCustom(output, start, end, frequency, detune);
         break;
-      case OscillatorType::EndGuard_:
-        MOZ_ASSERT_UNREACHABLE("end guard");
-      // Avoid `default:` so that `-Wswitch` catches missing enumerators
-      // at compile time.
+        // Avoid `default:` so that `-Wswitch` catches missing enumerators at
+        // compile time.
     };
   }
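
The OscillatorNode hunk above removes the EndGuard_ case and deliberately keeps the switch free of a default: label: with -Wswitch (enabled by -Wall in GCC and Clang), the compiler then reports any OscillatorType enumerator the switch does not handle. A minimal illustration with a made-up enum, not taken from the tree:

    // Compile with -Wall (or -Wswitch) to get the exhaustiveness check.
    enum class Waveform { Sine, Square, Triangle };

    int Harmonics(Waveform w) {
      switch (w) {
        case Waveform::Sine:
          return 1;
        case Waveform::Square:
          return 5;
        case Waveform::Triangle:
          return 7;
          // No default: if a fourth Waveform enumerator is added and not
          // handled here, -Wswitch warns that the value is not handled in
          // the switch, instead of silently falling through a default.
      }
      return 0;  // unreachable for valid enumerator values
    }
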
if CONFIG["INTEL_ARCHITECTURE"]: diff --git a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js index 2a5a4bff89..439dbec0c4 100644 --- a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js +++ b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js @@ -1,3 +1,3 @@ -onmessage = function (event) { +onmessage = function () { postMessage("Pong"); }; diff --git a/dom/media/webaudio/test/test_OfflineAudioContext.html b/dom/media/webaudio/test/test_OfflineAudioContext.html index d9403566ae..6d8a907542 100644 --- a/dom/media/webaudio/test/test_OfflineAudioContext.html +++ b/dom/media/webaudio/test/test_OfflineAudioContext.html @@ -92,7 +92,7 @@ addLoadEvent(function() { ctx.startRendering().then(function() { ok(false, "Promise should not resolve when startRendering is called a second time on an OfflineAudioContext") finish(); - }).catch(function(err) { + }).catch(function() { ok(true, "Promise should reject when startRendering is called a second time on an OfflineAudioContext") finish(); }); @@ -106,7 +106,7 @@ addLoadEvent(function() { ctx.startRendering().then(function(b) { is(renderedBuffer, null, "The promise callback should be called first."); setOrCompareRenderedBuffer(b); - }).catch(function (error) { + }).catch(function () { ok(false, "The promise from OfflineAudioContext.startRendering should never be rejected"); }); }); diff --git a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html index 693e558304..027e3bcc56 100644 --- a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html +++ b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html @@ -29,7 +29,7 @@ for (var i = 0; i < nodeTypes.length; ++i) { } } -var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount, aDesc) { +var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount) { if (aPath in usages) { usages[aPath] += aAmount; } diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html index 0411b74ce5..e8549af37d 100644 --- a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html @@ -34,7 +34,7 @@ addLoadEvent(function() { var source = context.createBufferSource(); - source.onended = function(e) { + source.onended = function() { // The timing at which the audioprocess and ended listeners are called can // change, hence the fuzzy equal here. 
diff --git a/dom/media/webaudio/blink/HRTFKernel.cpp b/dom/media/webaudio/blink/HRTFKernel.cpp
index ecaa846a66..96a53609f2 100644
--- a/dom/media/webaudio/blink/HRTFKernel.cpp
+++ b/dom/media/webaudio/blink/HRTFKernel.cpp
@@ -38,7 +38,7 @@ static float extractAverageGroupDelay(float* impulseP, size_t length) {
   // Check for power-of-2.
   MOZ_ASSERT(length && (length & (length - 1)) == 0);
 
-  FFTBlock estimationFrame(length);
+  FFTBlock estimationFrame(length, 1.f / length);
   estimationFrame.PerformFFT(impulseP);
 
   float frameDelay =
diff --git a/dom/media/webaudio/blink/PeriodicWave.cpp b/dom/media/webaudio/blink/PeriodicWave.cpp
index 6b1d173008..4ed8829928 100644
--- a/dom/media/webaudio/blink/PeriodicWave.cpp
+++ b/dom/media/webaudio/blink/PeriodicWave.cpp
@@ -266,7 +266,7 @@ void PeriodicWave::createBandLimitedTables(float fundamentalFrequency,
 
     // Apply an inverse FFT to generate the time-domain table data.
     float* data = m_bandLimitedTables[rangeIndex]->Elements();
-    frame.GetInverseWithoutScaling(data);
+    frame.GetInverse(data);
 
     // For the first range (which has the highest power), calculate
     // its peak value then compute normalization scale.
diff --git a/dom/media/webaudio/moz.build b/dom/media/webaudio/moz.build
index 2e82d3daa8..3ee8c0aa76 100644
--- a/dom/media/webaudio/moz.build
+++ b/dom/media/webaudio/moz.build
@@ -130,8 +130,6 @@ if CONFIG["TARGET_CPU"] == "aarch64" or CONFIG["BUILD_ARM_NEON"]:
     LOCAL_INCLUDES += ["/third_party/xsimd/include"]
     SOURCES += ["AudioNodeEngineNEON.cpp"]
     SOURCES["AudioNodeEngineNEON.cpp"].flags += CONFIG["NEON_FLAGS"]
-    if CONFIG["BUILD_ARM_NEON"]:
-        LOCAL_INCLUDES += ["/media/openmax_dl/dl/api/"]
 
 # Are we targeting x86 or x64? If so, build SSEX files.
 if CONFIG["INTEL_ARCHITECTURE"]:
diff --git a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js
index 2a5a4bff89..439dbec0c4 100644
--- a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js
+++ b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js
@@ -1,3 +1,3 @@
-onmessage = function (event) {
+onmessage = function () {
   postMessage("Pong");
 };
diff --git a/dom/media/webaudio/test/test_OfflineAudioContext.html b/dom/media/webaudio/test/test_OfflineAudioContext.html
index d9403566ae..6d8a907542 100644
--- a/dom/media/webaudio/test/test_OfflineAudioContext.html
+++ b/dom/media/webaudio/test/test_OfflineAudioContext.html
@@ -92,7 +92,7 @@ addLoadEvent(function() {
     ctx.startRendering().then(function() {
       ok(false, "Promise should not resolve when startRendering is called a second time on an OfflineAudioContext")
       finish();
-    }).catch(function(err) {
+    }).catch(function() {
       ok(true, "Promise should reject when startRendering is called a second time on an OfflineAudioContext")
       finish();
     });
@@ -106,7 +106,7 @@ addLoadEvent(function() {
     ctx.startRendering().then(function(b) {
       is(renderedBuffer, null, "The promise callback should be called first.");
       setOrCompareRenderedBuffer(b);
-    }).catch(function (error) {
+    }).catch(function () {
      ok(false, "The promise from OfflineAudioContext.startRendering should never be rejected");
    });
  });
diff --git a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html
index 693e558304..027e3bcc56 100644
--- a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html
+++ b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html
@@ -29,7 +29,7 @@ for (var i = 0; i < nodeTypes.length; ++i) {
   }
 }
 
-var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount, aDesc) {
+var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount) {
   if (aPath in usages) {
     usages[aPath] += aAmount;
   }
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html
index 0411b74ce5..e8549af37d 100644
--- a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html
@@ -34,7 +34,7 @@ addLoadEvent(function() {
 
     var source = context.createBufferSource();
 
-    source.onended = function(e) {
+    source.onended = function() {
      // The timing at which the audioprocess and ended listeners are called can
      // change, hence the fuzzy equal here.
      var errorRatio = samplesFromSource / (0.5 * context.sampleRate);
diff --git a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
index 36cf8f720c..62bdd3fd54 100644
--- a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
+++ b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
@@ -72,10 +72,10 @@ function tryLegalOpeerationsOnClosedContext(ctx) {
     });
   });
   loadFile("ting-44.1k-1ch.ogg", function(buf) {
-    ctx.decodeAudioData(buf).then(function(decodedBuf) {
+    ctx.decodeAudioData(buf).then(function() {
       ok(true, "decodeAudioData on a closed context should work, it did.")
       finish();
-    }).catch(function(e){
+    }).catch(function(){
       ok(false, "decodeAudioData on a closed context should work, it did not");
       finish();
     });
@@ -103,7 +103,7 @@ function testMultiContextOutput() {
 
         silentBuffersInARow = 0;
 
-        sp2.onaudioprocess = function(e) {
+        sp2.onaudioprocess = function() {
           ac1.suspend().then(function() {
             is(ac1.state, "suspended", "ac1 is suspended");
             sp2.onaudioprocess = checkSilence;
@@ -350,7 +350,7 @@ function testOfflineAudioContext() {
 
   o.onstatechange = beforeStartRendering;
 
-  o.startRendering().then(function(buffer) {
+  o.startRendering().then(function() {
     finishedRendering = true;
   });
 }
diff --git a/dom/media/webaudio/test/test_bug1056032.html b/dom/media/webaudio/test/test_bug1056032.html
index ba38267e19..77427ee5f0 100644
--- a/dom/media/webaudio/test/test_bug1056032.html
+++ b/dom/media/webaudio/test/test_bug1056032.html
@@ -19,7 +19,7 @@ addLoadEvent(function() {
   xhr.responseType = "arraybuffer";
   xhr.onload = function() {
     var context = new AudioContext();
-    context.decodeAudioData(xhr.response, function(b) {
+    context.decodeAudioData(xhr.response, function() {
       ok(true, "We can decode an mp3 using decodeAudioData");
       SimpleTest.finish();
     }, function() {
diff --git a/dom/media/webaudio/test/test_bug867174.html b/dom/media/webaudio/test/test_bug867174.html
index e949bcec41..dd66c77303 100644
--- a/dom/media/webaudio/test/test_bug867174.html
+++ b/dom/media/webaudio/test/test_bug867174.html
@@ -21,7 +21,7 @@ addLoadEvent(function() {
   sp.connect(ctx.destination);
   source.start(0);
 
-  sp.onaudioprocess = function(e) {
+  sp.onaudioprocess = function() {
     // Now set the buffer
     source.buffer = buffer;
diff --git a/dom/media/webaudio/test/test_convolverNodeNormalization.html b/dom/media/webaudio/test/test_convolverNodeNormalization.html
index 24cb7d1670..b55dbba0c2 100644
--- a/dom/media/webaudio/test/test_convolverNodeNormalization.html
+++ b/dom/media/webaudio/test/test_convolverNodeNormalization.html
@@ -13,7 +13,7 @@ const LENGTH = 12800;
 // tolerate 16-bit math.
 const EPSILON = 1.0 / Math.pow(2, 15);
 
-function test_normalization_via_response_concat(delayIndex)
+function test_normalization_via_response_concat()
 {
   var context = new OfflineAudioContext(1, LENGTH, sampleRate);
diff --git a/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html b/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html
index e7c6d2db0c..cd86c0a1d3 100644
--- a/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html
+++ b/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html
@@ -19,7 +19,7 @@ var testDecodeAudioDataOnDetachedBuffer = function(buffer) {
   is(buffer.byteLength, 0, "Buffer should be detached");
 
   // call decodeAudioData on detached buffer
-  context.decodeAudioData(buffer).then(function(b) {
+  context.decodeAudioData(buffer).then(function() {
     ok(false, "We should not be able to decode the detached buffer but we do");
     SimpleTest.finish();
   }, function(r) {
diff --git a/dom/media/webaudio/test/test_decodeAudioDataPromise.html b/dom/media/webaudio/test/test_decodeAudioDataPromise.html
index 139a686db1..a686c275cd 100644
--- a/dom/media/webaudio/test/test_decodeAudioDataPromise.html
+++ b/dom/media/webaudio/test/test_decodeAudioDataPromise.html
@@ -27,7 +27,7 @@ var ac = new AudioContext();
 expectNoException(function() {
   var p = ac.decodeAudioData(" ");
   ok(p instanceof Promise, "AudioContext.decodeAudioData should return a Promise");
-  p.then(function(data) {
+  p.then(function() {
     ok(false, "Promise should not resolve with an invalid source buffer.");
     finish();
   }).catch(function(e) {
diff --git a/dom/media/webaudio/test/test_decodeAudioError.html b/dom/media/webaudio/test/test_decodeAudioError.html
index f18b971ac4..b6dd6ff74b 100644
--- a/dom/media/webaudio/test/test_decodeAudioError.html
+++ b/dom/media/webaudio/test/test_decodeAudioError.html
@@ -34,11 +34,11 @@ function errorExpectedWithFile(file, errorMsg) {
   xhr.open("GET", file, true);
   xhr.responseType = "arraybuffer";
   xhr.onload = function() {
-    ctx.decodeAudioData(xhr.response, buffer => {
+    ctx.decodeAudioData(xhr.response, () => {
       ok(false, "You should not be able to decode that");
       finish();
     }, e => test(e))
-    .then(buffer => {
+    .then(() => {
       ok(false, "You should not be able to decode that");
       finish();
     })
diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNode.html b/dom/media/webaudio/test/test_dynamicsCompressorNode.html
index 05b6887a53..8f197aa32f 100644
--- a/dom/media/webaudio/test/test_dynamicsCompressorNode.html
+++ b/dom/media/webaudio/test/test_dynamicsCompressorNode.html
@@ -48,7 +48,7 @@ addLoadEvent(function() {
   osc.start();
 
   var iteration = 0;
-  sp.onaudioprocess = function(e) {
+  sp.onaudioprocess = function() {
     if (iteration > 10) {
       ok(compressor.reduction < 0,
          "Feeding a full-scale sine to a compressor should result in an db" +
diff --git a/dom/media/webaudio/test/test_event_listener_leaks.html b/dom/media/webaudio/test/test_event_listener_leaks.html
index a3bcc9259e..f76e1d3e55 100644
--- a/dom/media/webaudio/test/test_event_listener_leaks.html
+++ b/dom/media/webaudio/test/test_event_listener_leaks.html
@@ -18,7 +18,7 @@
 // exercise the leak condition.
 async function useAudioContext(contentWindow) {
   let ctx = new contentWindow.AudioContext();
-  ctx.onstatechange = e => {
+  ctx.onstatechange = () => {
     contentWindow.stateChangeCount += 1;
   };
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html
index 7920af9f7b..84d41df50e 100644
--- a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html
@@ -50,7 +50,7 @@ function waitForAudio(analysisFunction, cancelPromise) {
   });
 }
 
-async function test(sourceNode) {
+async function test() {
   try {
     await analyser.connect(context.destination);
diff --git a/dom/media/webaudio/test/test_pannerNodeTail.html b/dom/media/webaudio/test/test_pannerNodeTail.html
index 7035780bf2..bb60fe05da 100644
--- a/dom/media/webaudio/test/test_pannerNodeTail.html
+++ b/dom/media/webaudio/test/test_pannerNodeTail.html
@@ -153,7 +153,7 @@ function startTest() {
   }
   source.buffer = buffer;
   source.start(0);
-  source.onended = function(e) {
+  source.onended = function() {
     gotEnded = true;
   };
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
index fb45895380..0de8818d82 100644
--- a/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
+++ b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
@@ -15,7 +15,7 @@ addLoadEvent(function() {
   var context = new AudioContext();
   var sp = context.createScriptProcessor(2048, 2, 2);
 
-  sp.onaudioprocess = function(e) {
+  sp.onaudioprocess = function() {
    ok(false, "Should not call onaudioprocess if the node is not connected.");
    sp.onaudioprocess = null;
    SimpleTest.finish();
diff --git a/dom/media/webaudio/test/webaudio.js b/dom/media/webaudio/test/webaudio.js
index 049e0e5af3..100c71f320 100644
--- a/dom/media/webaudio/test/webaudio.js
+++ b/dom/media/webaudio/test/webaudio.js
@@ -42,7 +42,7 @@ function expectRejectedPromise(that, func, exceptionName) {
   ok(promise instanceof Promise, "Expect a Promise");
 
   promise
-    .then(function (res) {
+    .then(function () {
       ok(false, "Promise resolved when it should have been rejected.");
     })
     .catch(function (err) {