path: root/dom/media/webaudio/test
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /dom/media/webaudio/test
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip
Adding upstream version 115.7.0esr. (refs: upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'dom/media/webaudio/test')
-rw-r--r--  dom/media/webaudio/test/8kHz-320kbps-6ch.aac  bin  0 -> 22657 bytes
-rw-r--r--  dom/media/webaudio/test/audio-expected.wav  bin  0 -> 190764 bytes
-rw-r--r--  dom/media/webaudio/test/audio-mono-expected-2.wav  bin  0 -> 103788 bytes
-rw-r--r--  dom/media/webaudio/test/audio-mono-expected.wav  bin  0 -> 103788 bytes
-rw-r--r--  dom/media/webaudio/test/audio-quad.wav  bin  0 -> 5128 bytes
-rw-r--r--  dom/media/webaudio/test/audio.ogv  bin  0 -> 16049 bytes
-rw-r--r--  dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js  3
-rw-r--r--  dom/media/webaudio/test/audiovideo.mp4  bin  0 -> 139713 bytes
-rw-r--r--  dom/media/webaudio/test/blink/README  9
-rw-r--r--  dom/media/webaudio/test/blink/audio-testing.js  192
-rw-r--r--  dom/media/webaudio/test/blink/biquad-filters.js  368
-rw-r--r--  dom/media/webaudio/test/blink/biquad-testing.js  153
-rw-r--r--  dom/media/webaudio/test/blink/convolution-testing.js  182
-rw-r--r--  dom/media/webaudio/test/blink/mochitest.ini  22
-rw-r--r--  dom/media/webaudio/test/blink/panner-model-testing.js  210
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html  32
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html  351
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html  34
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html  261
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html  33
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html  33
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html  34
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html  34
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html  33
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html  34
-rw-r--r--  dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html  76
-rw-r--r--  dom/media/webaudio/test/blink/test_iirFilterNode.html  467
-rw-r--r--  dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html  97
-rw-r--r--  dom/media/webaudio/test/corsServer.sjs  26
-rw-r--r--  dom/media/webaudio/test/file_nodeCreationDocumentGone.html  4
-rwxr-xr-x  dom/media/webaudio/test/generate-test-files.py  52
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-aac-afconvert.mp4  bin  0 -> 6560 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-aac.aac  bin  0 -> 4826 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-aac.mp4  bin  0 -> 5584 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-flac.flac  bin  0 -> 17320 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libmp3lame.mp3  bin  0 -> 4615 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libopus.mp4  bin  0 -> 7171 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libopus.opus  bin  0 -> 6469 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libopus.webm  bin  0 -> 6991 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.ogg  bin  0 -> 4320 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.webm  bin  0 -> 4878 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-44100.wav  bin  0 -> 44144 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-aac.aac  bin  0 -> 4840 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-aac.mp4  bin  0 -> 5592 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-flac.flac  bin  0 -> 18577 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libmp3lame.mp3  bin  0 -> 4461 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libopus.mp4  bin  0 -> 6738 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libopus.opus  bin  0 -> 6031 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libopus.webm  bin  0 -> 6558 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.ogg  bin  0 -> 4559 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.webm  bin  0 -> 5142 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-1ch-48000.wav  bin  0 -> 48044 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-aac.aac  bin  0 -> 8755 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-aac.mp4  bin  0 -> 9513 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-flac.flac  bin  0 -> 23279 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libmp3lame.mp3  bin  0 -> 9030 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libopus.mp4  bin  0 -> 11593 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libopus.opus  bin  0 -> 10905 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libopus.webm  bin  0 -> 11413 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.ogg  bin  0 -> 5478 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.webm  bin  0 -> 6033 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-44100.wav  bin  0 -> 88244 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-aac.aac  bin  0 -> 8727 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-aac.mp4  bin  0 -> 9479 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-flac.flac  bin  0 -> 24984 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libmp3lame.mp3  bin  0 -> 8685 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libopus.mp4  bin  0 -> 12247 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libopus.opus  bin  0 -> 11559 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libopus.webm  bin  0 -> 12067 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.ogg  bin  0 -> 5784 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.webm  bin  0 -> 6364 bytes
-rw-r--r--  dom/media/webaudio/test/half-a-second-2ch-48000.wav  bin  0 -> 96044 bytes
-rw-r--r--  dom/media/webaudio/test/invalid.txt  1
-rw-r--r--  dom/media/webaudio/test/invalidContent.flac  1
-rw-r--r--  dom/media/webaudio/test/layouttest-glue.js  18
-rw-r--r--  dom/media/webaudio/test/mochitest.ini  215
-rw-r--r--  dom/media/webaudio/test/mochitest_audio.ini  69
-rw-r--r--  dom/media/webaudio/test/mochitest_bugs.ini  65
-rw-r--r--  dom/media/webaudio/test/mochitest_media.ini  64
-rw-r--r--  dom/media/webaudio/test/nil-packet.ogg  bin  0 -> 9760 bytes
-rw-r--r--  dom/media/webaudio/test/noaudio.webm  bin  0 -> 105755 bytes
-rw-r--r--  dom/media/webaudio/test/sine-440-10s.opus  bin  0 -> 94428 bytes
-rw-r--r--  dom/media/webaudio/test/sixteen-frames.mp3  bin  0 -> 625 bytes
-rw-r--r--  dom/media/webaudio/test/small-shot-expected.wav  bin  0 -> 53036 bytes
-rw-r--r--  dom/media/webaudio/test/small-shot-mono-expected.wav  bin  0 -> 26540 bytes
-rw-r--r--  dom/media/webaudio/test/small-shot.mp3  bin  0 -> 6825 bytes
-rw-r--r--  dom/media/webaudio/test/small-shot.ogg  bin  0 -> 6416 bytes
-rw-r--r--  dom/media/webaudio/test/sweep-300-330-1sec.opus  bin  0 -> 8889 bytes
-rw-r--r--  dom/media/webaudio/test/test_AudioBuffer.html  104
-rw-r--r--  dom/media/webaudio/test/test_AudioContext.html  23
-rw-r--r--  dom/media/webaudio/test/test_AudioContext_disabled.html  56
-rw-r--r--  dom/media/webaudio/test/test_AudioListener.html  26
-rw-r--r--  dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html  59
-rw-r--r--  dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html  49
-rw-r--r--  dom/media/webaudio/test/test_OfflineAudioContext.html  118
-rw-r--r--  dom/media/webaudio/test/test_ScriptProcessorCollected1.html  77
-rw-r--r--  dom/media/webaudio/test/test_WebAudioMemoryReporting.html  54
-rw-r--r--  dom/media/webaudio/test/test_analyserNode.html  178
-rw-r--r--  dom/media/webaudio/test/test_analyserNodeMinimum.html  51
-rw-r--r--  dom/media/webaudio/test/test_analyserNodeOutput.html  43
-rw-r--r--  dom/media/webaudio/test/test_analyserNodePassThrough.html  47
-rw-r--r--  dom/media/webaudio/test/test_analyserNodeWithGain.html  47
-rw-r--r--  dom/media/webaudio/test/test_analyserScale.html  59
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNode.html  44
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html  58
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html  36
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html  47
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html  45
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html  48
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html  44
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html  33
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html  31
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html  55
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html  45
-rw-r--r--  dom/media/webaudio/test/test_audioBufferSourceNodeRate.html  58
-rw-r--r--  dom/media/webaudio/test/test_audioContextGC.html  162
-rw-r--r--  dom/media/webaudio/test/test_audioContextParams_recordNonDefaultSampleRate.html  48
-rw-r--r--  dom/media/webaudio/test/test_audioContextParams_sampleRate.html  81
-rw-r--r--  dom/media/webaudio/test/test_audioContextSuspendResumeClose.html  419
-rw-r--r--  dom/media/webaudio/test/test_audioDestinationNode.html  26
-rw-r--r--  dom/media/webaudio/test/test_audioParamChaining.html  77
-rw-r--r--  dom/media/webaudio/test/test_audioParamExponentialRamp.html  58
-rw-r--r--  dom/media/webaudio/test/test_audioParamGain.html  61
-rw-r--r--  dom/media/webaudio/test/test_audioParamLinearRamp.html  54
-rw-r--r--  dom/media/webaudio/test/test_audioParamSetCurveAtTime.html  54
-rw-r--r--  dom/media/webaudio/test/test_audioParamSetTargetAtTime.html  55
-rw-r--r--  dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html  58
-rw-r--r--  dom/media/webaudio/test/test_audioParamSetValueAtTime.html  52
-rw-r--r--  dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html  45
-rw-r--r--  dom/media/webaudio/test/test_badConnect.html  52
-rw-r--r--  dom/media/webaudio/test/test_biquadFilterNode.html  86
-rw-r--r--  dom/media/webaudio/test/test_biquadFilterNodePassThrough.html  47
-rw-r--r--  dom/media/webaudio/test/test_biquadFilterNodeWithGain.html  61
-rw-r--r--  dom/media/webaudio/test/test_bug1027864.html  74
-rw-r--r--  dom/media/webaudio/test/test_bug1056032.html  35
-rw-r--r--  dom/media/webaudio/test/test_bug1113634.html  58
-rw-r--r--  dom/media/webaudio/test/test_bug1118372.html  46
-rw-r--r--  dom/media/webaudio/test/test_bug1255618.html  41
-rw-r--r--  dom/media/webaudio/test/test_bug1267579.html  46
-rw-r--r--  dom/media/webaudio/test/test_bug1355798.html  30
-rw-r--r--  dom/media/webaudio/test/test_bug1447273.html  175
-rw-r--r--  dom/media/webaudio/test/test_bug808374.html  22
-rw-r--r--  dom/media/webaudio/test/test_bug827541.html  24
-rw-r--r--  dom/media/webaudio/test/test_bug839753.html  18
-rw-r--r--  dom/media/webaudio/test/test_bug845960.html  18
-rw-r--r--  dom/media/webaudio/test/test_bug856771.html  26
-rw-r--r--  dom/media/webaudio/test/test_bug866570.html  18
-rw-r--r--  dom/media/webaudio/test/test_bug866737.html  36
-rw-r--r--  dom/media/webaudio/test/test_bug867089.html  43
-rw-r--r--  dom/media/webaudio/test/test_bug867174.html  38
-rw-r--r--  dom/media/webaudio/test/test_bug873335.html  22
-rw-r--r--  dom/media/webaudio/test/test_bug875221.html  239
-rw-r--r--  dom/media/webaudio/test/test_bug875402.html  47
-rw-r--r--  dom/media/webaudio/test/test_bug894150.html  21
-rw-r--r--  dom/media/webaudio/test/test_bug956489.html  56
-rw-r--r--  dom/media/webaudio/test/test_bug964376.html  64
-rw-r--r--  dom/media/webaudio/test/test_bug966247.html  46
-rw-r--r--  dom/media/webaudio/test/test_bug972678.html  62
-rw-r--r--  dom/media/webaudio/test/test_channelMergerNode.html  57
-rw-r--r--  dom/media/webaudio/test/test_channelMergerNodeWithVolume.html  60
-rw-r--r--  dom/media/webaudio/test/test_channelSplitterNode.html  71
-rw-r--r--  dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html  76
-rw-r--r--  dom/media/webaudio/test/test_convolver-upmixing-1-channel-response.html  143
-rw-r--r--  dom/media/webaudio/test/test_convolverNode.html  31
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeChannelCount.html  61
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html  169
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeDelay.html  72
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html  44
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeNormalization.html  83
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeOOM.html  46
-rw-r--r--  dom/media/webaudio/test/test_convolverNodePassThrough.html  48
-rw-r--r--  dom/media/webaudio/test/test_convolverNodeWithGain.html  62
-rw-r--r--  dom/media/webaudio/test/test_convolverNode_mono_mono.html  73
-rw-r--r--  dom/media/webaudio/test/test_currentTime.html  27
-rw-r--r--  dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html  50
-rw-r--r--  dom/media/webaudio/test/test_decodeAudioDataPromise.html  62
-rw-r--r--  dom/media/webaudio/test/test_decodeAudioError.html  74
-rw-r--r--  dom/media/webaudio/test/test_decodeMultichannel.html  75
-rw-r--r--  dom/media/webaudio/test/test_decodeOpusTail.html  28
-rw-r--r--  dom/media/webaudio/test/test_decoderDelay.html  144
-rw-r--r--  dom/media/webaudio/test/test_delayNode.html  101
-rw-r--r--  dom/media/webaudio/test/test_delayNodeAtMax.html  53
-rw-r--r--  dom/media/webaudio/test/test_delayNodeChannelChanges.html  98
-rw-r--r--  dom/media/webaudio/test/test_delayNodeCycles.html  157
-rw-r--r--  dom/media/webaudio/test/test_delayNodePassThrough.html  53
-rw-r--r--  dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html  43
-rw-r--r--  dom/media/webaudio/test/test_delayNodeTailIncrease.html  71
-rw-r--r--  dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html  95
-rw-r--r--  dom/media/webaudio/test/test_delayNodeTailWithGain.html  72
-rw-r--r--  dom/media/webaudio/test/test_delayNodeTailWithReconnect.html  136
-rw-r--r--  dom/media/webaudio/test/test_delayNodeWithGain.html  54
-rw-r--r--  dom/media/webaudio/test/test_delaynode-channel-count-1.html  104
-rw-r--r--  dom/media/webaudio/test/test_disconnectAll.html  51
-rw-r--r--  dom/media/webaudio/test/test_disconnectAudioParam.html  58
-rw-r--r--  dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html  67
-rw-r--r--  dom/media/webaudio/test/test_disconnectExceptions.html  75
-rw-r--r--  dom/media/webaudio/test/test_disconnectFromAudioNode.html  55
-rw-r--r--  dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html  59
-rw-r--r--  dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html  57
-rw-r--r--  dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html  56
-rw-r--r--  dom/media/webaudio/test/test_disconnectFromOutput.html  54
-rw-r--r--  dom/media/webaudio/test/test_dynamicsCompressorNode.html  68
-rw-r--r--  dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html  47
-rw-r--r--  dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html  51
-rw-r--r--  dom/media/webaudio/test/test_event_listener_leaks.html  47
-rw-r--r--  dom/media/webaudio/test/test_gainNode.html  72
-rw-r--r--  dom/media/webaudio/test/test_gainNodeInLoop.html  48
-rw-r--r--  dom/media/webaudio/test/test_gainNodePassThrough.html  49
-rw-r--r--  dom/media/webaudio/test/test_iirFilterNodePassThrough.html  47
-rw-r--r--  dom/media/webaudio/test/test_maxChannelCount.html  38
-rw-r--r--  dom/media/webaudio/test/test_mediaDecoding.html  388
-rw-r--r--  dom/media/webaudio/test/test_mediaElementAudioSourceNode.html  74
-rw-r--r--  dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html  94
-rw-r--r--  dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html  137
-rw-r--r--  dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html  66
-rw-r--r--  dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html  70
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html  50
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html  50
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html  60
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html  116
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html  55
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html  74
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNode.html  54
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeCrossOrigin.html  53
-rw-r--r--  dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeVideo.html  27
-rw-r--r--  dom/media/webaudio/test/test_mixingRules.html  402
-rw-r--r--  dom/media/webaudio/test/test_nodeCreationDocumentGone.html  34
-rw-r--r--  dom/media/webaudio/test/test_nodeToParamConnection.html  60
-rw-r--r--  dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html  57
-rw-r--r--  dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html  42
-rw-r--r--  dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html  46
-rw-r--r--  dom/media/webaudio/test/test_oscillatorNode.html  60
-rw-r--r--  dom/media/webaudio/test/test_oscillatorNode2.html  53
-rw-r--r--  dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html  50
-rw-r--r--  dom/media/webaudio/test/test_oscillatorNodePassThrough.html  43
-rw-r--r--  dom/media/webaudio/test/test_oscillatorNodeStart.html  38
-rw-r--r--  dom/media/webaudio/test/test_oscillatorTypeChange.html  58
-rw-r--r--  dom/media/webaudio/test/test_pannerNode.html  71
-rw-r--r--  dom/media/webaudio/test/test_pannerNodeAbove.html  50
-rw-r--r--  dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html  149
-rw-r--r--  dom/media/webaudio/test/test_pannerNodeChannelCount.html  52
-rw-r--r--  dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html  107
-rw-r--r--  dom/media/webaudio/test/test_pannerNodePassThrough.html  53
-rw-r--r--  dom/media/webaudio/test/test_pannerNodeTail.html  232
-rw-r--r--  dom/media/webaudio/test/test_pannerNode_audioparam_distance.html  43
-rw-r--r--  dom/media/webaudio/test/test_pannerNode_equalPower.html  26
-rw-r--r--  dom/media/webaudio/test/test_pannerNode_maxDistance.html  64
-rw-r--r--  dom/media/webaudio/test/test_periodicWave.html  130
-rw-r--r--  dom/media/webaudio/test/test_periodicWaveBandLimiting.html  86
-rw-r--r--  dom/media/webaudio/test/test_periodicWaveDisableNormalization.html  98
-rw-r--r--  dom/media/webaudio/test/test_retrospective-exponentialRampToValueAtTime.html  51
-rw-r--r--  dom/media/webaudio/test/test_retrospective-linearRampToValueAtTime.html  51
-rw-r--r--  dom/media/webaudio/test/test_retrospective-setTargetAtTime.html  51
-rw-r--r--  dom/media/webaudio/test/test_retrospective-setValueAtTime.html  54
-rw-r--r--  dom/media/webaudio/test/test_retrospective-setValueCurveAtTime.html  49
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNode.html  132
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html  80
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html  34
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html  103
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html  39
-rw-r--r--  dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html  52
-rw-r--r--  dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html  72
-rw-r--r--  dom/media/webaudio/test/test_setValueCurveWithNonFiniteElements.html  60
-rw-r--r--  dom/media/webaudio/test/test_singleSourceDest.html  70
-rw-r--r--  dom/media/webaudio/test/test_slowStart.html  48
-rw-r--r--  dom/media/webaudio/test/test_stereoPannerNode.html  295
-rw-r--r--  dom/media/webaudio/test/test_stereoPannerNodePassThrough.html  47
-rw-r--r--  dom/media/webaudio/test/test_stereoPanningWithGain.html  49
-rw-r--r--  dom/media/webaudio/test/test_waveDecoder.html  69
-rw-r--r--  dom/media/webaudio/test/test_waveShaper.html  60
-rw-r--r--  dom/media/webaudio/test/test_waveShaperGain.html  73
-rw-r--r--  dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html  66
-rw-r--r--  dom/media/webaudio/test/test_waveShaperNoCurve.html  43
-rw-r--r--  dom/media/webaudio/test/test_waveShaperPassThrough.html  55
-rw-r--r--  dom/media/webaudio/test/test_webAudio_muteTab.html  95
-rw-r--r--  dom/media/webaudio/test/ting-44.1k-1ch.ogg  bin  0 -> 8566 bytes
-rw-r--r--  dom/media/webaudio/test/ting-44.1k-1ch.wav  bin  0 -> 61228 bytes
-rw-r--r--  dom/media/webaudio/test/ting-44.1k-2ch.ogg  bin  0 -> 10422 bytes
-rw-r--r--  dom/media/webaudio/test/ting-44.1k-2ch.wav  bin  0 -> 122412 bytes
-rw-r--r--  dom/media/webaudio/test/ting-48k-1ch.ogg  bin  0 -> 8680 bytes
-rw-r--r--  dom/media/webaudio/test/ting-48k-1ch.wav  bin  0 -> 66638 bytes
-rw-r--r--  dom/media/webaudio/test/ting-48k-2ch.ogg  bin  0 -> 10701 bytes
-rw-r--r--  dom/media/webaudio/test/ting-48k-2ch.wav  bin  0 -> 133232 bytes
-rw-r--r--  dom/media/webaudio/test/ting-dualchannel44.1.wav  bin  0 -> 122412 bytes
-rw-r--r--  dom/media/webaudio/test/ting-dualchannel48.wav  bin  0 -> 122412 bytes
-rw-r--r--  dom/media/webaudio/test/webaudio.js  319
286 files changed, 16690 insertions, 0 deletions
diff --git a/dom/media/webaudio/test/8kHz-320kbps-6ch.aac b/dom/media/webaudio/test/8kHz-320kbps-6ch.aac
new file mode 100644
index 0000000000..8981d40dfd
--- /dev/null
+++ b/dom/media/webaudio/test/8kHz-320kbps-6ch.aac
Binary files differ
diff --git a/dom/media/webaudio/test/audio-expected.wav b/dom/media/webaudio/test/audio-expected.wav
new file mode 100644
index 0000000000..1519270776
--- /dev/null
+++ b/dom/media/webaudio/test/audio-expected.wav
Binary files differ
diff --git a/dom/media/webaudio/test/audio-mono-expected-2.wav b/dom/media/webaudio/test/audio-mono-expected-2.wav
new file mode 100644
index 0000000000..68c90dfa1e
--- /dev/null
+++ b/dom/media/webaudio/test/audio-mono-expected-2.wav
Binary files differ
diff --git a/dom/media/webaudio/test/audio-mono-expected.wav b/dom/media/webaudio/test/audio-mono-expected.wav
new file mode 100644
index 0000000000..bf00e5cdf2
--- /dev/null
+++ b/dom/media/webaudio/test/audio-mono-expected.wav
Binary files differ
diff --git a/dom/media/webaudio/test/audio-quad.wav b/dom/media/webaudio/test/audio-quad.wav
new file mode 100644
index 0000000000..093f0197ae
--- /dev/null
+++ b/dom/media/webaudio/test/audio-quad.wav
Binary files differ
diff --git a/dom/media/webaudio/test/audio.ogv b/dom/media/webaudio/test/audio.ogv
new file mode 100644
index 0000000000..68dee3cf2b
--- /dev/null
+++ b/dom/media/webaudio/test/audio.ogv
Binary files differ
diff --git a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js
new file mode 100644
index 0000000000..2a5a4bff89
--- /dev/null
+++ b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js
@@ -0,0 +1,3 @@
+onmessage = function (event) {
+ postMessage("Pong");
+};
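
For reference, a minimal sketch of how a test can use this worker to detach an
AudioBuffer's channel data (assuming an existing AudioContext named |context|;
the snippet is illustrative and not part of the patch):

  var worker = new Worker("audioBufferSourceNodeDetached_worker.js");
  var buffer = context.createBuffer(1, 2048, context.sampleRate);
  worker.onmessage = function (e) {
    // e.data == "Pong"; |buffer|'s channel data is now detached.
  };
  // Listing the ArrayBuffer in the transfer list detaches it on this side;
  // the "Pong" reply signals that the transfer has completed.
  worker.postMessage("ping", [buffer.getChannelData(0).buffer]);
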
diff --git a/dom/media/webaudio/test/audiovideo.mp4 b/dom/media/webaudio/test/audiovideo.mp4
new file mode 100644
index 0000000000..fe93122d29
--- /dev/null
+++ b/dom/media/webaudio/test/audiovideo.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/blink/README b/dom/media/webaudio/test/blink/README
new file mode 100644
index 0000000000..1d819221fd
--- /dev/null
+++ b/dom/media/webaudio/test/blink/README
@@ -0,0 +1,9 @@
+This directory contains tests originally borrowed from the Blink Web Audio test
+suite.
+
+The process of borrowing tests from Blink is as follows:
+
+* Import the pristine file from the Blink repo, noting the revision in the
+ commit message.
+* Modify the test files to turn the LayoutTest into a mochitest-plain and add
+  them to the test suite in a separate commit.
diff --git a/dom/media/webaudio/test/blink/audio-testing.js b/dom/media/webaudio/test/blink/audio-testing.js
new file mode 100644
index 0000000000..c66d32c7f2
--- /dev/null
+++ b/dom/media/webaudio/test/blink/audio-testing.js
@@ -0,0 +1,192 @@
+if (window.testRunner)
+ testRunner.overridePreference("WebKitWebAudioEnabled", "1");
+
+function writeString(s, a, offset) {
+ for (var i = 0; i < s.length; ++i) {
+ a[offset + i] = s.charCodeAt(i);
+ }
+}
+
+function writeInt16(n, a, offset) {
+ n = Math.floor(n);
+
+ var b1 = n & 255;
+ var b2 = (n >> 8) & 255;
+
+ a[offset + 0] = b1;
+ a[offset + 1] = b2;
+}
+
+function writeInt32(n, a, offset) {
+ n = Math.floor(n);
+ var b1 = n & 255;
+ var b2 = (n >> 8) & 255;
+ var b3 = (n >> 16) & 255;
+ var b4 = (n >> 24) & 255;
+
+ a[offset + 0] = b1;
+ a[offset + 1] = b2;
+ a[offset + 2] = b3;
+ a[offset + 3] = b4;
+}
+
+function writeAudioBuffer(audioBuffer, a, offset) {
+ var n = audioBuffer.length;
+ var channels = audioBuffer.numberOfChannels;
+
+ for (var i = 0; i < n; ++i) {
+ for (var k = 0; k < channels; ++k) {
+ var buffer = audioBuffer.getChannelData(k);
+ var sample = buffer[i] * 32768.0;
+
+ // Clip samples to the limitations of 16-bit.
+ // If we don't do this then we'll get nasty wrap-around distortion.
+ if (sample < -32768)
+ sample = -32768;
+ if (sample > 32767)
+ sample = 32767;
+
+ writeInt16(sample, a, offset);
+ offset += 2;
+ }
+ }
+}
+
+function createWaveFileData(audioBuffer) {
+ var frameLength = audioBuffer.length;
+ var numberOfChannels = audioBuffer.numberOfChannels;
+ var sampleRate = audioBuffer.sampleRate;
+ var bitsPerSample = 16;
+ var byteRate = sampleRate * numberOfChannels * bitsPerSample/8;
+ var blockAlign = numberOfChannels * bitsPerSample/8;
+ var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
+ var headerByteLength = 44;
+ var totalLength = headerByteLength + wavDataByteLength;
+
+ var waveFileData = new Uint8Array(totalLength);
+
+ var subChunk1Size = 16; // for linear PCM
+ var subChunk2Size = wavDataByteLength;
+ var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size);
+
+ writeString("RIFF", waveFileData, 0);
+ writeInt32(chunkSize, waveFileData, 4);
+ writeString("WAVE", waveFileData, 8);
+ writeString("fmt ", waveFileData, 12);
+
+ writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4)
+ writeInt16(1, waveFileData, 20); // AudioFormat (2)
+ writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2)
+ writeInt32(sampleRate, waveFileData, 24); // SampleRate (4)
+ writeInt32(byteRate, waveFileData, 28); // ByteRate (4)
+ writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2)
+ writeInt16(bitsPerSample, waveFileData, 34); // BitsPerSample (2)
+
+ writeString("data", waveFileData, 36);
+ writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4)
+
+ // Write actual audio data starting at offset 44.
+ writeAudioBuffer(audioBuffer, waveFileData, 44);
+
+ return waveFileData;
+}
+
+function createAudioData(audioBuffer) {
+ return createWaveFileData(audioBuffer);
+}
+
+function finishAudioTest(event) {
+ var audioData = createAudioData(event.renderedBuffer);
+ testRunner.setAudioData(audioData);
+ testRunner.notifyDone();
+}
+
+// Create an impulse in a buffer of length sampleFrameLength
+function createImpulseBuffer(context, sampleFrameLength) {
+ var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ var n = audioBuffer.length;
+ var dataL = audioBuffer.getChannelData(0);
+
+ for (var k = 0; k < n; ++k) {
+ dataL[k] = 0;
+ }
+ dataL[0] = 1;
+
+ return audioBuffer;
+}
+
+// Create a buffer of the given length with a linear ramp having values 0 <= x < 1.
+function createLinearRampBuffer(context, sampleFrameLength) {
+ var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ var n = audioBuffer.length;
+ var dataL = audioBuffer.getChannelData(0);
+
+ for (var i = 0; i < n; ++i)
+ dataL[i] = i / n;
+
+ return audioBuffer;
+}
+
+// Create a buffer of the given length having a constant value.
+function createConstantBuffer(context, sampleFrameLength, constantValue) {
+ var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ var n = audioBuffer.length;
+ var dataL = audioBuffer.getChannelData(0);
+
+ for (var i = 0; i < n; ++i)
+ dataL[i] = constantValue;
+
+ return audioBuffer;
+}
+
+// Create a stereo impulse in a buffer of length sampleFrameLength
+function createStereoImpulseBuffer(context, sampleFrameLength) {
+ var audioBuffer = context.createBuffer(2, sampleFrameLength, context.sampleRate);
+ var n = audioBuffer.length;
+ var dataL = audioBuffer.getChannelData(0);
+ var dataR = audioBuffer.getChannelData(1);
+
+ for (var k = 0; k < n; ++k) {
+ dataL[k] = 0;
+ dataR[k] = 0;
+ }
+ dataL[0] = 1;
+ dataR[0] = 1;
+
+ return audioBuffer;
+}
+
+// Convert time (in seconds) to sample frames.
+function timeToSampleFrame(time, sampleRate) {
+ return Math.floor(0.5 + time * sampleRate);
+}
+
+// Compute the number of sample frames consumed by start with
+// the specified |grainOffset|, |duration|, and |sampleRate|.
+function grainLengthInSampleFrames(grainOffset, duration, sampleRate) {
+ var startFrame = timeToSampleFrame(grainOffset, sampleRate);
+ var endFrame = timeToSampleFrame(grainOffset + duration, sampleRate);
+
+ return endFrame - startFrame;
+}
+
+// True if the number is not an infinity or NaN
+function isValidNumber(x) {
+ return !isNaN(x) && (x != Infinity) && (x != -Infinity);
+}
+
+function shouldThrowTypeError(func, text) {
+ var ok = false;
+ try {
+ func();
+ } catch (e) {
+ if (e instanceof TypeError) {
+ ok = true;
+ }
+ }
+ if (ok) {
+ testPassed(text + " threw TypeError.");
+ } else {
+ testFailed(text + " should throw TypeError.");
+ }
+}
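
The helpers above are meant to be combined roughly as follows; this is a hedged
sketch (names are illustrative, not part of the patch) of rendering an impulse
offline and packaging the result as a 16-bit PCM WAV byte array:

  var ctx = new OfflineAudioContext(1, 44100, 44100);
  var src = ctx.createBufferSource();
  src.buffer = createImpulseBuffer(ctx, 44100); // helper defined above
  src.connect(ctx.destination);
  src.start(0);
  ctx.oncomplete = function (e) {
    // 44-byte RIFF/WAVE header followed by interleaved 16-bit samples.
    var wavBytes = createWaveFileData(e.renderedBuffer);
  };
  ctx.startRendering();
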
diff --git a/dom/media/webaudio/test/blink/biquad-filters.js b/dom/media/webaudio/test/blink/biquad-filters.js
new file mode 100644
index 0000000000..06fff98b18
--- /dev/null
+++ b/dom/media/webaudio/test/blink/biquad-filters.js
@@ -0,0 +1,368 @@
+// Taken from WebKit/LayoutTests/webaudio/resources/biquad-filters.js
+
+// A biquad filter has a z-transform of
+// H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2)
+//
+// The formulas for the various filters were taken from
+// http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt.
+
+
+// Lowpass filter.
+function createLowpassFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+
+ if (freq == 1) {
+ // The formula below works, except for roundoff. When freq = 1,
+ // the filter is just a wire, so hardwire the coefficients.
+ b0 = 1;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else {
+ var w0 = Math.PI * freq;
+ var alpha = 0.5 * Math.sin(w0) / Math.pow(10, q / 20);
+ var cos_w0 = Math.cos(w0);
+
+ b0 = 0.5 * (1 - cos_w0);
+ b1 = 1 - cos_w0;
+ b2 = b0;
+ a0 = 1 + alpha;
+ a1 = -2.0 * cos_w0;
+ a2 = 1 - alpha;
+ }
+
+ return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+}
+
+function createHighpassFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+
+ if (freq == 1) {
+ // The filter is 0
+ b0 = 0;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else if (freq == 0) {
+ // The filter is 1. Computation of coefficients below is ok, but
+ // there's a pole at 1 and a zero at 1, so round-off could make
+ // the filter unstable.
+ b0 = 1;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else {
+ var w0 = Math.PI * freq;
+ var alpha = 0.5 * Math.sin(w0) / Math.pow(10, q / 20);
+ var cos_w0 = Math.cos(w0);
+
+ b0 = 0.5 * (1 + cos_w0);
+ b1 = -1 - cos_w0;
+ b2 = b0;
+ a0 = 1 + alpha;
+ a1 = -2.0 * cos_w0;
+ a2 = 1 - alpha;
+ }
+
+ return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+}
+
+function normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2) {
+ var scale = 1 / a0;
+
+ return {b0 : b0 * scale,
+ b1 : b1 * scale,
+ b2 : b2 * scale,
+ a1 : a1 * scale,
+ a2 : a2 * scale};
+}
+
+function createBandpassFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ if (freq > 0 && freq < 1) {
+ var w0 = Math.PI * freq;
+ if (q > 0) {
+ var alpha = Math.sin(w0) / (2 * q);
+ var k = Math.cos(w0);
+
+ b0 = alpha;
+ b1 = 0;
+ b2 = -alpha;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0, and frequency is not 0 or 1. The above formula has a
+ // divide by zero problem. The limit of the z-transform as q
+ // approaches 0 is 1, so set the filter that way.
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+ } else {
+ // When freq = 0 or 1, the z-transform is identically 0,
+ // independent of q.
+ coef = {b0 : 0, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+
+ return coef;
+}
+
+function createLowShelfFilter(freq, q, gain) {
+ // q not used
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ var S = 1;
+ var A = Math.pow(10, gain / 40);
+
+ if (freq == 1) {
+ // The filter is just a constant gain
+ coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ } else if (freq == 0) {
+ // The filter is 1
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ } else {
+ var w0 = Math.PI * freq;
+ var alpha = 1 / 2 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2);
+ var k = Math.cos(w0);
+ var k2 = 2 * Math.sqrt(A) * alpha;
+ var Ap1 = A + 1;
+ var Am1 = A - 1;
+
+ b0 = A * (Ap1 - Am1 * k + k2);
+ b1 = 2 * A * (Am1 - Ap1 * k);
+ b2 = A * (Ap1 - Am1 * k - k2);
+ a0 = Ap1 + Am1 * k + k2;
+ a1 = -2 * (Am1 + Ap1 * k);
+ a2 = Ap1 + Am1 * k - k2;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ }
+
+ return coef;
+}
+
+function createHighShelfFilter(freq, q, gain) {
+ // q not used
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ var A = Math.pow(10, gain / 40);
+
+ if (freq == 1) {
+ // When freq = 1, the z-transform is 1
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ } else if (freq > 0) {
+ var w0 = Math.PI * freq;
+ var S = 1;
+ var alpha = 0.5 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2);
+ var k = Math.cos(w0);
+ var k2 = 2 * Math.sqrt(A) * alpha;
+ var Ap1 = A + 1;
+ var Am1 = A - 1;
+
+ b0 = A * (Ap1 + Am1 * k + k2);
+ b1 = -2 * A * (Am1 + Ap1 * k);
+ b2 = A * (Ap1 + Am1 * k - k2);
+ a0 = Ap1 - Am1 * k + k2;
+ a1 = 2 * (Am1 - Ap1 * k);
+ a2 = Ap1 - Am1 * k - k2;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // When freq = 0, the filter is just a gain
+ coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+
+ return coef;
+}
+
+function createPeakingFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ var A = Math.pow(10, gain / 40);
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ var w0 = Math.PI * freq;
+ var alpha = Math.sin(w0) / (2 * q);
+ var k = Math.cos(w0);
+
+ b0 = 1 + alpha * A;
+ b1 = -2 * k;
+ b2 = 1 - alpha * A;
+ a0 = 1 + alpha / A;
+ a1 = -2 * k;
+ a2 = 1 - alpha / A;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0, we have a divide by zero problem in the formulas
+ // above. But if we look at the z-transform, we see that the
+ // limit as q approaches 0 is A^2.
+ coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+ } else {
+ // freq = 0 or 1, the z-transform is 1
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+
+ return coef;
+}
+
+function createNotchFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ var w0 = Math.PI * freq;
+ var alpha = Math.sin(w0) / (2 * q);
+ var k = Math.cos(w0);
+
+ b0 = 1;
+ b1 = -2 * k;
+ b2 = 1;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // When q = 0, we get a divide by zero above. The limit of the
+ // z-transform as q approaches 0 is 0, so set the coefficients
+ // appropriately.
+ coef = {b0 : 0, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+ } else {
+ // When freq = 0 or 1, the z-transform is 1
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+
+ return coef;
+}
+
+function createAllpassFilter(freq, q, gain) {
+ var b0;
+ var b1;
+ var b2;
+ var a0;
+ var a1;
+ var a2;
+ var coef;
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ var w0 = Math.PI * freq;
+ var alpha = Math.sin(w0) / (2 * q);
+ var k = Math.cos(w0);
+
+ b0 = 1 - alpha;
+ b1 = -2 * k;
+ b2 = 1 + alpha;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0: the limit of the z-transform as q approaches 0 is -1.
+ coef = {b0 : -1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+ } else {
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0};
+ }
+
+ return coef;
+}
+
+function filterData(filterCoef, signal, len) {
+ var y = new Array(len);
+ var b0 = filterCoef.b0;
+ var b1 = filterCoef.b1;
+ var b2 = filterCoef.b2;
+ var a1 = filterCoef.a1;
+ var a2 = filterCoef.a2;
+
+ // Prime the pump. (Assumes the signal has length >= 2!)
+ y[0] = b0 * signal[0];
+ y[1] = b0 * signal[1] + b1 * signal[0] - a1 * y[0];
+
+ // Filter all of the signal that we have.
+ for (var k = 2; k < Math.min(signal.length, len); ++k) {
+ y[k] = b0 * signal[k] + b1 * signal[k-1] + b2 * signal[k-2] - a1 * y[k-1] - a2 * y[k-2];
+ }
+
+ // If we need to filter more, but don't have any signal left,
+ // assume the signal is zero.
+ for (var k = signal.length; k < len; ++k) {
+ y[k] = - a1 * y[k-1] - a2 * y[k-2];
+ }
+
+ return y;
+}
+
+// Map the filter type name to a function that computes the filter coefficients for the given
+// filter type.
+var filterCreatorFunction = {"lowpass": createLowpassFilter,
+ "highpass": createHighpassFilter,
+ "bandpass": createBandpassFilter,
+ "lowshelf": createLowShelfFilter,
+ "highshelf": createHighShelfFilter,
+ "peaking": createPeakingFilter,
+ "notch": createNotchFilter,
+ "allpass": createAllpassFilter};
+
+var filterTypeName = {"lowpass": "Lowpass filter",
+ "highpass": "Highpass filter",
+ "bandpass": "Bandpass filter",
+ "lowshelf": "Lowshelf filter",
+ "highshelf": "Highshelf filter",
+ "peaking": "Peaking filter",
+ "notch": "Notch filter",
+ "allpass": "Allpass filter"};
+
+function createFilter(filterType, freq, q, gain) {
+ return filterCreatorFunction[filterType](freq, q, gain);
+}
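
A worked example of how these coefficient helpers are used (an illustrative
sketch; the cutoff and Q values are arbitrary). Frequencies are normalized to
the Nyquist frequency, Q is in dB for the lowpass formula, and the returned
coefficients are already normalized by a0:

  // Lowpass at a quarter of Nyquist (sampleRate / 8) with Q = 1 dB.
  var coef = createFilter("lowpass", 0.25, 1, 0); // gain is unused for lowpass
  // filterData() then evaluates the difference equation
  //   y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
  // (a padded impulse is used because filterData assumes signal.length >= 2).
  var impulseResponse = filterData(coef, [1, 0, 0], 512);
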
diff --git a/dom/media/webaudio/test/blink/biquad-testing.js b/dom/media/webaudio/test/blink/biquad-testing.js
new file mode 100644
index 0000000000..795adf6012
--- /dev/null
+++ b/dom/media/webaudio/test/blink/biquad-testing.js
@@ -0,0 +1,153 @@
+// Globals, to make testing and debugging easier.
+var context;
+var filter;
+var signal;
+var renderedBuffer;
+var renderedData;
+
+var sampleRate = 44100.0;
+var pulseLengthFrames = .1 * sampleRate;
+
+// Maximum allowed error for the test to succeed. Experimentally determined.
+var maxAllowedError = 5.9e-8;
+
+// This must be large enough so that the filtered result is
+// essentially zero. See comments for createTestAndRun.
+var timeStep = .1;
+
+// Maximum number of filters we can process (mostly for setting the
+// render length correctly.)
+var maxFilters = 5;
+
+// How long to render. Must be long enough for all of the filters we
+// want to test.
+var renderLengthSeconds = timeStep * (maxFilters + 1);
+
+var renderLengthSamples = Math.round(renderLengthSeconds * sampleRate);
+
+// Number of filters that will be processed.
+var nFilters;
+
+function createImpulseBuffer(context, length) {
+ var impulse = context.createBuffer(1, length, context.sampleRate);
+ var data = impulse.getChannelData(0);
+ for (var k = 1; k < data.length; ++k) {
+ data[k] = 0;
+ }
+ data[0] = 1;
+
+ return impulse;
+}
+
+
+function createTestAndRun(context, filterType, filterParameters) {
+ // To test the filters, we apply a signal (an impulse) to each of
+ // the specified filters, with each signal starting at a different
+ // time. The output of the filters is summed together at the
+ // output. Thus for filter k, the signal input to the filter
+ // starts at time k * timeStep. For this to work well, timeStep
+ // must be large enough for the output of each filter to have
+ // decayed to zero within timeStep seconds. That way the filter
+ // outputs don't interfere with each other.
+
+ nFilters = Math.min(filterParameters.length, maxFilters);
+
+ signal = new Array(nFilters);
+ filter = new Array(nFilters);
+
+ impulse = createImpulseBuffer(context, pulseLengthFrames);
+
+ // Create all of the signal sources and filters that we need.
+ for (var k = 0; k < nFilters; ++k) {
+ signal[k] = context.createBufferSource();
+ signal[k].buffer = impulse;
+
+ filter[k] = context.createBiquadFilter();
+ filter[k].type = filterType;
+ filter[k].frequency.value = context.sampleRate / 2 * filterParameters[k].cutoff;
+ filter[k].detune.value = (filterParameters[k].detune === undefined) ? 0 : filterParameters[k].detune;
+ filter[k].Q.value = filterParameters[k].q;
+ filter[k].gain.value = filterParameters[k].gain;
+
+ signal[k].connect(filter[k]);
+ filter[k].connect(context.destination);
+
+ signal[k].start(timeStep * k);
+ }
+
+ context.oncomplete = checkFilterResponse(filterType, filterParameters);
+ context.startRendering();
+}
+
+function addSignal(dest, src, destOffset) {
+ // Add src to dest at the given dest offset.
+ for (var k = destOffset, j = 0; k < dest.length && j < src.length; ++k, ++j) {
+ dest[k] += src[j];
+ }
+}
+
+function generateReference(filterType, filterParameters) {
+ var result = new Array(renderLengthSamples);
+ var data = new Array(renderLengthSamples);
+ // Initialize the result array and data.
+ for (var k = 0; k < result.length; ++k) {
+ result[k] = 0;
+ data[k] = 0;
+ }
+ // Make data an impulse.
+ data[0] = 1;
+
+ for (var k = 0; k < nFilters; ++k) {
+ // Filter an impulse
+ var detune = (filterParameters[k].detune === undefined) ? 0 : filterParameters[k].detune;
+ var frequency = filterParameters[k].cutoff * Math.pow(2, detune / 1200); // Apply detune, converting from Cents.
+
+ var filterCoef = createFilter(filterType,
+ frequency,
+ filterParameters[k].q,
+ filterParameters[k].gain);
+ var y = filterData(filterCoef, data, renderLengthSamples);
+
+ // Accumulate this filtered data into the final output at the desired offset.
+ addSignal(result, y, timeToSampleFrame(timeStep * k, sampleRate));
+ }
+
+ return result;
+}
+
+function checkFilterResponse(filterType, filterParameters) {
+ return function(event) {
+ renderedBuffer = event.renderedBuffer;
+ renderedData = renderedBuffer.getChannelData(0);
+
+ var reference = generateReference(filterType, filterParameters);
+
+ var len = Math.min(renderedData.length, reference.length);
+
+ var success = true;
+
+ // Maximum error between rendered data and expected data
+ var maxError = 0;
+
+ // Sample offset where the maximum error occurred.
+ var maxPosition = 0;
+
+ // Number of infinities or NaNs that occurred in the rendered data.
+ var invalidNumberCount = 0;
+
+ ok(nFilters == filterParameters.length, "Test wanted " + filterParameters.length + " filters but only " + maxFilters + " allowed.");
+
+ compareChannels(renderedData, reference, len, 0, 0, true);
+
+ // Check for bad numbers in the rendered output too.
+ // There shouldn't be any.
+ for (var k = 0; k < len; ++k) {
+ if (!isValidNumber(renderedData[k])) {
+ ++invalidNumberCount;
+ }
+ }
+
+ ok(invalidNumberCount == 0, "Rendered output has " + invalidNumberCount + " infinities or NaNs.");
+ SimpleTest.finish();
+ }
+}
diff --git a/dom/media/webaudio/test/blink/convolution-testing.js b/dom/media/webaudio/test/blink/convolution-testing.js
new file mode 100644
index 0000000000..98ff0c7756
--- /dev/null
+++ b/dom/media/webaudio/test/blink/convolution-testing.js
@@ -0,0 +1,182 @@
+var sampleRate = 44100.0;
+
+var renderLengthSeconds = 8;
+var pulseLengthSeconds = 1;
+var pulseLengthFrames = pulseLengthSeconds * sampleRate;
+
+function createSquarePulseBuffer(context, sampleFrameLength) {
+ var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
+
+ var n = audioBuffer.length;
+ var data = audioBuffer.getChannelData(0);
+
+ for (var i = 0; i < n; ++i)
+ data[i] = 1;
+
+ return audioBuffer;
+}
+
+// The triangle buffer holds the expected result of the convolution.
+// It linearly ramps up from 0 to its maximum value (at the center)
+// then linearly ramps down to 0. The center value corresponds to the
+// point where the two square pulses overlap the most.
+function createTrianglePulseBuffer(context, sampleFrameLength) {
+ var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate);
+
+ var n = audioBuffer.length;
+ var halfLength = n / 2;
+ var data = audioBuffer.getChannelData(0);
+
+ for (var i = 0; i < halfLength; ++i)
+ data[i] = i + 1;
+
+ for (var i = halfLength; i < n; ++i)
+ data[i] = n - i - 1;
+
+ return audioBuffer;
+}
+
+function log10(x) {
+ return Math.log(x)/Math.LN10;
+}
+
+function linearToDecibel(x) {
+ return 20*log10(x);
+}
+
+// Verify that the rendered result is very close to the reference
+// triangular pulse.
+function checkTriangularPulse(rendered, reference) {
+ var match = true;
+ var maxDelta = 0;
+ var valueAtMaxDelta = 0;
+ var maxDeltaIndex = 0;
+
+ for (var i = 0; i < reference.length; ++i) {
+ var diff = rendered[i] - reference[i];
+ var x = Math.abs(diff);
+ if (x > maxDelta) {
+ maxDelta = x;
+ valueAtMaxDelta = reference[i];
+ maxDeltaIndex = i;
+ }
+ }
+
+ // allowedDeviationFraction was determined experimentally. It
+ // is the threshold of the relative error at the maximum
+ // difference between the true triangular pulse and the
+ // rendered pulse.
+ var allowedDeviationDecibels = -129.4;
+ var maxDeviationDecibels = linearToDecibel(maxDelta / valueAtMaxDelta);
+
+ if (maxDeviationDecibels <= allowedDeviationDecibels) {
+ testPassed("Triangular portion of convolution is correct.");
+ } else {
+ testFailed("Triangular portion of convolution is not correct. Max deviation = " + maxDeviationDecibels + " dB at " + maxDeltaIndex);
+ match = false;
+ }
+
+ return match;
+}
+
+// Verify that the rendered data is close to zero for the first part
+// of the tail.
+function checkTail1(data, reference, breakpoint) {
+ var isZero = true;
+ var tail1Max = 0;
+
+ for (var i = reference.length; i < reference.length + breakpoint; ++i) {
+ var mag = Math.abs(data[i]);
+ if (mag > tail1Max) {
+ tail1Max = mag;
+ }
+ }
+
+ // Let's find the peak of the reference (even though we know a
+ // priori what it is).
+ var refMax = 0;
+ for (var i = 0; i < reference.length; ++i) {
+ refMax = Math.max(refMax, Math.abs(reference[i]));
+ }
+
+ // This threshold is experimentally determined by examining the
+ // value of tail1MaxDecibels.
+ var threshold1 = -129.7;
+
+ var tail1MaxDecibels = linearToDecibel(tail1Max/refMax);
+ if (tail1MaxDecibels <= threshold1) {
+ testPassed("First part of tail of convolution is sufficiently small.");
+ } else {
+ testFailed("First part of tail of convolution is not sufficiently small: " + tail1MaxDecibels + " dB");
+ isZero = false;
+ }
+
+ return isZero;
+}
+
+// Verify that the second part of the tail of the convolution is
+// exactly zero.
+function checkTail2(data, reference, breakpoint) {
+ var isZero = true;
+ var tail2Max = 0;
+ // For the second part of the tail, the maximum value should be
+ // exactly zero.
+ var threshold2 = 0;
+ for (var i = reference.length + breakpoint; i < data.length; ++i) {
+ if (Math.abs(data[i]) > 0) {
+ isZero = false;
+ break;
+ }
+ }
+
+ if (isZero) {
+ testPassed("Rendered signal after tail of convolution is silent.");
+ } else {
+ testFailed("Rendered signal after tail of convolution should be silent.");
+ }
+
+ return isZero;
+}
+
+function checkConvolvedResult(trianglePulse) {
+ return function(event) {
+ var renderedBuffer = event.renderedBuffer;
+
+ var referenceData = trianglePulse.getChannelData(0);
+ var renderedData = renderedBuffer.getChannelData(0);
+
+ var success = true;
+
+ // Verify the triangular pulse is actually triangular.
+
+ success = success && checkTriangularPulse(renderedData, referenceData);
+
+ // Make sure that portion after convolved portion is totally
+ // silent. But round-off prevents this from being completely
+ // true. At the end of the triangle, it should be close to
+ // zero. If we go farther out, it should be even closer and
+ // eventually zero.
+
+ // For the tail of the convolution (where the result would be
+ // theoretically zero), we partition the tail into two
+ // parts. The first is the at the beginning of the tail,
+ // where we tolerate a small but non-zero value. The second part is
+ // farther along the tail where the result should be zero.
+
+ // breakpoint is the point dividing the first two tail parts
+ // we're looking at. Experimentally determined.
+ var breakpoint = 12800;
+
+ success = success && checkTail1(renderedData, referenceData, breakpoint);
+
+ success = success && checkTail2(renderedData, referenceData, breakpoint);
+
+ if (success) {
+ testPassed("Test signal was correctly convolved.");
+ } else {
+ testFailed("Test signal was not correctly convolved.");
+ }
+
+ finishJSTest();
+ }
+}
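
A sketch of the graph these helpers verify (illustrative; the convolver setup
is an assumption based on how the helpers are used): convolving a one-second
square pulse with itself produces a two-second triangular pulse, which
checkConvolvedResult() compares against the reference buffer:

  var ctx = new OfflineAudioContext(1, sampleRate * renderLengthSeconds, sampleRate);
  var squarePulse = createSquarePulseBuffer(ctx, pulseLengthFrames);
  var trianglePulse = createTrianglePulseBuffer(ctx, 2 * pulseLengthFrames);
  var src = ctx.createBufferSource();
  src.buffer = squarePulse;
  var convolver = ctx.createConvolver();
  convolver.normalize = false; // keep raw convolution amplitudes
  convolver.buffer = squarePulse; // impulse response = the same square pulse
  src.connect(convolver);
  convolver.connect(ctx.destination);
  src.start(0);
  ctx.oncomplete = checkConvolvedResult(trianglePulse);
  ctx.startRendering();
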
diff --git a/dom/media/webaudio/test/blink/mochitest.ini b/dom/media/webaudio/test/blink/mochitest.ini
new file mode 100644
index 0000000000..8d115b2e8e
--- /dev/null
+++ b/dom/media/webaudio/test/blink/mochitest.ini
@@ -0,0 +1,22 @@
+[DEFAULT]
+tags = mtg webaudio
+subsuite = media
+support-files =
+ biquad-filters.js
+ biquad-testing.js
+ ../webaudio.js
+
+[test_biquadFilterNodeAllPass.html]
+[test_biquadFilterNodeAutomation.html]
+skip-if = true # Known problems with Biquad automation, e.g. Bug 1155709
+[test_biquadFilterNodeBandPass.html]
+[test_biquadFilterNodeGetFrequencyResponse.html]
+[test_biquadFilterNodeHighPass.html]
+[test_biquadFilterNodeHighShelf.html]
+[test_biquadFilterNodeLowPass.html]
+[test_biquadFilterNodeLowShelf.html]
+[test_biquadFilterNodeNotch.html]
+[test_biquadFilterNodePeaking.html]
+[test_biquadFilterNodeTail.html]
+[test_iirFilterNode.html]
+[test_iirFilterNodeGetFrequencyResponse.html]
diff --git a/dom/media/webaudio/test/blink/panner-model-testing.js b/dom/media/webaudio/test/blink/panner-model-testing.js
new file mode 100644
index 0000000000..45460e2768
--- /dev/null
+++ b/dom/media/webaudio/test/blink/panner-model-testing.js
@@ -0,0 +1,210 @@
+var sampleRate = 48000.0;
+
+var numberOfChannels = 1;
+
+// Time step when each panner node starts.
+var timeStep = 0.001;
+
+// Length of the impulse signal.
+var pulseLengthFrames = Math.round(timeStep * sampleRate);
+
+// How many panner nodes to create for the test
+var nodesToCreate = 100;
+
+// Be sure we render long enough for all of our nodes.
+var renderLengthSeconds = timeStep * (nodesToCreate + 1);
+
+// These are global mostly for debugging.
+var context;
+var impulse;
+var bufferSource;
+var panner;
+var position;
+var time;
+
+var renderedBuffer;
+var renderedLeft;
+var renderedRight;
+
+function createGraph(context, nodeCount) {
+ bufferSource = new Array(nodeCount);
+ panner = new Array(nodeCount);
+ position = new Array(nodeCount);
+ time = new Array(nodeCount);
+ // Angle between panner locations (nodeCount - 1 because we want
+ // to include both 0 and 180 deg).
+ var angleStep = Math.PI / (nodeCount - 1);
+
+ if (numberOfChannels == 2) {
+ impulse = createStereoImpulseBuffer(context, pulseLengthFrames);
+ } else {
+ impulse = createImpulseBuffer(context, pulseLengthFrames);
+ }
+
+ for (var k = 0; k < nodeCount; ++k) {
+ bufferSource[k] = context.createBufferSource();
+ bufferSource[k].buffer = impulse;
+
+ panner[k] = context.createPanner();
+ panner[k].panningModel = "equalpower";
+ panner[k].distanceModel = "linear";
+
+ var angle = angleStep * k;
+ position[k] = {angle : angle, x : Math.cos(angle), z : Math.sin(angle)};
+ panner[k].positionX.value = position[k].x;
+ panner[k].positionZ.value = position[k].z;
+
+ bufferSource[k].connect(panner[k]);
+ panner[k].connect(context.destination);
+
+ // Start the source
+ time[k] = k * timeStep;
+ bufferSource[k].start(time[k]);
+ }
+}
+
+function createTestAndRun(context, nodeCount, numberOfSourceChannels) {
+ numberOfChannels = numberOfSourceChannels;
+
+ createGraph(context, nodeCount);
+
+ context.oncomplete = checkResult;
+ context.startRendering();
+}
+
+// Map our position angle to the azimuth angle (in degrees).
+//
+// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg.
+function angleToAzimuth(angle) {
+ return 90 - angle * 180 / Math.PI;
+}
+
+// The gain caused by the EQUALPOWER panning model
+function equalPowerGain(angle) {
+ var azimuth = angleToAzimuth(angle);
+
+ if (numberOfChannels == 1) {
+ var panPosition = (azimuth + 90) / 180;
+
+ var gainL = Math.cos(0.5 * Math.PI * panPosition);
+ var gainR = Math.sin(0.5 * Math.PI * panPosition);
+
+ return { left : gainL, right : gainR };
+ } else {
+ if (azimuth <= 0) {
+ var panPosition = (azimuth + 90) / 90;
+
+ var gainL = 1 + Math.cos(0.5 * Math.PI * panPosition);
+ var gainR = Math.sin(0.5 * Math.PI * panPosition);
+
+ return { left : gainL, right : gainR };
+ } else {
+ var panPosition = azimuth / 90;
+
+ var gainL = Math.cos(0.5 * Math.PI * panPosition);
+ var gainR = 1 + Math.sin(0.5 * Math.PI * panPosition);
+
+ return { left : gainL, right : gainR };
+ }
+ }
+}
+
+function checkResult(event) {
+ renderedBuffer = event.renderedBuffer;
+ renderedLeft = renderedBuffer.getChannelData(0);
+ renderedRight = renderedBuffer.getChannelData(1);
+
+ // The max error we allow between the rendered impulse and the
+ // expected value. This value is experimentally determined. Set
+ // to 0 to make the test fail to see what the actual error is.
+ var maxAllowedError = 1.3e-6;
+
+ var success = true;
+
+ // Number of impulses found in the rendered result.
+ var impulseCount = 0;
+
+ // Max (relative) error and the index of the maxima for the left
+ // and right channels.
+ var maxErrorL = 0;
+ var maxErrorIndexL = 0;
+ var maxErrorR = 0;
+ var maxErrorIndexR = 0;
+
+ // Number of impulses that don't match our expected locations.
+ var timeCount = 0;
+
+ // Sample offsets where impulses appeared but were not at the expected locations.
+ var timeErrors = [];
+
+ for (var k = 0; k < renderedLeft.length; ++k) {
+ // We assume that the left and right channels start at the same instant.
+ if (renderedLeft[k] != 0 || renderedRight[k] != 0) {
+ // The expected gain for the left and right channels.
+ var pannerGain = equalPowerGain(position[impulseCount].angle);
+ var expectedL = pannerGain.left;
+ var expectedR = pannerGain.right;
+
+ // Absolute error in the gain.
+ var errorL = Math.abs(renderedLeft[k] - expectedL);
+ var errorR = Math.abs(renderedRight[k] - expectedR);
+
+ if (Math.abs(errorL) > maxErrorL) {
+ maxErrorL = Math.abs(errorL);
+ maxErrorIndexL = impulseCount;
+ }
+ if (Math.abs(errorR) > maxErrorR) {
+ maxErrorR = Math.abs(errorR);
+ maxErrorIndexR = impulseCount;
+ }
+
+ // Keep track of the impulses that didn't show up where we
+ // expected them to be.
+ var expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate);
+ if (k != expectedOffset) {
+ timeErrors[timeCount] = { actual : k, expected : expectedOffset};
+ ++timeCount;
+ }
+ ++impulseCount;
+ }
+ }
+
+ if (impulseCount == nodesToCreate) {
+ testPassed("Number of impulses matches the number of panner nodes.");
+ } else {
+ testFailed("Number of impulses is incorrect. (Found " + impulseCount + " but expected " + nodesToCreate + ")");
+ success = false;
+ }
+
+ if (timeErrors.length > 0) {
+ success = false;
+ testFailed(timeErrors.length + " timing errors found in " + nodesToCreate + " panner nodes.");
+ for (var k = 0; k < timeErrors.length; ++k) {
+ testFailed("Impulse at sample " + timeErrors[k].actual + " but expected " + timeErrors[k].expected);
+ }
+ } else {
+ testPassed("All impulses at expected offsets.");
+ }
+
+ if (maxErrorL <= maxAllowedError) {
+ testPassed("Left channel gain values are correct.");
+ } else {
+ testFailed("Left channel gain values are incorrect. Max error = " + maxErrorL + " at time " + time[maxErrorIndexL] + " (threshold = " + maxAllowedError + ")");
+ success = false;
+ }
+
+ if (maxErrorR <= maxAllowedError) {
+ testPassed("Right channel gain values are correct.");
+ } else {
+ testFailed("Right channel gain values are incorrect. Max error = " + maxErrorR + " at time " + time[maxErrorIndexR] + " (threshold = " + maxAllowedError + ")");
+ success = false;
+ }
+
+ if (success) {
+ testPassed("EqualPower panner test passed");
+ } else {
+ testFailed("EqualPower panner test failed");
+ }
+
+ finishJSTest();
+}
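
A quick worked check of equalPowerGain() (illustrative): an angle of pi/2 maps
to an azimuth of 0 degrees (straight ahead), so a mono source gets panPosition
0.5 and both channels receive cos(pi/4), about 0.7071:

  numberOfChannels = 1;
  var g = equalPowerGain(Math.PI / 2);
  // g.left ~= 0.7071, g.right ~= 0.7071, and
  // g.left * g.left + g.right * g.right == 1, i.e. total power is preserved.
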
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html
new file mode 100644
index 0000000000..024c2f50df
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode All Pass Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ var filterParameters = [{cutoff : 0, q : 10, gain : 1 },
+ {cutoff : 1, q : 10, gain : 1 },
+ {cutoff : .5, q : 0, gain : 1 },
+ {cutoff : 0.25, q : 10, gain : 1 },
+ ];
+ createTestAndRun(context, "allpass", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html
new file mode 100644
index 0000000000..5a71ce46e5
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html
@@ -0,0 +1,351 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test BiquadFilterNode Automation</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+  // These tests don't need to run at a high sample rate, so use a low one to reduce memory
+  // usage and complexity.
+ var sampleRate = 16000;
+
+ // How long to render for each test.
+ var renderDuration = 1;
+
+ // The definition of the linear ramp automation function.
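+  // e.g. linearRamp(0.25, 0, 100, 0, 1) == 25.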
+ function linearRamp(t, v0, v1, t0, t1) {
+ return v0 + (v1 - v0) * (t - t0) / (t1 - t0);
+ }
+
+ // Generate the filter coefficients for the specified filter using the given parameters for
+ // the given duration. |filterTypeFunction| is a function that returns the filter
+ // coefficients for one set of parameters. |parameters| is a property bag that contains the
+ // start and end values (as an array) for each of the biquad attributes. The properties are
+ // |freq|, |Q|, |gain|, and |detune|. |duration| is the number of seconds for which the
+ // coefficients are generated.
+ //
+  // Returns a property bag with properties |b0|, |b1|, |b2|, |a1|, |a2|. Each property is an
+  // array consisting of the coefficients for the time-varying biquad filter.
+ function generateFilterCoefficients(filterTypeFunction, parameters, duration) {
+ var endFrame = Math.ceil(duration * sampleRate);
+ var nCoef = endFrame;
+ var b0 = new Float64Array(nCoef);
+ var b1 = new Float64Array(nCoef);
+ var b2 = new Float64Array(nCoef);
+ var a1 = new Float64Array(nCoef);
+ var a2 = new Float64Array(nCoef);
+
+ var k = 0;
+ // If the property is not given, use the defaults.
+ var freqs = parameters.freq || [350, 350];
+ var qs = parameters.Q || [1, 1];
+ var gains = parameters.gain || [0, 0];
+ var detunes = parameters.detune || [0, 0];
+
+ for (var frame = 0; frame < endFrame; ++frame) {
+ // Apply linear ramp at frame |frame|.
+ var f = linearRamp(frame / sampleRate, freqs[0], freqs[1], 0, duration);
+ var q = linearRamp(frame / sampleRate, qs[0], qs[1], 0, duration);
+ var g = linearRamp(frame / sampleRate, gains[0], gains[1], 0, duration);
+ var d = linearRamp(frame / sampleRate, detunes[0], detunes[1], 0, duration);
+
+ // Compute actual frequency parameter
+ f = f * Math.pow(2, d / 1200);
+
+ // Compute filter coefficients
+ var coef = filterTypeFunction(f / (sampleRate / 2), q, g);
+ b0[k] = coef.b0;
+ b1[k] = coef.b1;
+ b2[k] = coef.b2;
+ a1[k] = coef.a1;
+ a2[k] = coef.a2;
+ ++k;
+ }
+
+ return {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2};
+ }
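+
+  // For example, sweeping a bandpass center frequency from 350 Hz to 1 kHz over one second
+  // (|createBandpassFilter| comes from biquad-filters.js):
+  //
+  //   var coefs = generateFilterCoefficients(createBandpassFilter, {freq: [350, 1000]}, 1);
+  //   // coefs.b0[k], ..., coefs.a2[k] are the coefficients to apply at frame k.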
+
+ // Apply the given time-varying biquad filter to the given signal, |signal|. |coef| should be
+ // the time-varying coefficients of the filter, as returned by |generateFilterCoefficients|.
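+  // The recurrence is the direct form I difference equation with per-sample coefficients:
+  //   y[n] = b0[n]*x[n] + b1[n]*x[n-1] + b2[n]*x[n-2] - a1[n]*y[n-1] - a2[n]*y[n-2]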
+ function timeVaryingFilter(signal, coef) {
+ var length = signal.length;
+ // Use double precision for the internal computations.
+ var y = new Float64Array(length);
+
+ // Prime the pump. (Assumes the signal has length >= 2!)
+ y[0] = coef.b0[0] * signal[0];
+ y[1] = coef.b0[1] * signal[1] + coef.b1[1] * signal[0] - coef.a1[1] * y[0];
+
+ for (var n = 2; n < length; ++n) {
+ y[n] = coef.b0[n] * signal[n] + coef.b1[n] * signal[n-1] + coef.b2[n] * signal[n-2];
+ y[n] -= coef.a1[n] * y[n-1] + coef.a2[n] * y[n-2];
+ }
+
+ // But convert the result to single precision for comparison.
+ return y.map(Math.fround);
+ }
+
+ // Configure the audio graph using |context|. Returns the biquad filter node and the
+ // AudioBuffer used for the source.
+ function configureGraph(context, toneFrequency) {
+ // The source is just a simple sine wave.
+ var src = context.createBufferSource();
+ var b = context.createBuffer(1, renderDuration * sampleRate, sampleRate);
+ var data = b.getChannelData(0);
+ var omega = 2 * Math.PI * toneFrequency / sampleRate;
+ for (var k = 0; k < data.length; ++k) {
+ data[k] = Math.sin(omega * k);
+ }
+ src.buffer = b;
+ var f = context.createBiquadFilter();
+ src.connect(f);
+ f.connect(context.destination);
+
+ src.start();
+
+ return {filter: f, source: b};
+ }
+
+ function createFilterVerifier(filterCreator, threshold, parameters, input, message) {
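+    // Note that |threshold| and |message| are currently unused; the output comparison is
+    // delegated to compareChannels().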
+ return function (resultBuffer) {
+ var actual = resultBuffer.getChannelData(0);
+ var coefs = generateFilterCoefficients(filterCreator, parameters, renderDuration);
+
+      var reference = timeVaryingFilter(input, coefs);
+
+ compareChannels(actual, reference);
+ };
+ }
+
+ var testPromises = [];
+
+ // Automate just the frequency parameter. A bandpass filter is used where the center
+ // frequency is swept across the source (which is a simple tone).
+ testPromises.push(function () {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Center frequency of bandpass filter and also the frequency of the test tone.
+ var centerFreq = 10*440;
+
+    // Sweep the frequency +/- 9*440 Hz from the center. This should cause the output to be low
+    // at the beginning and end of the test, where the tone is outside the pass band of the
+    // filter, but high in the middle, where the tone is near the center of the pass band.
+    var parameters = {
+      freq: [centerFreq - 9*440, centerFreq + 9*440]
+    };
+ var graph = configureGraph(context, centerFreq);
+ var f = graph.filter;
+ var b = graph.source;
+
+ f.type = "bandpass";
+ f.frequency.setValueAtTime(parameters.freq[0], 0);
+ f.frequency.linearRampToValueAtTime(parameters.freq[1], renderDuration);
+
+ return context.startRendering()
+ .then(createFilterVerifier(createBandpassFilter, 5e-5, parameters, b.getChannelData(0),
+ "Output of bandpass filter with frequency automation"));
+ }());
+
+ // Automate just the Q parameter. A bandpass filter is used where the Q of the filter is
+ // swept.
+ testPromises.push(function() {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // The frequency of the test tone.
+ var centerFreq = 440;
+
+    // Sweep the Q parameter from 1 to 200. This causes the filter to pass most of the tone at
+    // the beginning but less of it at the end. This is because we set the center frequency of
+    // the bandpass filter to be slightly off from the actual tone.
+ var parameters = {
+ Q: [1, 200],
+ // Center frequency of the bandpass filter is just 25 Hz above the tone frequency.
+ freq: [centerFreq + 25, centerFreq + 25]
+ };
+ var graph = configureGraph(context, centerFreq);
+ var f = graph.filter;
+ var b = graph.source;
+
+ f.type = "bandpass";
+ f.frequency.value = parameters.freq[0];
+ f.Q.setValueAtTime(parameters.Q[0], 0);
+ f.Q.linearRampToValueAtTime(parameters.Q[1], renderDuration);
+
+ return context.startRendering()
+ .then(createFilterVerifier(createBandpassFilter, 1.4e-6, parameters, b.getChannelData(0),
+ "Output of bandpass filter with Q automation"));
+ }());
+
+ // Automate just the gain of the lowshelf filter. A test tone will be in the lowshelf part of
+ // the filter. The output will vary as the gain of the lowshelf is changed.
+ testPromises.push(function() {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Frequency of the test tone.
+ var centerFreq = 440;
+
+ // Set the cutoff frequency of the lowshelf to be significantly higher than the test tone.
+ // Sweep the gain from 20 dB to -20 dB. (We go from 20 to -20 to easily verify that the
+ // filter didn't go unstable.)
+ var parameters = {
+ freq: [3500, 3500],
+ gain: [20, -20]
+    };
+ var graph = configureGraph(context, centerFreq);
+ var f = graph.filter;
+ var b = graph.source;
+
+ f.type = "lowshelf";
+ f.frequency.value = parameters.freq[0];
+ f.gain.setValueAtTime(parameters.gain[0], 0);
+ f.gain.linearRampToValueAtTime(parameters.gain[1], renderDuration);
+
+    return context.startRendering()
+ .then(createFilterVerifier(createLowShelfFilter, 8e-6, parameters, b.getChannelData(0),
+ "Output of lowshelf filter with gain automation"));
+ }());
+
+  // Automate just the detune parameter. Basically the same test as for the frequency parameter,
+  // but we just use the detune parameter to modulate the frequency parameter.
+ testPromises.push(function() {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ var centerFreq = 10*440;
+ var parameters = {
+ freq: [centerFreq, centerFreq],
+ detune: [-10*1200, 10*1200]
+ };
+ var graph = configureGraph(context, centerFreq);
+ var f = graph.filter;
+ var b = graph.source;
+
+ f.type = "bandpass";
+ f.frequency.value = parameters.freq[0];
+ f.detune.setValueAtTime(parameters.detune[0], 0);
+ f.detune.linearRampToValueAtTime(parameters.detune[1], renderDuration);
+
+    return context.startRendering()
+ .then(createFilterVerifier(createBandpassFilter, 5e-6, parameters, b.getChannelData(0),
+ "Output of bandpass filter with detune automation"));
+ }());
+
+ // Automate all of the filter parameters at once. This is a basic check that everything is
+ // working. A peaking filter is used because it uses all of the parameters.
+ testPromises.push(function() {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ var graph = configureGraph(context, 10*440);
+ var f = graph.filter;
+ var b = graph.source;
+
+ // Sweep all of the filter parameters. These are pretty much arbitrary.
+ var parameters = {
+ freq: [10000, 100],
+ Q: [f.Q.value, .0001],
+ gain: [f.gain.value, 20],
+ detune: [2400, -2400]
+ };
+
+ f.type = "peaking";
+ // Set starting points for all parameters of the filter. Start at 10 kHz for the center
+ // frequency, and the defaults for Q and gain.
+ f.frequency.setValueAtTime(parameters.freq[0], 0);
+ f.Q.setValueAtTime(parameters.Q[0], 0);
+ f.gain.setValueAtTime(parameters.gain[0], 0);
+ f.detune.setValueAtTime(parameters.detune[0], 0);
+
+ // Linear ramp each parameter
+ f.frequency.linearRampToValueAtTime(parameters.freq[1], renderDuration);
+ f.Q.linearRampToValueAtTime(parameters.Q[1], renderDuration);
+ f.gain.linearRampToValueAtTime(parameters.gain[1], renderDuration);
+ f.detune.linearRampToValueAtTime(parameters.detune[1], renderDuration);
+
+    return context.startRendering()
+ .then(createFilterVerifier(createPeakingFilter, 3.3e-4, parameters, b.getChannelData(0),
+ "Output of peaking filter with automation of all parameters"));
+ }());
+
+ // Test that modulation of the frequency parameter of the filter works. A sinusoid of 440 Hz
+ // is the test signal that is applied to a bandpass biquad filter. The frequency parameter of
+ // the filter is modulated by a sinusoid at 103 Hz, and the frequency modulation varies from
+ // 116 to 412 Hz. (This test was taken from the description in
+ // https://github.com/WebAudio/web-audio-api/issues/509#issuecomment-94731355)
+ testPromises.push(function() {
+ var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Create a graph with the sinusoidal source at 440 Hz as the input to a biquad filter.
+ var graph = configureGraph(context, 440);
+ var f = graph.filter;
+ var b = graph.source;
+
+ f.type = "bandpass";
+ f.Q.value = 5;
+ f.frequency.value = 264;
+
+ // Create the modulation source, a sinusoid with frequency 103 Hz and amplitude 148. (The
+ // amplitude of 148 is added to the filter's frequency value of 264 to produce a sinusoidal
+ // modulation of the frequency parameter from 116 to 412 Hz.)
+ var mod = context.createBufferSource();
+ var mbuffer = context.createBuffer(1, renderDuration * sampleRate, sampleRate);
+ var d = mbuffer.getChannelData(0);
+ var omega = 2 * Math.PI * 103 / sampleRate;
+ for (var k = 0; k < d.length; ++k) {
+ d[k] = 148 * Math.sin(omega * k);
+ }
+ mod.buffer = mbuffer;
+
+ mod.connect(f.frequency);
+
+ mod.start();
+ return context.startRendering()
+ .then(function (resultBuffer) {
+ var actual = resultBuffer.getChannelData(0);
+ // Compute the filter coefficients using the mod sine wave
+
+ var endFrame = Math.ceil(renderDuration * sampleRate);
+ var nCoef = endFrame;
+ var b0 = new Float64Array(nCoef);
+ var b1 = new Float64Array(nCoef);
+ var b2 = new Float64Array(nCoef);
+ var a1 = new Float64Array(nCoef);
+ var a2 = new Float64Array(nCoef);
+
+        // Generate the filter coefficients when the frequency varies from 116 to 412 Hz using
+        // the 103 Hz sinusoid.
+ for (var k = 0; k < nCoef; ++k) {
+ var freq = f.frequency.value + d[k];
+ var c = createBandpassFilter(freq / (sampleRate / 2), f.Q.value, f.gain.value);
+ b0[k] = c.b0;
+ b1[k] = c.b1;
+ b2[k] = c.b2;
+ a1[k] = c.a1;
+ a2[k] = c.a2;
+ }
+      var reference = timeVaryingFilter(b.getChannelData(0),
+                                        {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2});
+
+ compareChannels(actual, reference);
+ });
+ }());
+
+ // Wait for all tests
+ Promise.all(testPromises).then(function () {
+ SimpleTest.finish();
+ }, function () {
+ SimpleTest.finish();
+ });
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html
new file mode 100644
index 0000000000..05c4c3a265
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode Band Pass Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 0, gain : 1 },
+ {cutoff : 1, q : 0, gain : 1 },
+ {cutoff : 0.5, q : 0, gain : 1 },
+ {cutoff : 0.25, q : 1, gain : 1 },
+ ];
+
+ createTestAndRun(context, "bandpass", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html
new file mode 100644
index 0000000000..c2b6612034
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html
@@ -0,0 +1,261 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test BiquadFilterNode getFrequencyResponse</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+// Test the frequency response of a biquad filter. We compute the frequency response for a simple
+// peaking biquad filter and compare it with the expected frequency response. The actual filter
+// used doesn't matter since we're testing getFrequencyResponse and not the actual filter output.
+// The filters are extensively tested in other biquad tests.
+
+var context;
+
+// The biquad filter node.
+var filter;
+
+// The magnitude response of the biquad filter.
+var magResponse;
+
+// The phase response of the biquad filter.
+var phaseResponse;
+
+// Number of frequency samples to take.
+var numberOfFrequencies = 1000;
+
+// The filter parameters.
+var filterCutoff = 1000; // Hz.
+var filterQ = 1;
+var filterGain = 5; // Decibels.
+
+// The maximum allowed error in the magnitude response.
+var maxAllowedMagError = 5.7e-7;
+
+// The maximum allowed error in the phase response.
+var maxAllowedPhaseError = 4.7e-8;
+
+// The magnitudes and phases of the reference frequency response.
+var expectedMagnitudes;
+var expectedPhases;
+
+// Convert frequency in Hz to a normalized frequency between 0 and 1, with 1 corresponding to
+// the Nyquist frequency.
+function normalizedFrequency(freqHz, sampleRate)
+{
+ var nyquist = sampleRate / 2;
+ return freqHz / nyquist;
+}
+
+// Get the filter response at a (normalized) frequency |f| for the filter with coefficients |coef|.
+function getResponseAt(coef, f)
+{
+ var b0 = coef.b0;
+ var b1 = coef.b1;
+ var b2 = coef.b2;
+ var a1 = coef.a1;
+ var a2 = coef.a2;
+
+ // H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2)
+ //
+  // Compute H(exp(i * pi * f)). There are no native complex numbers in JavaScript, so break
+  // H(exp(i * pi * f)) into the real and imaginary parts of the numerator and denominator.
+  // Let omega = pi * f.
+ // Then the numerator is
+ //
+ // b0 + b1 * cos(omega) + b2 * cos(2 * omega) - i * (b1 * sin(omega) + b2 * sin(2 * omega))
+ //
+ // and the denominator is
+ //
+ // 1 + a1 * cos(omega) + a2 * cos(2 * omega) - i * (a1 * sin(omega) + a2 * sin(2 * omega))
+ //
+ // Compute the magnitude and phase from the real and imaginary parts.
+
+ var omega = Math.PI * f;
+ var numeratorReal = b0 + b1 * Math.cos(omega) + b2 * Math.cos(2 * omega);
+ var numeratorImag = -(b1 * Math.sin(omega) + b2 * Math.sin(2 * omega));
+ var denominatorReal = 1 + a1 * Math.cos(omega) + a2 * Math.cos(2 * omega);
+ var denominatorImag = -(a1 * Math.sin(omega) + a2 * Math.sin(2 * omega));
+
+ var magnitude = Math.sqrt((numeratorReal * numeratorReal + numeratorImag * numeratorImag)
+ / (denominatorReal * denominatorReal + denominatorImag * denominatorImag));
+ var phase = Math.atan2(numeratorImag, numeratorReal) - Math.atan2(denominatorImag, denominatorReal);
+
+ if (phase >= Math.PI) {
+ phase -= 2 * Math.PI;
+ } else if (phase <= -Math.PI) {
+ phase += 2 * Math.PI;
+ }
+
+ return {magnitude : magnitude, phase : phase};
+}
+
+// Compute the reference frequency response for the biquad filter |filter| at the frequency samples
+// given by |frequencies|.
+function frequencyResponseReference(filter, frequencies)
+{
+ var sampleRate = filter.context.sampleRate;
+ var normalizedFreq = normalizedFrequency(filter.frequency.value, sampleRate);
+ var filterCoefficients = createFilter(filter.type, normalizedFreq, filter.Q.value, filter.gain.value);
+
+ var magnitudes = [];
+ var phases = [];
+
+ for (var k = 0; k < frequencies.length; ++k) {
+ var response = getResponseAt(filterCoefficients, normalizedFrequency(frequencies[k], sampleRate));
+ magnitudes.push(response.magnitude);
+ phases.push(response.phase);
+ }
+
+ return {magnitudes : magnitudes, phases : phases};
+}
+
+// Compute a set of linearly spaced frequencies.
+function createFrequencies(nFrequencies, sampleRate)
+{
+ var frequencies = new Float32Array(nFrequencies);
+ var nyquist = sampleRate / 2;
+ var freqDelta = nyquist / nFrequencies;
+
+ for (var k = 0; k < nFrequencies; ++k) {
+ frequencies[k] = k * freqDelta;
+ }
+
+ return frequencies;
+}
+
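+// Convert a linear magnitude to decibels; e.g. linearToDecibels(0.5) is about -6.02 dB, and
+// zero maps to a floor of -1000 dB.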
+function linearToDecibels(x)
+{
+ if (x) {
+ return 20 * Math.log(x) / Math.LN10;
+ } else {
+ return -1000;
+ }
+}
+
+// Look through the array and find any NaN or infinity. Returns the index of the first
+// occurrence or -1 if none.
+function findBadNumber(signal)
+{
+ for (var k = 0; k < signal.length; ++k) {
+ if (!isValidNumber(signal[k])) {
+ return k;
+ }
+ }
+ return -1;
+}
+
+// Compute absolute value of the difference between phase angles, taking into account the wrapping
+// of phases.
+function absolutePhaseDifference(x, y)
+{
+ var diff = Math.abs(x - y);
+
+ if (diff > Math.PI) {
+ diff = 2 * Math.PI - diff;
+ }
+ return diff;
+}
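+
+// For example, absolutePhaseDifference(3.1, -3.1) is about 0.083 rather than 6.2, since the
+// two angles are nearly equal modulo 2*pi.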
+
+// Compare the frequency response with our expected response.
+function compareResponses(filter, frequencies, magResponse, phaseResponse)
+{
+ var expectedResponse = frequencyResponseReference(filter, frequencies);
+
+ expectedMagnitudes = expectedResponse.magnitudes;
+ expectedPhases = expectedResponse.phases;
+
+ var n = magResponse.length;
+ var success = true;
+ var badResponse = false;
+
+ var maxMagError = -1;
+ var maxMagErrorIndex = -1;
+
+ var k;
+ var hasBadNumber;
+
+  hasBadNumber = findBadNumber(magResponse);
+  ok(hasBadNumber < 0, "Magnitude response has NaN or infinity at " + hasBadNumber);
+
+  hasBadNumber = findBadNumber(phaseResponse);
+  ok(hasBadNumber < 0, "Phase response has NaN or infinity at " + hasBadNumber);
+
+  // These aren't testing the implementation itself. Instead, these are sanity checks on the
+  // reference. Failure here does not imply an error in the implementation.
+  hasBadNumber = findBadNumber(expectedMagnitudes);
+  ok(hasBadNumber < 0, "Expected magnitude response has NaN or infinity at " + hasBadNumber);
+
+  hasBadNumber = findBadNumber(expectedPhases);
+  ok(hasBadNumber < 0, "Expected phase response has NaN or infinity at " + hasBadNumber);
+
+ for (k = 0; k < n; ++k) {
+ var error = Math.abs(linearToDecibels(magResponse[k]) - linearToDecibels(expectedMagnitudes[k]));
+ if (error > maxMagError) {
+ maxMagError = error;
+ maxMagErrorIndex = k;
+ }
+ }
+
+ var message = "Magnitude error (" + maxMagError + " dB)";
+ message += " exceeded threshold at " + frequencies[maxMagErrorIndex];
+ message += " Hz. Actual: " + linearToDecibels(magResponse[maxMagErrorIndex]);
+ message += " dB, expected: " + linearToDecibels(expectedMagnitudes[maxMagErrorIndex]) + " dB.";
+ ok(maxMagError < maxAllowedMagError, message);
+
+ var maxPhaseError = -1;
+ var maxPhaseErrorIndex = -1;
+
+ for (k = 0; k < n; ++k) {
+ var error = absolutePhaseDifference(phaseResponse[k], expectedPhases[k]);
+ if (error > maxPhaseError) {
+ maxPhaseError = error;
+ maxPhaseErrorIndex = k;
+ }
+ }
+
+ message = "Phase error (radians) (" + maxPhaseError;
+ message += ") exceeded threshold at " + frequencies[maxPhaseErrorIndex];
+ message += " Hz. Actual: " + phaseResponse[maxPhaseErrorIndex];
+ message += " expected: " + expectedPhases[maxPhaseErrorIndex];
+
+ ok(maxPhaseError < maxAllowedPhaseError, message);
+}
+
+context = new AudioContext();
+
+filter = context.createBiquadFilter();
+
+// Arbitrarily test a peaking filter, but any kind of filter can be tested.
+filter.type = "peaking";
+filter.frequency.value = filterCutoff;
+filter.Q.value = filterQ;
+filter.gain.value = filterGain;
+
+var frequencies = createFrequencies(numberOfFrequencies, context.sampleRate);
+magResponse = new Float32Array(numberOfFrequencies);
+phaseResponse = new Float32Array(numberOfFrequencies);
+
+filter.getFrequencyResponse(frequencies, magResponse, phaseResponse);
+compareResponses(filter, frequencies, magResponse, phaseResponse);
+
+SimpleTest.finish();
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html
new file mode 100644
index 0000000000..a615574c2d
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode High Pass Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 1, gain : 1 },
+ {cutoff : 1, q : 1, gain : 1 },
+ {cutoff : 0.25, q : 1, gain : 1 },
+ ];
+
+ createTestAndRun(context, "highpass", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html
new file mode 100644
index 0000000000..c8f6815930
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode High Shelf Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 },
+ {cutoff : 1, q : 10, gain : 10 },
+ {cutoff : 0.25, q : 10, gain : 10 },
+ ];
+
+ createTestAndRun(context, "highshelf", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html
new file mode 100644
index 0000000000..dcea18551a
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode Low Pass Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 1, gain : 1 },
+ {cutoff : 1, q : 1, gain : 1 },
+ {cutoff : 0.25, q : 1, gain : 1 },
+ {cutoff : 0.25, q : 1, gain : 1, detune : 100 },
+ {cutoff : 0.01, q : 1, gain : 1, detune : -200 },
+ ];
+ createTestAndRun(context, "lowpass", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html
new file mode 100644
index 0000000000..c1349f8e7e
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode Low Shelf Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 },
+ {cutoff : 1, q : 10, gain : 10 },
+ {cutoff : 0.25, q : 10, gain : 10 },
+ ];
+
+ createTestAndRun(context, "lowshelf", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html
new file mode 100644
index 0000000000..0fcbc5546e
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode Notch Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ var filterParameters = [{cutoff : 0, q : 10, gain : 1 },
+ {cutoff : 1, q : 10, gain : 1 },
+ {cutoff : .5, q : 0, gain : 1 },
+ {cutoff : 0.25, q : 10, gain : 1 },
+ ];
+
+ createTestAndRun(context, "notch", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html b/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html
new file mode 100644
index 0000000000..8e4727a37e
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test BiquadFilterNode Peaking Filter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 },
+ {cutoff : 1, q : 10, gain : 10 },
+ {cutoff : .5, q : 0, gain : 10 },
+ {cutoff : 0.25, q : 10, gain : 10 },
+ ];
+
+ createTestAndRun(context, "peaking", filterParameters);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html
new file mode 100644
index 0000000000..cc170df2a5
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html
@@ -0,0 +1,76 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test BiquadFilterNode Tail</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="audio-testing.js"></script>
+<script src="biquad-filters.js"></script>
+<script src="biquad-testing.js"></script>
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ // A high sample rate shows the issue more clearly.
+ var sampleRate = 192000;
+ // Some short duration because we don't need to run the test for very long.
+ var testDurationSec = 0.5;
+ var testDurationFrames = testDurationSec * sampleRate;
+
+ // Amplitude experimentally determined to give a biquad output close to 1. (No attempt was
+ // made to produce exactly 1; it's not needed.)
+ var sourceAmplitude = 100;
+
+ // The output of the biquad filter should not change by more than this much between output
+ // samples. Threshold was determined experimentally.
+ var glitchThreshold = 0.01292;
+
+  // Test that a Biquad filter doesn't have its output terminated because the input has gone
+  // away. Generally, when a source node is finished, it disconnects itself from any downstream
+  // nodes. This is the correct behavior. Nodes that have no inputs (disconnected) are
+  // generally assumed to output zeroes. This is also desired behavior. However, biquad
+  // filters have memory, so they should not suddenly output zeroes when the input is
+  // disconnected. This test checks that the output doesn't suddenly change to zero.
+ var context = new OfflineAudioContext(1, testDurationFrames, sampleRate);
+
+ // Create an impulse source.
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = sourceAmplitude;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+  // Create the biquad filter. It doesn't really matter what kind, so the default filter type
+  // and parameters are fine. Connect the source to it.
+ var biquad = context.createBiquadFilter();
+ source.connect(biquad);
+ biquad.connect(context.destination);
+
+ source.start();
+
+ context.startRendering().then(function(result) {
+ // There should be no large discontinuities in the output
+ var buffer = result.getChannelData(0);
+ var maxGlitchIndex = 0;
+ var maxGlitchValue = 0.0;
+ for (var i = 1; i < buffer.length; i++) {
+ var diff = Math.abs(buffer[i-1] - buffer[i]);
+ if (diff >= glitchThreshold) {
+ if (diff > maxGlitchValue) {
+ maxGlitchIndex = i;
+ maxGlitchValue = diff;
+ }
+ }
+ }
+ ok(maxGlitchIndex == 0, 'glitches detected in biquad output: maximum glitch at ' + maxGlitchIndex + ' with diff of ' + maxGlitchValue);
+ SimpleTest.finish();
+  });
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_iirFilterNode.html b/dom/media/webaudio/test/blink/test_iirFilterNode.html
new file mode 100644
index 0000000000..0ef7a37e3b
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_iirFilterNode.html
@@ -0,0 +1,467 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test IIRFilterNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <script type="text/javascript" src="biquad-filters.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ var sampleRate = 48000;
+ var testDurationSec = 1;
+ var testFrames = testDurationSec * sampleRate;
+
+  var testPromises = [];
+ testPromises.push(function () {
+    // Test that the feedback coefficients are normalized. Do this by creating two
+    // IIRFilterNodes: one with normalized coefficients and one without. Render both and
+    // verify that, after compensating for the scaling, the outputs match.
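+    //
+    // With H(z) = (sum_m ff[m]*z^-m) / (sum_n fb[n]*z^-n), scaling only the feedback array by
+    // a factor s scales H(z) by 1/s, so a compensating gain of s on the unnormalized branch
+    // reproduces the normalized output.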
+ var context = new OfflineAudioContext(2, testFrames, sampleRate);
+
+ // Use a simple impulse as the source.
+ var buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+    // Gain node to rescale the output of the unnormalized filter; the actual gain value is
+    // set below, once the coefficient scale factor is known.
+    var gain = context.createGain();
+
+ // The IIR filters. Use a common feedforward array.
+ var ff = [1];
+
+ var fb1 = [1, .9];
+
+ var fb2 = new Float64Array(2);
+ // Scale the feedback coefficients by an arbitrary factor.
+ var coefScaleFactor = 2;
+ for (var k = 0; k < fb2.length; ++k) {
+ fb2[k] = coefScaleFactor * fb1[k];
+ }
+
+ var iir1 = context.createIIRFilter(ff, fb1);
+ var iir2 = context.createIIRFilter(ff, fb2);
+
+ // Create the graph. The output of iir1 (normalized coefficients) is channel 0, and the
+ // output of iir2 (unnormalized coefficients), with appropriate scaling, is channel 1.
+ var merger = context.createChannelMerger(2);
+ source.connect(iir1);
+ source.connect(iir2);
+ iir1.connect(merger, 0, 0);
+ iir2.connect(gain);
+
+ // The gain for the gain node should be set to compensate for the scaling of the
+ // coefficients. Since iir2 has scaled the coefficients by coefScaleFactor, the output is
+ // reduced by the same factor, so adjust the gain to scale the output of iir2 back up.
+ gain.gain.value = coefScaleFactor;
+ gain.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ source.start();
+
+ // Rock and roll!
+
+ return context.startRendering().then(function (result) {
+      // Get the output of each filter; after the gain compensation, the two channels should
+      // be virtually identical.
+      var iir1Data = result.getChannelData(0);
+      var iir2Data = result.getChannelData(1);
+
+      // The match isn't exact because the normalized and unnormalized coefficients can round
+      // differently in the arithmetic.
+ compareChannels(iir1Data, iir2Data);
+ });
+ }());
+
+ testPromises.push(function () {
+ // Create a simple 1-zero filter and compare with the expected output.
+ var context = new OfflineAudioContext(1, testFrames, sampleRate);
+
+ // Use a simple impulse as the source
+ var buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // The filter is y(n) = 0.5*(x(n) + x(n-1)), a simple 2-point moving average. This is
+ // rather arbitrary; keep it simple.
+
+ var iir = context.createIIRFilter([0.5, 0.5], [1]);
+
+ // Create the graph
+ source.connect(iir);
+ iir.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ return context.startRendering().then(function (result) {
+ var actual = result.getChannelData(0);
+ var expected = new Float64Array(testFrames);
+ // The filter is a simple 2-point moving average of an impulse, so the first two values
+ // are non-zero and the rest are zero.
+ expected[0] = 0.5;
+ expected[1] = 0.5;
+ compareChannels(actual, expected);
+ });
+ }());
+
+ testPromises.push(function () {
+ // Create a simple 1-pole filter and compare with the expected output.
+
+    // The filter is y(n) + c*y(n-1) = x(n). The analytical impulse response is (-c)^n, so
+    // choose a number of frames to run the test for such that the output isn't flushed to zero.
+ var c = 0.9;
+ var eps = 1e-20;
+ var duration = Math.floor(Math.log(eps) / Math.log(Math.abs(c)));
+ var context = new OfflineAudioContext(1, duration, sampleRate);
+
+ // Use a simple impulse as the source
+ var buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ var iir = context.createIIRFilter([1], [1, c]);
+
+ // Create the graph
+ source.connect(iir);
+ iir.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ return context.startRendering().then(function (result) {
+ var actual = result.getChannelData(0);
+ var expected = new Float64Array(actual.length);
+
+      // The filter is a simple 1-pole filter: y(n) = -c*y(n-1) + x(n), with an impulse as the
+      // input.
+      expected[0] = 1;
+      for (var k = 1; k < expected.length; ++k) {
+        expected[k] = -c * expected[k-1];
+      }
+
+ compareChannels(actual, expected);
+ });
+ }());
+
+  // This function creates an IIRFilterNode equivalent to the specified BiquadFilterNode and
+  // compares the outputs. The outputs from the two filters should be virtually identical.
+ function testWithBiquadFilter(filterType) {
+ var context = new OfflineAudioContext(2, testFrames, sampleRate);
+
+ // Use a constant (step function) as the source
+ var buffer = context.createBuffer(1, testFrames, context.sampleRate);
+ for (var i = 0; i < testFrames; ++i) {
+ buffer.getChannelData(0)[i] = 1;
+ }
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Create the biquad. Choose some rather arbitrary values for Q and gain for the biquad
+ // so that the shelf filters aren't identical.
+ var biquad = context.createBiquadFilter();
+ biquad.type = filterType;
+ biquad.Q.value = 10;
+ biquad.gain.value = 10;
+
+ // Create the equivalent IIR Filter node by computing the coefficients of the given biquad
+ // filter type.
+ var nyquist = sampleRate / 2;
+ var coef = createFilter(filterType,
+ biquad.frequency.value / nyquist,
+ biquad.Q.value,
+ biquad.gain.value);
+
+ var iir = context.createIIRFilter([coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]);
+
+ var merger = context.createChannelMerger(2);
+ // Create the graph
+ source.connect(biquad);
+ source.connect(iir);
+
+ biquad.connect(merger, 0, 0);
+ iir.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ return context.startRendering().then(function (result) {
+      // Channel 0 holds the biquad output and channel 1 the IIR output; they should be
+      // virtually identical.
+ var expected = result.getChannelData(0);
+ var actual = result.getChannelData(1);
+ compareChannels(actual, expected);
+ });
+ }
+
+  var biquadFilterTypes = ["lowpass", "highpass", "bandpass", "notch",
+                           "allpass", "lowshelf", "highshelf", "peaking"];
+
+  // Create a test for each of the biquad filter types.
+ for (var i = 0; i < biquadFilterTypes.length; ++i) {
+ testPromises.push(testWithBiquadFilter(biquadFilterTypes[i]));
+ }
+
+ testPromises.push(function () {
+ // Multi-channel test. Create a biquad filter and the equivalent IIR filter. Filter the
+ // same multichannel signal and compare the results.
+ var nChannels = 3;
+ var context = new OfflineAudioContext(nChannels, testFrames, sampleRate);
+
+ // Create a set of oscillators as the multi-channel source.
+ var source = [];
+
+    for (var k = 0; k < nChannels; ++k) {
+ source[k] = context.createOscillator();
+ source[k].type = "sawtooth";
+ // The frequency of the oscillator is pretty arbitrary, but each oscillator should have a
+ // different frequency.
+ source[k].frequency.value = 100 + k * 100;
+ }
+
+ var merger = context.createChannelMerger(3);
+
+ var biquad = context.createBiquadFilter();
+
+ // Create the equivalent IIR Filter node.
+ var nyquist = sampleRate / 2;
+ var coef = createFilter(biquad.type,
+ biquad.frequency.value / nyquist,
+ biquad.Q.value,
+ biquad.gain.value);
+ var fb = [1, coef.a1, coef.a2];
+ var ff = [coef.b0, coef.b1, coef.b2];
+
+ var iir = context.createIIRFilter(ff, fb);
+ // Gain node to compute the difference between the IIR and biquad filter.
+ var gain = context.createGain();
+ gain.gain.value = -1;
+
+ // Create the graph.
+    for (var k = 0; k < nChannels; ++k)
+ source[k].connect(merger, 0, k);
+
+ merger.connect(biquad);
+ merger.connect(iir);
+ iir.connect(gain);
+ biquad.connect(context.destination);
+ gain.connect(context.destination);
+
+    for (var k = 0; k < nChannels; ++k)
+ source[k].start();
+
+ return context.startRendering().then(function (result) {
+ var errorThresholds = [3.7671e-5, 3.0071e-5, 2.6241e-5];
+
+ // Check the difference signal on each channel
+      for (var channel = 0; channel < result.numberOfChannels; ++channel) {
+ // Find the max amplitude of the result, which should be near zero.
+ var data = result.getChannelData(channel);
+        var maxError = data.reduce(function(reducedValue, currentValue) {
+          return Math.max(reducedValue, Math.abs(currentValue));
+        }, 0);
+
+ ok(maxError <= errorThresholds[channel], "Max difference between IIR and Biquad on channel " + channel);
+ }
+ });
+ }());
+
+ testPromises.push(function () {
+ // Apply an IIRFilter to the given input signal.
+ //
+ // IIR filter in the time domain is
+ //
+ // y[n] = sum(ff[k]*x[n-k], k, 0, M) - sum(fb[k]*y[n-k], k, 1, N)
+ //
+ function iirFilter(input, feedforward, feedback) {
+ // For simplicity, create an x buffer that contains the input, and a y buffer that contains
+ // the output. Both of these buffers have an initial work space to implement the initial
+ // memory of the filter.
+ var workSize = Math.max(feedforward.length, feedback.length);
+ var x = new Float32Array(input.length + workSize);
+
+ // Float64 because we want to match the implementation that uses doubles to minimize
+ // roundoff.
+ var y = new Float64Array(input.length + workSize);
+
+ // Copy the input over.
+      for (var k = 0; k < input.length; ++k)
+        x[k + workSize] = input[k];
+
+ // Run the filter
+ for (var n = 0; n < input.length; ++n) {
+ var index = n + workSize;
+ var yn = 0;
+ for (var k = 0; k < feedforward.length; ++k)
+ yn += feedforward[k] * x[index - k];
+ for (var k = 0; k < feedback.length; ++k)
+ yn -= feedback[k] * y[index - k];
+
+ y[index] = yn;
+ }
+
+ return y.slice(workSize).map(Math.fround);
+ }
+
+ // Cascade the two given biquad filters to create one IIR filter.
+ function cascadeBiquads(f1Coef, f2Coef) {
+ // The biquad filters are:
+ //
+ // f1 = (b10 + b11/z + b12/z^2)/(1 + a11/z + a12/z^2);
+ // f2 = (b20 + b21/z + b22/z^2)/(1 + a21/z + a22/z^2);
+ //
+ // To cascade them, multiply the two transforms together to get a fourth order IIR filter.
+
+ var numProduct = [f1Coef.b0 * f2Coef.b0,
+ f1Coef.b0 * f2Coef.b1 + f1Coef.b1 * f2Coef.b0,
+ f1Coef.b0 * f2Coef.b2 + f1Coef.b1 * f2Coef.b1 + f1Coef.b2 * f2Coef.b0,
+ f1Coef.b1 * f2Coef.b2 + f1Coef.b2 * f2Coef.b1,
+ f1Coef.b2 * f2Coef.b2
+ ];
+
+ var denProduct = [1,
+ f2Coef.a1 + f1Coef.a1,
+ f2Coef.a2 + f1Coef.a1 * f2Coef.a1 + f1Coef.a2,
+ f1Coef.a1 * f2Coef.a2 + f1Coef.a2 * f2Coef.a1,
+ f1Coef.a2 * f2Coef.a2
+ ];
+
+ return {
+ ff: numProduct,
+ fb: denProduct
+ }
+ }
+
+ // Find the magnitude of the root of the quadratic that has the maximum magnitude.
+ //
+ // The quadratic is z^2 + a1 * z + a2 and we want the root z that has the largest magnitude.
+ function largestRootMagnitude(a1, a2) {
+ var discriminant = a1 * a1 - 4 * a2;
+ if (discriminant < 0) {
+ // Complex roots: -a1/2 +/- i*sqrt(-d)/2. Thus the magnitude of each root is the same
+ // and is sqrt(a1^2/4 + |d|/4)
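+        // (Equivalently sqrt(a2), since the product of a conjugate pair of roots of
+        // z^2 + a1*z + a2 is a2.)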
+ var d = Math.sqrt(-discriminant);
+ return Math.hypot(a1 / 2, d / 2);
+ } else {
+ // Real roots
+ var d = Math.sqrt(discriminant);
+ return Math.max(Math.abs((-a1 + d) / 2), Math.abs((-a1 - d) / 2));
+ }
+ }
+
+ // Cascade 2 lowpass biquad filters and compare that with the equivalent 4th order IIR
+ // filter.
+
+ var nyquist = sampleRate / 2;
+ // Compute the coefficients of a lowpass filter.
+
+  // First some preliminary stuff. Compute the coefficients of the biquad. This is used to
+  // figure out how many frames to use in the test.
+ var biquadType = "lowpass";
+ var biquadCutoff = 350;
+ var biquadQ = 5;
+ var biquadGain = 1;
+
+ var coef = createFilter(biquadType,
+ biquadCutoff / nyquist,
+ biquadQ,
+ biquadGain);
+
+ // Cascade the biquads together to create an equivalent IIR filter.
+ var cascade = cascadeBiquads(coef, coef);
+
+ // Since we're cascading two identical biquads, the root of denominator of the IIR filter is
+ // repeated, so the root of the denominator with the largest magnitude occurs twice. The
+ // impulse response of the IIR filter will be roughly c*(r*r)^n at time n, where r is the
+ // root of largest magnitude. This approximation gets better as n increases. We can use
+ // this to get a rough idea of when the response has died down to a small value.
+
+ // This is the value we will use to determine how many frames to render. Rendering too many
+ // is a waste of time and also makes it hard to compare the actual result to the expected
+ // because the magnitudes are so small that they could be mostly round-off noise.
+ //
+ // Find magnitude of the root with largest magnitude
+ var rootMagnitude = largestRootMagnitude(coef.a1, coef.a2);
+
+ // Find n such that |r|^(2*n) <= eps. That is, n = log(eps)/(2*log(r)). Somewhat
+ // arbitrarily choose eps = 1e-20;
+ var eps = 1e-20;
+ var framesForTest = Math.floor(Math.log(eps) / (2 * Math.log(rootMagnitude)));
+
+ // We're ready to create the graph for the test. The offline context has two channels:
+ // channel 0 is the expected (cascaded biquad) result and channel 1 is the actual IIR filter
+ // result.
+ var context = new OfflineAudioContext(2, framesForTest, sampleRate);
+
+    // Use a simple impulse as the source; the amplitude is arbitrary.
+    var amplitude = 1;
+ var buffer = context.createBuffer(1, testFrames, sampleRate);
+ buffer.getChannelData(0)[0] = amplitude;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Create the two biquad filters. Doesn't really matter what, but for simplicity we choose
+ // identical lowpass filters with the same parameters.
+ var biquad1 = context.createBiquadFilter();
+ biquad1.type = biquadType;
+ biquad1.frequency.value = biquadCutoff;
+ biquad1.Q.value = biquadQ;
+
+ var biquad2 = context.createBiquadFilter();
+ biquad2.type = biquadType;
+ biquad2.frequency.value = biquadCutoff;
+ biquad2.Q.value = biquadQ;
+
+ var iir = context.createIIRFilter(cascade.ff, cascade.fb);
+
+ // Create the merger to get the signals into multiple channels
+ var merger = context.createChannelMerger(2);
+
+ // Create the graph, filtering the source through two biquads.
+ source.connect(biquad1);
+ biquad1.connect(biquad2);
+ biquad2.connect(merger, 0, 0);
+
+ source.connect(iir);
+ iir.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+    // Also run the source through the scalar IIR filter above. This reference is currently
+    // unused; the rendered channels are compared directly below.
+    var y = iirFilter(buffer.getChannelData(0), cascade.ff, cascade.fb);
+
+ // Rock and roll!
+ source.start();
+
+ return context.startRendering().then(function(result) {
+ var expected = result.getChannelData(0);
+ var actual = result.getChannelData(1);
+
+      compareChannels(actual, expected);
+    });
+ }());
+
+ // Wait for all tests
+ Promise.all(testPromises).then(function () {
+ SimpleTest.finish();
+ }, function () {
+ SimpleTest.finish();
+ });
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html b/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html
new file mode 100644
index 0000000000..09782f73be
--- /dev/null
+++ b/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html
@@ -0,0 +1,97 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test IIRFilterNode GetFrequencyResponse</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <script type="text/javascript" src="biquad-filters.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ // Modified from WebKit/LayoutTests/webaudio/iirfilter-getFrequencyResponse.html
+ var sampleRate = 48000;
+ var testDurationSec = 0.01;
+
+ // Compute a set of linearly spaced frequencies.
+ function createFrequencies(nFrequencies, sampleRate)
+ {
+ var frequencies = new Float32Array(nFrequencies);
+ var nyquist = sampleRate / 2;
+ var freqDelta = nyquist / nFrequencies;
+
+ for (var k = 0; k < nFrequencies; ++k) {
+ frequencies[k] = k * freqDelta;
+ }
+
+ return frequencies;
+ }
+
+ // Number of frequency samples to take.
+ var numberOfFrequencies = 1000;
+
+ var context = new OfflineAudioContext(1, testDurationSec * sampleRate, sampleRate);
+
+ var frequencies = createFrequencies(numberOfFrequencies, context.sampleRate);
+
+ // 1-Pole IIR Filter
+ var iir = context.createIIRFilter([1], [1, -0.9]);
+
+ var iirMag = new Float32Array(numberOfFrequencies);
+ var iirPhase = new Float32Array(numberOfFrequencies);
+ var trueMag = new Float32Array(numberOfFrequencies);
+ var truePhase = new Float32Array(numberOfFrequencies);
+
+ // The IIR filter is
+ // H(z) = 1/(1 - 0.9*z^(-1)).
+ //
+ // The frequency response is
+ // H(exp(j*w)) = 1/(1 - 0.9*exp(-j*w)).
+ //
+ // Thus, the magnitude is
+ // |H(exp(j*w))| = 1/sqrt(1.81-1.8*cos(w)).
+ //
+ // The phase is
+  // arg(H(exp(j*w))) = atan(0.9*sin(w)/(0.9*cos(w) - 1))
+
+ var frequencyScale = Math.PI / (sampleRate / 2);
+
+ for (var k = 0; k < frequencies.length; ++k) {
+ var omega = frequencyScale * frequencies[k];
+ trueMag[k] = 1/Math.sqrt(1.81-1.8*Math.cos(omega));
+ truePhase[k] = Math.atan(0.9 * Math.sin(omega) / (0.9 * Math.cos(omega) - 1));
+ }
+
+ iir.getFrequencyResponse(frequencies, iirMag, iirPhase);
+ compareChannels(iirMag, trueMag);
+ compareChannels(iirPhase, truePhase);
+
+  // Compare IIR and Biquad Filter.
+  // Create an IIR filter equivalent to the biquad filter. Compute the frequency response for
+  // both and verify that they are the same.
+ var biquad = context.createBiquadFilter();
+ var coef = createFilter(biquad.type,
+ biquad.frequency.value / (context.sampleRate / 2),
+ biquad.Q.value,
+ biquad.gain.value);
+
+ var iir = context.createIIRFilter([coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]);
+
+ var biquadMag = new Float32Array(numberOfFrequencies);
+ var biquadPhase = new Float32Array(numberOfFrequencies);
+ var iirMag = new Float32Array(numberOfFrequencies);
+ var iirPhase = new Float32Array(numberOfFrequencies);
+
+ biquad.getFrequencyResponse(frequencies, biquadMag, biquadPhase);
+ iir.getFrequencyResponse(frequencies, iirMag, iirPhase);
+ compareChannels(iirMag, biquadMag);
+ compareChannels(iirPhase, biquadPhase);
+
+ SimpleTest.finish();
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/corsServer.sjs b/dom/media/webaudio/test/corsServer.sjs
new file mode 100644
index 0000000000..45c694cf08
--- /dev/null
+++ b/dom/media/webaudio/test/corsServer.sjs
@@ -0,0 +1,26 @@
+function handleRequest(request, response) {
+ var file = Components.classes["@mozilla.org/file/directory_service;1"]
+ .getService(Components.interfaces.nsIProperties)
+ .get("CurWorkD", Components.interfaces.nsIFile);
+ var fis = Components.classes[
+ "@mozilla.org/network/file-input-stream;1"
+ ].createInstance(Components.interfaces.nsIFileInputStream);
+ var bis = Components.classes[
+ "@mozilla.org/binaryinputstream;1"
+ ].createInstance(Components.interfaces.nsIBinaryInputStream);
+ var paths = "tests/dom/media/webaudio/test/small-shot.ogg";
+ var split = paths.split("/");
+ for (var i = 0; i < split.length; ++i) {
+ file.append(split[i]);
+ }
+ fis.init(file, -1, -1, false);
+ bis.setInputStream(fis);
+ var bytes = bis.readBytes(bis.available());
+ response.setHeader("Content-Type", "video/ogg", false);
+ response.setHeader("Content-Length", "" + bytes.length, false);
+ response.setHeader("Access-Control-Allow-Origin", "*", false);
+ response.write(bytes, bytes.length);
+ response.processAsync();
+ response.finish();
+ bis.close();
+}
diff --git a/dom/media/webaudio/test/file_nodeCreationDocumentGone.html b/dom/media/webaudio/test/file_nodeCreationDocumentGone.html
new file mode 100644
index 0000000000..aedf16702f
--- /dev/null
+++ b/dom/media/webaudio/test/file_nodeCreationDocumentGone.html
@@ -0,0 +1,4 @@
+<!DOCTYPE html>
+<html><script>
+var context = new AudioContext();
+setTimeout(function(){window.close();},1000);</script></html>
diff --git a/dom/media/webaudio/test/generate-test-files.py b/dom/media/webaudio/test/generate-test-files.py
new file mode 100755
index 0000000000..af4bc3bb49
--- /dev/null
+++ b/dom/media/webaudio/test/generate-test-files.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+
+import os
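+# Note: this script shells out to the `sox` and `ffmpeg` command-line tools,
+# which must be available on PATH.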
+
+rates = [44100, 48000]
+channels = [1, 2]
+duration = "0.5"
+frequency = "1000"
+volume = "-3dB"
+name = "half-a-second"
+formats = {
+ "aac-in-adts": [{"codec": "aac", "extension": "aac"}],
+ "mp3": [{"codec": "libmp3lame", "extension": "mp3"}],
+ "mp4": [
+ {
+ "codec": "libopus",
+ "extension": "mp4",
+ },
+ {"codec": "aac", "extension": "mp4"},
+ ],
+ "ogg": [
+ {"codec": "libvorbis", "extension": "ogg"},
+ {"codec": "libopus", "extension": "opus"},
+ ],
+ "flac": [
+ {"codec": "flac", "extension": "flac"},
+ ],
+ "webm": [
+ {"codec": "libopus", "extension": "webm"},
+ {"codec": "libvorbis", "extension": "webm"},
+ ],
+}
+
+for rate in rates:
+ for channel_count in channels:
+ wav_filename = "{}-{}ch-{}.wav".format(name, channel_count, rate)
+ wav_command = "sox -V -r {} -n -b 16 -c {} {} synth {} sin {} vol {}".format(
+ rate, channel_count, wav_filename, duration, frequency, volume
+ )
+ print(wav_command)
+ os.system(wav_command)
+ for container, codecs in formats.items():
+ for codec in codecs:
+ encoded_filename = "{}-{}ch-{}-{}.{}".format(
+ name, channel_count, rate, codec["codec"], codec["extension"]
+ )
+ print(encoded_filename)
+ encoded_command = "ffmpeg -y -i {} -acodec {} {}".format(
+ wav_filename, codec["codec"], encoded_filename
+ )
+ print(encoded_command)
+                os.system(encoded_command)
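+
+# For example, for 1 channel at 44100 Hz the emitted commands look like:
+#   sox -V -r 44100 -n -b 16 -c 1 half-a-second-1ch-44100.wav synth 0.5 sin 1000 vol -3dB
+#   ffmpeg -y -i half-a-second-1ch-44100.wav -acodec libopus half-a-second-1ch-44100-libopus.opus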
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-aac-afconvert.mp4 b/dom/media/webaudio/test/half-a-second-1ch-44100-aac-afconvert.mp4
new file mode 100644
index 0000000000..7e3b008376
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-aac-afconvert.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-aac.aac b/dom/media/webaudio/test/half-a-second-1ch-44100-aac.aac
new file mode 100644
index 0000000000..28435589ae
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-aac.aac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-aac.mp4 b/dom/media/webaudio/test/half-a-second-1ch-44100-aac.mp4
new file mode 100644
index 0000000000..c603635f6e
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-aac.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-flac.flac b/dom/media/webaudio/test/half-a-second-1ch-44100-flac.flac
new file mode 100644
index 0000000000..49f71674f5
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-flac.flac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libmp3lame.mp3 b/dom/media/webaudio/test/half-a-second-1ch-44100-libmp3lame.mp3
new file mode 100644
index 0000000000..fb1ec45af2
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libmp3lame.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.mp4 b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.mp4
new file mode 100644
index 0000000000..3a7df17582
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.opus b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.opus
new file mode 100644
index 0000000000..304b9f9d1d
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.opus
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.webm b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.webm
new file mode 100644
index 0000000000..71be30de9c
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libopus.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.ogg b/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.ogg
new file mode 100644
index 0000000000..ab5ec06e50
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.webm b/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.webm
new file mode 100644
index 0000000000..b5142703ba
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100-libvorbis.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-44100.wav b/dom/media/webaudio/test/half-a-second-1ch-44100.wav
new file mode 100644
index 0000000000..0028a66007
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-44100.wav
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-aac.aac b/dom/media/webaudio/test/half-a-second-1ch-48000-aac.aac
new file mode 100644
index 0000000000..e1c5ba4631
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-aac.aac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-aac.mp4 b/dom/media/webaudio/test/half-a-second-1ch-48000-aac.mp4
new file mode 100644
index 0000000000..089d2a93e1
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-aac.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-flac.flac b/dom/media/webaudio/test/half-a-second-1ch-48000-flac.flac
new file mode 100644
index 0000000000..783bbf2c97
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-flac.flac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libmp3lame.mp3 b/dom/media/webaudio/test/half-a-second-1ch-48000-libmp3lame.mp3
new file mode 100644
index 0000000000..f9dfe29a89
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libmp3lame.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.mp4 b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.mp4
new file mode 100644
index 0000000000..eb48fdac1b
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.opus b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.opus
new file mode 100644
index 0000000000..1b7cefcb3f
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.opus
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.webm b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.webm
new file mode 100644
index 0000000000..c06e5d7583
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libopus.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.ogg b/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.ogg
new file mode 100644
index 0000000000..ad88da968c
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.webm b/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.webm
new file mode 100644
index 0000000000..d63e2c31d3
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000-libvorbis.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-1ch-48000.wav b/dom/media/webaudio/test/half-a-second-1ch-48000.wav
new file mode 100644
index 0000000000..d1fcb21134
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-1ch-48000.wav
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-aac.aac b/dom/media/webaudio/test/half-a-second-2ch-44100-aac.aac
new file mode 100644
index 0000000000..d2255e982b
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-aac.aac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-aac.mp4 b/dom/media/webaudio/test/half-a-second-2ch-44100-aac.mp4
new file mode 100644
index 0000000000..fbdd17e416
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-aac.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-flac.flac b/dom/media/webaudio/test/half-a-second-2ch-44100-flac.flac
new file mode 100644
index 0000000000..9cc57c24bd
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-flac.flac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libmp3lame.mp3 b/dom/media/webaudio/test/half-a-second-2ch-44100-libmp3lame.mp3
new file mode 100644
index 0000000000..399df50839
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libmp3lame.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.mp4 b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.mp4
new file mode 100644
index 0000000000..242fb3e12e
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.opus b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.opus
new file mode 100644
index 0000000000..a9311b5f9c
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.opus
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.webm b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.webm
new file mode 100644
index 0000000000..ca8876d5e1
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libopus.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.ogg b/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.ogg
new file mode 100644
index 0000000000..edf76edf89
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.webm b/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.webm
new file mode 100644
index 0000000000..b01575c526
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100-libvorbis.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-44100.wav b/dom/media/webaudio/test/half-a-second-2ch-44100.wav
new file mode 100644
index 0000000000..ae37e12813
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-44100.wav
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-aac.aac b/dom/media/webaudio/test/half-a-second-2ch-48000-aac.aac
new file mode 100644
index 0000000000..d26803d76f
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-aac.aac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-aac.mp4 b/dom/media/webaudio/test/half-a-second-2ch-48000-aac.mp4
new file mode 100644
index 0000000000..d7e3140580
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-aac.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-flac.flac b/dom/media/webaudio/test/half-a-second-2ch-48000-flac.flac
new file mode 100644
index 0000000000..624e5280ff
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-flac.flac
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libmp3lame.mp3 b/dom/media/webaudio/test/half-a-second-2ch-48000-libmp3lame.mp3
new file mode 100644
index 0000000000..bd009ebfb4
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libmp3lame.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.mp4 b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.mp4
new file mode 100644
index 0000000000..89e2b19256
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.mp4
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.opus b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.opus
new file mode 100644
index 0000000000..1e3c72b7b2
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.opus
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.webm b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.webm
new file mode 100644
index 0000000000..c7306df2ef
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libopus.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.ogg b/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.ogg
new file mode 100644
index 0000000000..63e6d2bb87
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.webm b/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.webm
new file mode 100644
index 0000000000..589370d6e2
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000-libvorbis.webm
Binary files differ
diff --git a/dom/media/webaudio/test/half-a-second-2ch-48000.wav b/dom/media/webaudio/test/half-a-second-2ch-48000.wav
new file mode 100644
index 0000000000..e9fc21e30b
--- /dev/null
+++ b/dom/media/webaudio/test/half-a-second-2ch-48000.wav
Binary files differ
diff --git a/dom/media/webaudio/test/invalid.txt b/dom/media/webaudio/test/invalid.txt
new file mode 100644
index 0000000000..c44840faf1
--- /dev/null
+++ b/dom/media/webaudio/test/invalid.txt
@@ -0,0 +1 @@
+Surely this is not an audio file!
diff --git a/dom/media/webaudio/test/invalidContent.flac b/dom/media/webaudio/test/invalidContent.flac
new file mode 100644
index 0000000000..b2f4e1ff7a
--- /dev/null
+++ b/dom/media/webaudio/test/invalidContent.flac
@@ -0,0 +1 @@
+fLaC
diff --git a/dom/media/webaudio/test/layouttest-glue.js b/dom/media/webaudio/test/layouttest-glue.js
new file mode 100644
index 0000000000..0ed0b9dc90
--- /dev/null
+++ b/dom/media/webaudio/test/layouttest-glue.js
@@ -0,0 +1,18 @@
+// Reimplementation of the LayoutTest API from Blink so we can easily port
+// WebAudio tests to SimpleTest, without touching the internals of the tests.
+
+function testFailed(msg) {
+ ok(false, msg);
+}
+
+function testPassed(msg) {
+ ok(true, msg);
+}
+
+function finishJSTest() {
+ SimpleTest.finish();
+}
+
+function description(str) {
+ info(str);
+}
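+
+// Hypothetical usage from a ported Blink test (the strings are
+// illustrative):
+//   description("Some ported WebAudio test");
+//   testPassed("a condition held");
+//   finishJSTest();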
diff --git a/dom/media/webaudio/test/mochitest.ini b/dom/media/webaudio/test/mochitest.ini
new file mode 100644
index 0000000000..0d303702eb
--- /dev/null
+++ b/dom/media/webaudio/test/mochitest.ini
@@ -0,0 +1,215 @@
+[DEFAULT]
+tags = mtg webaudio
+subsuite = media
+support-files =
+ 8kHz-320kbps-6ch.aac
+ audio-expected.wav
+ audio-mono-expected-2.wav
+ audio-mono-expected.wav
+ audio-quad.wav
+ audio.ogv
+ audiovideo.mp4
+ audioBufferSourceNodeDetached_worker.js
+ corsServer.sjs
+ !/dom/events/test/event_leak_utils.js
+ file_nodeCreationDocumentGone.html
+ invalid.txt
+ invalidContent.flac
+ layouttest-glue.js
+ nil-packet.ogg
+ noaudio.webm
+ small-shot-expected.wav
+ small-shot-mono-expected.wav
+ small-shot.ogg
+ small-shot.mp3
+ sweep-300-330-1sec.opus
+ ting-44.1k-1ch.ogg
+ ting-44.1k-2ch.ogg
+ ting-48k-1ch.ogg
+ ting-48k-2ch.ogg
+ ting-44.1k-1ch.wav
+ ting-44.1k-2ch.wav
+ ting-48k-1ch.wav
+ ting-48k-2ch.wav
+ sine-440-10s.opus
+ webaudio.js
+ # See ./generate-test-files.py
+ half-a-second-1ch-44100-aac.aac
+ half-a-second-1ch-44100-aac.mp4
+ half-a-second-1ch-44100-flac.flac
+ half-a-second-1ch-44100-libmp3lame.mp3
+ half-a-second-1ch-44100-libopus.mp4
+ half-a-second-1ch-44100-libopus.opus
+ half-a-second-1ch-44100-libopus.webm
+ half-a-second-1ch-44100-libvorbis.ogg
+ half-a-second-1ch-44100-libvorbis.webm
+ half-a-second-1ch-44100.wav
+ half-a-second-1ch-48000-aac.aac
+ half-a-second-1ch-48000-aac.mp4
+ half-a-second-1ch-48000-flac.flac
+ half-a-second-1ch-48000-libmp3lame.mp3
+ half-a-second-1ch-48000-libopus.mp4
+ half-a-second-1ch-48000-libopus.opus
+ half-a-second-1ch-48000-libopus.webm
+ half-a-second-1ch-48000-libvorbis.ogg
+ half-a-second-1ch-48000-libvorbis.webm
+ half-a-second-1ch-48000.wav
+ half-a-second-2ch-44100-aac.aac
+ half-a-second-2ch-44100-aac.mp4
+ half-a-second-2ch-44100-flac.flac
+ half-a-second-2ch-44100-libmp3lame.mp3
+ half-a-second-2ch-44100-libopus.mp4
+ half-a-second-2ch-44100-libopus.opus
+ half-a-second-2ch-44100-libopus.webm
+ half-a-second-2ch-44100-libvorbis.ogg
+ half-a-second-2ch-44100-libvorbis.webm
+ half-a-second-2ch-44100.wav
+ half-a-second-2ch-48000-aac.aac
+ half-a-second-2ch-48000-aac.mp4
+ half-a-second-2ch-48000-flac.flac
+ half-a-second-2ch-48000-libmp3lame.mp3
+ half-a-second-2ch-48000-libopus.mp4
+ half-a-second-2ch-48000-libopus.opus
+ half-a-second-2ch-48000-libopus.webm
+ half-a-second-2ch-48000-libvorbis.ogg
+ half-a-second-2ch-48000-libvorbis.webm
+ half-a-second-2ch-48000.wav
+ half-a-second-1ch-44100-aac-afconvert.mp4
+ sixteen-frames.mp3 # only 16 frames of valid audio
+ ../../webrtc/tests/mochitests/mediaStreamPlayback.js
+ ../../webrtc/tests/mochitests/head.js
+
+[test_analyserNode.html]
+skip-if = !asan && toolkit != android # These are tested in web-platform-tests, except on ASan and Android which don't run WPT.
+[test_analyserScale.html]
+skip-if = !asan && toolkit != android # These are tested in web-platform-tests, except on ASan and Android which don't run WPT.
+[test_analyserNodeOutput.html]
+skip-if = !asan && toolkit != android # These are tested in web-platform-tests, except on ASan and Android which don't run WPT.
+[test_analyserNodePassThrough.html]
+[test_analyserNodeWithGain.html]
+skip-if = !asan && toolkit != android # These are tested in web-platform-tests, except on ASan and Android which don't run WPT.
+[test_analyserNodeMinimum.html]
+skip-if = !asan && toolkit != android # These are tested in web-platform-tests, except on ASan and Android which don't run WPT.
+[test_channelMergerNode.html]
+[test_channelMergerNodeWithVolume.html]
+[test_channelSplitterNode.html]
+[test_channelSplitterNodeWithVolume.html]
+[test_convolverNode.html]
+[test_convolverNode_mono_mono.html]
+[test_convolverNodeChannelCount.html]
+[test_convolverNodeChannelInterpretationChanges.html]
+[test_convolverNodeDelay.html]
+[test_convolverNodeFiniteInfluence.html]
+[test_convolverNodeOOM.html]
+skip-if = asan
+ tsan # 1672869
+[test_convolverNodeNormalization.html]
+[test_convolverNodePassThrough.html]
+[test_convolverNodeWithGain.html]
+[test_convolver-upmixing-1-channel-response.html]
+# This is a copy of
+# testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-upmixing-1-channel-response.html,
+# but WPT are not run with ASan or Android builds.
+skip-if = !asan && toolkit != android
+[test_currentTime.html]
+[test_decodeAudioDataOnDetachedBuffer.html]
+[test_decodeAudioDataPromise.html]
+[test_decodeAudioError.html]
+[test_decodeMultichannel.html]
+[test_decodeOpusTail.html]
+[test_decoderDelay.html]
+[test_delayNode.html]
+[test_delayNodeAtMax.html]
+[test_delayNodeChannelChanges.html]
+[test_delayNodeCycles.html]
+[test_delayNodePassThrough.html]
+[test_delayNodeSmallMaxDelay.html]
+[test_delayNodeTailIncrease.html]
+[test_delayNodeTailWithDisconnect.html]
+[test_delayNodeTailWithGain.html]
+[test_delayNodeTailWithReconnect.html]
+[test_delayNodeWithGain.html]
+[test_delaynode-channel-count-1.html]
+# This is a copy of
+# testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-channel-count-1.html
+# but WPT are not run with ASan or Android builds.
+skip-if = !asan && toolkit != android
+[test_disconnectAll.html]
+[test_disconnectAudioParam.html]
+[test_disconnectAudioParamFromOutput.html]
+[test_disconnectExceptions.html]
+[test_disconnectFromAudioNode.html]
+[test_disconnectFromAudioNodeAndOutput.html]
+[test_disconnectFromAudioNodeAndOutputAndInput.html]
+[test_disconnectFromAudioNodeMultipleConnection.html]
+[test_disconnectFromOutput.html]
+[test_dynamicsCompressorNode.html]
+[test_dynamicsCompressorNodePassThrough.html]
+[test_dynamicsCompressorNodeWithGain.html]
+[test_event_listener_leaks.html]
+skip-if = (os == 'win' && processor == 'aarch64') # bug 1531927
+[test_gainNode.html]
+[test_gainNodeInLoop.html]
+[test_gainNodePassThrough.html]
+[test_iirFilterNodePassThrough.html]
+[test_maxChannelCount.html]
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538360
+[test_mixingRules.html]
+[test_nodeToParamConnection.html]
+[test_nodeCreationDocumentGone.html]
+[test_notAllowedToStartAudioContextGC.html]
+[test_OfflineAudioContext.html]
+[test_offlineDestinationChannelCountLess.html]
+[test_offlineDestinationChannelCountMore.html]
+[test_oscillatorNode.html]
+[test_oscillatorNode2.html]
+[test_oscillatorNodeNegativeFrequency.html]
+[test_oscillatorNodePassThrough.html]
+[test_oscillatorNodeStart.html]
+[test_oscillatorTypeChange.html]
+[test_pannerNode.html]
+[test_pannerNode_equalPower.html]
+[test_pannerNode_audioparam_distance.html]
+[test_pannerNodeAbove.html]
+[test_pannerNodeAtZeroDistance.html]
+[test_pannerNodeChannelCount.html]
+[test_pannerNodeHRTFSymmetry.html]
+[test_pannerNodeTail.html]
+[test_pannerNode_maxDistance.html]
+[test_slowStart.html]
+[test_setValueCurveWithNonFiniteElements.html]
+[test_stereoPannerNode.html]
+[test_stereoPannerNodePassThrough.html]
+[test_periodicWave.html]
+[test_periodicWaveDisableNormalization.html]
+[test_periodicWaveBandLimiting.html]
+[test_retrospective-exponentialRampToValueAtTime.html]
+[test_retrospective-linearRampToValueAtTime.html]
+[test_retrospective-setTargetAtTime.html]
+[test_retrospective-setValueAtTime.html]
+[test_retrospective-setValueCurveAtTime.html]
+[test_ScriptProcessorCollected1.html]
+[test_scriptProcessorNode.html]
+[test_scriptProcessorNodeChannelCount.html]
+[test_scriptProcessorNodePassThrough.html]
+[test_scriptProcessorNode_playbackTime1.html]
+[test_scriptProcessorNodeZeroInputOutput.html]
+[test_scriptProcessorNodeNotConnected.html]
+[test_sequentialBufferSourceWithResampling.html]
+[test_singleSourceDest.html]
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538360
+[test_stereoPanningWithGain.html]
+[test_waveDecoder.html]
+[test_waveShaper.html]
+[test_waveShaperGain.html]
+[test_waveShaperNoCurve.html]
+[test_waveShaperPassThrough.html]
+[test_waveShaperInvalidLengthCurve.html]
+[test_WebAudioMemoryReporting.html]
+[test_audioContextParams_sampleRate.html]
+[test_webAudio_muteTab.html]
+scheme = https
+skip-if = os == 'mac'
+ os == 'win'
+ toolkit == 'android' # Bug 1404995, no loopback devices on some platforms
+[test_audioContextParams_recordNonDefaultSampleRate.html]
diff --git a/dom/media/webaudio/test/mochitest_audio.ini b/dom/media/webaudio/test/mochitest_audio.ini
new file mode 100644
index 0000000000..8f6c35f9a7
--- /dev/null
+++ b/dom/media/webaudio/test/mochitest_audio.ini
@@ -0,0 +1,69 @@
+[DEFAULT]
+tags = mtg webaudio
+subsuite = media
+support-files =
+ audio-expected.wav
+ audio-mono-expected-2.wav
+ audio-mono-expected.wav
+ audio-quad.wav
+ audio.ogv
+ audiovideo.mp4
+ audioBufferSourceNodeDetached_worker.js
+ corsServer.sjs
+ !/dom/events/test/event_leak_utils.js
+ file_nodeCreationDocumentGone.html
+ invalid.txt
+ invalidContent.flac
+ layouttest-glue.js
+ nil-packet.ogg
+ noaudio.webm
+ small-shot-expected.wav
+ small-shot-mono-expected.wav
+ small-shot.ogg
+ small-shot.mp3
+ sweep-300-330-1sec.opus
+ ting-44.1k-1ch.ogg
+ ting-44.1k-2ch.ogg
+ ting-48k-1ch.ogg
+ ting-48k-2ch.ogg
+ ting-44.1k-1ch.wav
+ ting-44.1k-2ch.wav
+ ting-48k-1ch.wav
+ ting-48k-2ch.wav
+ sine-440-10s.opus
+ webaudio.js
+ ../../webrtc/tests/mochitests/mediaStreamPlayback.js
+ ../../webrtc/tests/mochitests/head.js
+
+[test_AudioBuffer.html]
+[test_audioBufferSourceNode.html]
+[test_audioBufferSourceNodeEnded.html]
+[test_audioBufferSourceNodeLazyLoopParam.html]
+[test_audioBufferSourceNodeLoop.html]
+[test_audioBufferSourceNodeLoopStartEnd.html]
+[test_audioBufferSourceNodeLoopStartEndSame.html]
+[test_audioBufferSourceNodeDetached.html]
+[test_audioBufferSourceNodeNoStart.html]
+[test_audioBufferSourceNodeNullBuffer.html]
+[test_audioBufferSourceNodeOffset.html]
+[test_audioBufferSourceNodePassThrough.html]
+[test_audioBufferSourceNodeRate.html]
+[test_AudioContext.html]
+[test_AudioContext_disabled.html]
+[test_audioContextGC.html]
+[test_audioContextSuspendResumeClose.html]
+tags=capturestream
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1539522
+[test_audioDestinationNode.html]
+[test_AudioListener.html]
+[test_AudioNodeDevtoolsAPI.html]
+[test_audioParamChaining.html]
+[test_AudioParamDevtoolsAPI.html]
+[test_audioParamExponentialRamp.html]
+[test_audioParamGain.html]
+[test_audioParamLinearRamp.html]
+[test_audioParamSetCurveAtTime.html]
+[test_audioParamSetTargetAtTime.html]
+[test_audioParamSetTargetAtTimeZeroTimeConstant.html]
+[test_audioParamSetValueAtTime.html]
+[test_audioParamTimelineDestinationOffset.html]
diff --git a/dom/media/webaudio/test/mochitest_bugs.ini b/dom/media/webaudio/test/mochitest_bugs.ini
new file mode 100644
index 0000000000..66b645673a
--- /dev/null
+++ b/dom/media/webaudio/test/mochitest_bugs.ini
@@ -0,0 +1,65 @@
+[DEFAULT]
+tags = mtg webaudio
+subsuite = media
+support-files =
+ audio-expected.wav
+ audio-mono-expected-2.wav
+ audio-mono-expected.wav
+ audio-quad.wav
+ audio.ogv
+ audiovideo.mp4
+ audioBufferSourceNodeDetached_worker.js
+ corsServer.sjs
+ !/dom/events/test/event_leak_utils.js
+ file_nodeCreationDocumentGone.html
+ invalid.txt
+ invalidContent.flac
+ layouttest-glue.js
+ nil-packet.ogg
+ noaudio.webm
+ small-shot-expected.wav
+ small-shot-mono-expected.wav
+ small-shot.ogg
+ small-shot.mp3
+ sweep-300-330-1sec.opus
+ ting-44.1k-1ch.ogg
+ ting-44.1k-2ch.ogg
+ ting-48k-1ch.ogg
+ ting-48k-2ch.ogg
+ ting-44.1k-1ch.wav
+ ting-44.1k-2ch.wav
+ ting-48k-1ch.wav
+ ting-48k-2ch.wav
+ sine-440-10s.opus
+ webaudio.js
+ ../../webrtc/tests/mochitests/mediaStreamPlayback.js
+ ../../webrtc/tests/mochitests/head.js
+
+[test_bug808374.html]
+[test_bug827541.html]
+[test_bug839753.html]
+[test_bug845960.html]
+[test_bug856771.html]
+[test_bug866570.html]
+[test_bug866737.html]
+[test_bug867089.html]
+[test_bug867174.html]
+[test_bug873335.html]
+[test_bug875221.html]
+[test_bug875402.html]
+[test_bug894150.html]
+[test_bug956489.html]
+[test_bug964376.html]
+[test_bug966247.html]
+tags=capturestream
+[test_bug972678.html]
+[test_bug1113634.html]
+[test_bug1118372.html]
+[test_bug1027864.html]
+skip-if = true #Bug 1650930
+[test_bug1056032.html]
+[test_bug1255618.html]
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538360
+[test_bug1267579.html]
+[test_bug1355798.html]
+[test_bug1447273.html]
diff --git a/dom/media/webaudio/test/mochitest_media.ini b/dom/media/webaudio/test/mochitest_media.ini
new file mode 100644
index 0000000000..be153c90c8
--- /dev/null
+++ b/dom/media/webaudio/test/mochitest_media.ini
@@ -0,0 +1,64 @@
+[DEFAULT]
+tags = mtg webaudio
+subsuite = media
+support-files =
+ audio-expected.wav
+ audio-mono-expected-2.wav
+ audio-mono-expected.wav
+ audio-quad.wav
+ audio.ogv
+ audiovideo.mp4
+ audioBufferSourceNodeDetached_worker.js
+ corsServer.sjs
+ !/dom/events/test/event_leak_utils.js
+ file_nodeCreationDocumentGone.html
+ invalid.txt
+ invalidContent.flac
+ layouttest-glue.js
+ nil-packet.ogg
+ noaudio.webm
+ small-shot-expected.wav
+ small-shot-mono-expected.wav
+ small-shot.ogg
+ small-shot.mp3
+ sweep-300-330-1sec.opus
+ ting-44.1k-1ch.ogg
+ ting-44.1k-2ch.ogg
+ ting-48k-1ch.ogg
+ ting-48k-2ch.ogg
+ ting-44.1k-1ch.wav
+ ting-44.1k-2ch.wav
+ ting-48k-1ch.wav
+ ting-48k-2ch.wav
+ sine-440-10s.opus
+ webaudio.js
+ ../../webrtc/tests/mochitests/mediaStreamPlayback.js
+ ../../webrtc/tests/mochitests/head.js
+
+[test_mediaDecoding.html]
+[test_mediaElementAudioSourceNode.html]
+tags=capturestream
+[test_mediaElementAudioSourceNodeFidelity.html]
+tags=capturestream
+skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538360
+[test_mediaElementAudioSourceNodePassThrough.html]
+tags=capturestream
+[test_mediaElementAudioSourceNodeVideo.html]
+tags=capturestream
+
+[test_mediaElementAudioSourceNodeCrossOrigin.html]
+tags=capturestream
+[test_mediaStreamAudioDestinationNode.html]
+[test_mediaStreamAudioSourceNode.html]
+[test_mediaStreamAudioSourceNodeCrossOrigin.html]
+tags=capturestream
+[test_mediaStreamAudioSourceNodeNoGC.html]
+skip-if = os == "mac" && debug # Bug 1756880 - lower frequency shutdown hangs
+scheme=https
+
+[test_mediaStreamAudioSourceNodePassThrough.html]
+[test_mediaStreamAudioSourceNodeResampling.html]
+tags=capturestream
+[test_mediaStreamTrackAudioSourceNode.html]
+[test_mediaStreamTrackAudioSourceNodeVideo.html]
+[test_mediaStreamTrackAudioSourceNodeCrossOrigin.html]
diff --git a/dom/media/webaudio/test/nil-packet.ogg b/dom/media/webaudio/test/nil-packet.ogg
new file mode 100644
index 0000000000..7b00b5a63e
--- /dev/null
+++ b/dom/media/webaudio/test/nil-packet.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/noaudio.webm b/dom/media/webaudio/test/noaudio.webm
new file mode 100644
index 0000000000..9207017fb6
--- /dev/null
+++ b/dom/media/webaudio/test/noaudio.webm
Binary files differ
diff --git a/dom/media/webaudio/test/sine-440-10s.opus b/dom/media/webaudio/test/sine-440-10s.opus
new file mode 100644
index 0000000000..eb91020168
--- /dev/null
+++ b/dom/media/webaudio/test/sine-440-10s.opus
Binary files differ
diff --git a/dom/media/webaudio/test/sixteen-frames.mp3 b/dom/media/webaudio/test/sixteen-frames.mp3
new file mode 100644
index 0000000000..1d15dcad59
--- /dev/null
+++ b/dom/media/webaudio/test/sixteen-frames.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/small-shot-expected.wav b/dom/media/webaudio/test/small-shot-expected.wav
new file mode 100644
index 0000000000..2faaa8258b
--- /dev/null
+++ b/dom/media/webaudio/test/small-shot-expected.wav
Binary files differ
diff --git a/dom/media/webaudio/test/small-shot-mono-expected.wav b/dom/media/webaudio/test/small-shot-mono-expected.wav
new file mode 100644
index 0000000000..d4e2647e42
--- /dev/null
+++ b/dom/media/webaudio/test/small-shot-mono-expected.wav
Binary files differ
diff --git a/dom/media/webaudio/test/small-shot.mp3 b/dom/media/webaudio/test/small-shot.mp3
new file mode 100644
index 0000000000..f9397a5106
--- /dev/null
+++ b/dom/media/webaudio/test/small-shot.mp3
Binary files differ
diff --git a/dom/media/webaudio/test/small-shot.ogg b/dom/media/webaudio/test/small-shot.ogg
new file mode 100644
index 0000000000..1a41623f81
--- /dev/null
+++ b/dom/media/webaudio/test/small-shot.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/sweep-300-330-1sec.opus b/dom/media/webaudio/test/sweep-300-330-1sec.opus
new file mode 100644
index 0000000000..619d1e0844
--- /dev/null
+++ b/dom/media/webaudio/test/sweep-300-330-1sec.opus
Binary files differ
diff --git a/dom/media/webaudio/test/test_AudioBuffer.html b/dom/media/webaudio/test/test_AudioBuffer.html
new file mode 100644
index 0000000000..05957f679e
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioBuffer.html
@@ -0,0 +1,104 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test the AudioBuffer interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ SpecialPowers.gc(); // Make sure that our channels are accessible after GC
+ ok(buffer, "Buffer was allocated successfully");
+ is(buffer.sampleRate, context.sampleRate, "Correct sample rate");
+ is(buffer.length, 2048, "Correct length");
+ ok(Math.abs(buffer.duration - 2048 / context.sampleRate) < 0.0001, "Correct duration");
+ is(buffer.numberOfChannels, 2, "Correct number of channels");
+ for (var i = 0; i < buffer.numberOfChannels; ++i) {
+ var buf = buffer.getChannelData(i);
+ ok(buf, "Buffer index " + i + " exists");
+ ok(buf instanceof Float32Array, "Result is a typed array");
+ is(buf.length, buffer.length, "Correct length");
+ var foundNonZero = false;
+ for (var j = 0; j < buf.length; ++j) {
+ if (buf[j] != 0) {
+ foundNonZero = true;
+ break;
+ }
+ buf[j] = j;
+ }
+ ok(!foundNonZero, "Buffer " + i + " should be initialized to 0");
+ }
+
+ // Now test copying the channel data out of a normal buffer
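+  // copyFromChannel(destination, channelNumber, startInChannel) copies
+  // destination.length samples out of the channel, starting at the given
+  // offset; the loop above stored buf[j] = j, so copy[i] should equal
+  // 1024 + i.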
+ var copy = new Float32Array(100);
+ buffer.copyFromChannel(copy, 0, 1024);
+ for (var i = 0; i < copy.length; ++i) {
+ is(copy[i], 1024 + i, "Correct sample");
+ }
+ // Test copying the channel data out of a playing buffer
+ var srcNode = context.createBufferSource();
+ srcNode.buffer = buffer;
+ srcNode.start(0);
+ copy = new Float32Array(100);
+ buffer.copyFromChannel(copy, 0, 1024);
+ for (var i = 0; i < copy.length; ++i) {
+ is(copy[i], 1024 + i, "Correct sample");
+ }
+
+ // Test copying to the channel data
+ var newData = new Float32Array(200);
+ buffer.copyToChannel(newData, 0, 100);
+ var changedData = buffer.getChannelData(0);
+ for (var i = 0; i < changedData.length; ++i) {
+ if (i < 100 || i >= 300) {
+ is(changedData[i], i, "Untouched sample");
+ } else {
+ is(changedData[i], 0, "Correct sample");
+ }
+ }
+
+ // Now, detach the array buffer
+ var worker = new Worker("audioBufferSourceNodeDetached_worker.js");
+ var data = buffer.getChannelData(0).buffer;
+ worker.postMessage(data, [data]);
+ SpecialPowers.gc();
+
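+  // Transferring the ArrayBuffer to the worker detached it; the copy calls
+  // below are expected to be silent no-ops rather than throw.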
+ expectNoException(function() {
+ buffer.copyFromChannel(copy, 0, 1024);
+ });
+
+ expectNoException(function() {
+ buffer.copyToChannel(newData, 0, 100);
+ });
+
+ expectException(function() {
+ context.createBuffer(2, 2048, 7999);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ context.createBuffer(2, 2048, 192001);
+ }, DOMException.NOT_SUPPORTED_ERR);
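+  // The boundary values above and below assume a supported sample-rate
+  // range of 8000 Hz to 192000 Hz inclusive.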
+ context.createBuffer(2, 2048, 8000); // no exception
+ context.createBuffer(2, 2048, 192000); // no exception
+ context.createBuffer(32, 2048, 48000); // no exception
+ // Null length
+ expectException(function() {
+ context.createBuffer(2, 0, 48000);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ // Null number of channels
+ expectException(function() {
+ context.createBuffer(0, 2048, 48000);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_AudioContext.html b/dom/media/webaudio/test/test_AudioContext.html
new file mode 100644
index 0000000000..50aeee489e
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioContext.html
@@ -0,0 +1,23 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test whether we can create an AudioContext interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ac = new AudioContext();
+ ok(ac, "Create a AudioContext object");
+ ok(ac instanceof EventTarget, "AudioContexts must be EventTargets");
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_AudioContext_disabled.html b/dom/media/webaudio/test/test_AudioContext_disabled.html
new file mode 100644
index 0000000000..9c3651a883
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioContext_disabled.html
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test whether we can disable the AudioContext interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+const webaudio_interfaces = [
+ "AudioContext",
+ "OfflineAudioContext",
+ "AudioContext",
+ "OfflineAudioCompletionEvent",
+ "AudioNode",
+ "AudioDestinationNode",
+ "AudioParam",
+ "GainNode",
+ "DelayNode",
+ "AudioBuffer",
+ "AudioBufferSourceNode",
+ "MediaElementAudioSourceNode",
+ "ScriptProcessorNode",
+ "AudioProcessingEvent",
+ "PannerNode",
+ "AudioListener",
+ "StereoPannerNode",
+ "ConvolverNode",
+ "AnalyserNode",
+ "ChannelSplitterNode",
+ "ChannelMergerNode",
+ "DynamicsCompressorNode",
+ "BiquadFilterNode",
+ "IIRFilterNode",
+ "WaveShaperNode",
+ "OscillatorNode",
+ "PeriodicWave",
+ "MediaStreamAudioSourceNode",
+ "MediaStreamAudioDestinationNode"
+];
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ SpecialPowers.pushPrefEnv({"set": [["dom.webaudio.enabled", false]]}, function() {
+ webaudio_interfaces.forEach((e) => ok(!window[e], e + " must be disabled when the Web Audio API is disabled"));
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_AudioListener.html b/dom/media/webaudio/test/test_AudioListener.html
new file mode 100644
index 0000000000..e3d605cbcc
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioListener.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioContext.listener and the AudioListener interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ ok("listener" in context, "AudioContext.listener should exist");
+ // The values set by the following cannot be read from script, but the
+ // implementation is simple enough, so we just make sure that nothing throws.
+ context.listener.setPosition(1.0, 1.0, 1.0);
+ context.listener.setOrientation(1.0, 1.0, 1.0, 1.0, 1.0, 1.0);
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html b/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html
new file mode 100644
index 0000000000..f032ed88f0
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the devtool AudioNode API</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+ SimpleTest.waitForExplicitFinish();
+
+ function id(node) {
+ return SpecialPowers.getPrivilegedProps(node, "id");
+ }
+
+ var ac = new AudioContext();
+ var ids;
+ var weak;
+ (function() {
+ var src1 = ac.createBufferSource();
+ var src2 = ac.createBufferSource();
+ ok(id(src2) > id(src1), "The ID should be monotonic");
+ ok(id(src1) > id(ac.destination), "The ID of the destination node should be the lowest");
+ ids = [id(src1), id(src2)];
+ weak = SpecialPowers.Cu.getWeakReference(src1);
+ is(SpecialPowers.unwrap(weak.get()), src1, "The node should support a weak reference");
+ })();
+ function observer(subject, topic, data) {
+ var id = parseInt(data);
+ var index = ids.indexOf(id);
+ if (index != -1) {
+ info("Dropping id " + id + " at index " + index);
+ ids.splice(index, 1);
+ if (!ids.length) {
+ SimpleTest.executeSoon(function() {
+ is(weak.get(), null, "The weak reference must be dropped now");
+ SpecialPowers.removeObserver(observer, "webaudio-node-demise");
+ SimpleTest.finish();
+ });
+ }
+ }
+ }
+ SpecialPowers.addObserver(observer, "webaudio-node-demise");
+
+ forceCC();
+ forceCC();
+
+ function forceCC() {
+ SpecialPowers.DOMWindowUtils.cycleCollect();
+ SpecialPowers.DOMWindowUtils.garbageCollect();
+ SpecialPowers.DOMWindowUtils.garbageCollect();
+ }
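+
+  // Empirically, one cycle collection followed by two GC passes is enough
+  // here for the source nodes created in the closure above to be reclaimed
+  // and their "webaudio-node-demise" notifications to fire.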
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html b/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html
new file mode 100644
index 0000000000..81fdd357c9
--- /dev/null
+++ b/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the devtool AudioParam API</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+ function checkIdAndName(node, name) {
+ is(SpecialPowers.getPrivilegedProps(node, "id"),
+ SpecialPowers.getPrivilegedProps(node[name], "parentNodeId"),
+ "The parent id should be correct");
+ is(SpecialPowers.getPrivilegedProps(node[name], "name"), name,
+ "The name of the AudioParam should be correct.");
+ }
+
+ var ac = new AudioContext(),
+ gain = ac.createGain(),
+ osc = ac.createOscillator(),
+ del = ac.createDelay(),
+ source = ac.createBufferSource(),
+ stereoPanner = ac.createStereoPanner(),
+ comp = ac.createDynamicsCompressor(),
+ biquad = ac.createBiquadFilter();
+
+ checkIdAndName(gain, "gain");
+ checkIdAndName(osc, "frequency");
+ checkIdAndName(osc, "detune");
+ checkIdAndName(del, "delayTime");
+ checkIdAndName(source, "playbackRate");
+ checkIdAndName(source, "detune");
+ checkIdAndName(stereoPanner, "pan");
+ checkIdAndName(comp, "threshold");
+ checkIdAndName(comp, "knee");
+ checkIdAndName(comp, "ratio");
+ checkIdAndName(comp, "attack");
+ checkIdAndName(comp, "release");
+ checkIdAndName(biquad, "frequency");
+ checkIdAndName(biquad, "detune");
+ checkIdAndName(biquad, "Q");
+ checkIdAndName(biquad, "gain");
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_OfflineAudioContext.html b/dom/media/webaudio/test/test_OfflineAudioContext.html
new file mode 100644
index 0000000000..d9403566ae
--- /dev/null
+++ b/dom/media/webaudio/test/test_OfflineAudioContext.html
@@ -0,0 +1,118 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test OfflineAudioContext</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var renderedBuffer = null;
+var finished = 0;
+
+function finish() {
+ finished++;
+ if (finished == 2) {
+ SimpleTest.finish();
+ }
+}
+
+function setOrCompareRenderedBuffer(aRenderedBuffer) {
+ if (renderedBuffer) {
+ is(renderedBuffer, aRenderedBuffer, "Rendered buffers from the event and the promise should be the same");
+ finish();
+ } else {
+ renderedBuffer = aRenderedBuffer;
+ }
+}
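+
+// Note: startRendering() signals completion twice, once by resolving its
+// promise and once by firing the "complete" event. The test verifies that
+// the promise settles first and that both paths deliver the same
+// AudioBuffer.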
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ let ctxs = [
+ new OfflineAudioContext(2, 100, 22050),
+ new OfflineAudioContext({length: 100, sampleRate: 22050}),
+ new OfflineAudioContext({channels: 2, length: 100, sampleRate: 22050}),
+ ];
+
+ for (let ctx of ctxs) {
+ ok(ctx instanceof EventTarget, "OfflineAudioContexts must be EventTargets");
+ is(ctx.length, 100, "OfflineAudioContext.length is equal to the value passed to the ctor.");
+
+ var buf = ctx.createBuffer(2, 100, ctx.sampleRate);
+ for (var i = 0; i < 2; ++i) {
+ for (var j = 0; j < 100; ++j) {
+ buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate);
+ }
+ }
+ }
+
+  is(ctxs[1].destination.channelCount, 1, "OfflineAudioContext defaults to the correct channelCount.");
+
+ let ctx = ctxs[0];
+
+ expectException(function() {
+ new OfflineAudioContext(2, 100, 0);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ new OfflineAudioContext(2, 100, -1);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ new OfflineAudioContext(0, 100, 44100);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ new OfflineAudioContext(32, 100, 44100);
+ expectException(function() {
+ new OfflineAudioContext(33, 100, 44100);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ new OfflineAudioContext(2, 0, 44100);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectTypeError(function() {
+ new OfflineAudioContext({});
+ });
+ expectTypeError(function() {
+ new OfflineAudioContext({sampleRate: 44100});
+ });
+ expectTypeError(function() {
+ new OfflineAudioContext({length: 44100*40});
+ });
+
+ var src = ctx.createBufferSource();
+ src.buffer = buf;
+ src.start(0);
+ src.connect(ctx.destination);
+
+ ctx.addEventListener("complete", function(e) {
+ ok(e instanceof OfflineAudioCompletionEvent, "Correct event received");
+ is(e.renderedBuffer.numberOfChannels, 2, "Correct expected number of buffers");
+ ok(renderedBuffer != null, "The event should be fired after the promise callback.");
+ expectNoException(function() {
+ ctx.startRendering().then(function() {
+ ok(false, "Promise should not resolve when startRendering is called a second time on an OfflineAudioContext")
+ finish();
+ }).catch(function(err) {
+ ok(true, "Promise should reject when startRendering is called a second time on an OfflineAudioContext")
+ finish();
+ });
+ });
+ compareBuffers(e.renderedBuffer, buf);
+ setOrCompareRenderedBuffer(e.renderedBuffer);
+
+ });
+
+ expectNoException(function() {
+ ctx.startRendering().then(function(b) {
+ is(renderedBuffer, null, "The promise callback should be called first.");
+ setOrCompareRenderedBuffer(b);
+ }).catch(function (error) {
+ ok(false, "The promise from OfflineAudioContext.startRendering should never be rejected");
+ });
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_ScriptProcessorCollected1.html b/dom/media/webaudio/test/test_ScriptProcessorCollected1.html
new file mode 100644
index 0000000000..8f05889d26
--- /dev/null
+++ b/dom/media/webaudio/test/test_ScriptProcessorCollected1.html
@@ -0,0 +1,77 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode in cycle with no listener is collected</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var observer = function(subject, topic, data) {
+ var id = parseInt(data);
+ var index = ids.indexOf(id);
+ if (index != -1) {
+ ok(true, "Collected AudioNode id " + id + " at index " + index);
+ ids.splice(index, 1);
+ }
+}
+
+SpecialPowers.addObserver(observer, "webaudio-node-demise");
+
+SimpleTest.registerCleanupFunction(function() {
+ if (observer) {
+ SpecialPowers.removeObserver(observer, "webaudio-node-demise");
+ }
+});
+
+var ac = new AudioContext();
+
+var testProcessor = ac.createScriptProcessor(256, 1, 0);
+var delay = ac.createDelay();
+testProcessor.connect(delay);
+delay.connect(testProcessor);
+
+var referenceProcessor = ac.createScriptProcessor(256, 1, 0);
+var gain = ac.createGain();
+gain.connect(referenceProcessor);
+
+var processCount = 0;
+testProcessor.onaudioprocess = function(event) {
+ ++processCount;
+ switch (processCount) {
+ case 1:
+ // Switch to listening to referenceProcessor;
+ referenceProcessor.onaudioprocess = event.target.onaudioprocess;
+ referenceProcessor = null;
+ event.target.onaudioprocess = null;
+ break;
+ case 2:
+ // There are no references to testProcessor and so GC can begin.
+ SpecialPowers.exactGC(function() {
+ SpecialPowers.removeObserver(observer, "webaudio-node-demise");
+ observer = null;
+ event.target.onaudioprocess = null;
+ ok(!ids.length, "All expected nodes should be collected");
+ SimpleTest.finish();
+ });
+ break;
+ }
+};
+
+function id(node) {
+ return SpecialPowers.getPrivilegedProps(node, "id");
+}
+
+// Nodes with these ids should be collected.
+var ids = [ id(testProcessor), id(delay), id(gain) ];
+testProcessor = null;
+delay = null;
+gain = null;
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html
new file mode 100644
index 0000000000..693e558304
--- /dev/null
+++ b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Web Audio memory reporting</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var ac = new AudioContext();
+var sp = ac.createScriptProcessor(4096, 1, 1);
+sp.connect(ac.destination);
+
+// Not started so as to test
+// https://bugzilla.mozilla.org/show_bug.cgi?id=1225003#c2
+var oac = new OfflineAudioContext(1, 1, 48000);
+
+var nodeTypes = ["ScriptProcessorNode", "AudioDestinationNode"];
+var objectTypes = ["dom-nodes", "engine-objects", "track-objects"];
+
+var usages = { "explicit/webaudio/audiocontext": 0 };
+
+for (var i = 0; i < nodeTypes.length; ++i) {
+ for (var j = 0; j < objectTypes.length; ++j) {
+ usages["explicit/webaudio/audio-node/" +
+ nodeTypes[i] + "/" + objectTypes[j]] = 0;
+ }
+}
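+
+// Each per-node amount is reported under
+// explicit/webaudio/audio-node/<node type>/<object type>; handleReport
+// accumulates the amounts for the paths built above, and finished()
+// asserts that every one of them ends up non-zero.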
+
+var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount, aDesc) {
+ if (aPath in usages) {
+ usages[aPath] += aAmount;
+ }
+}
+
+var finished = function () {
+ ok(true, "Yay didn't crash!");
+ for (var resource in usages) {
+ ok(usages[resource] > 0, "Non-zero usage for " + resource);
+  }
+ SimpleTest.finish();
+}
+
+SpecialPowers.Cc["@mozilla.org/memory-reporter-manager;1"].
+ getService(SpecialPowers.Ci.nsIMemoryReporterManager).
+ getReports(handleReport, null, finished, null, /* anonymized = */ false);
+
+// To test bug 1225003, run a failing decodeAudioData() job over a time when
+// the tasks from getReports() are expected to run.
+ac.decodeAudioData(new ArrayBuffer(4), function(){}, function(){});
+</script>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_analyserNode.html b/dom/media/webaudio/test/test_analyserNode.html
new file mode 100644
index 0000000000..0793eeb2cb
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserNode.html
@@ -0,0 +1,178 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AnalyserNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function testNode() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var analyser = context.createAnalyser();
+
+ source.buffer = buffer;
+
+ source.connect(analyser);
+ analyser.connect(destination);
+
+ is(analyser.channelCount, 2, "analyser node has 2 input channels by default");
+ is(analyser.channelCountMode, "max", "Correct channelCountMode for the analyser node");
+  is(analyser.channelInterpretation, "speakers", "Correct channelInterpretation for the analyser node");
+
+ is(analyser.fftSize, 2048, "Correct default value for fftSize");
+ is(analyser.frequencyBinCount, 1024, "Correct default value for frequencyBinCount");
+ expectException(function() {
+ analyser.fftSize = 0;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 1;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 8;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 100; // non-power of two
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 2049;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 4097;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 8193;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 16385;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 32769;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.fftSize = 65536;
+ }, DOMException.INDEX_SIZE_ERR);
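+  // Valid fftSize values are powers of two from 32 through 32768, so the
+  // assignment below is accepted.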
+ analyser.fftSize = 1024;
+ is(analyser.frequencyBinCount, 512, "Correct new value for frequencyBinCount");
+
+ is(analyser.minDecibels, -100, "Correct default value for minDecibels");
+ is(analyser.maxDecibels, -30, "Correct default value for maxDecibels");
+ expectException(function() {
+ analyser.minDecibels = -30;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.minDecibels = -29;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.maxDecibels = -100;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.maxDecibels = -101;
+ }, DOMException.INDEX_SIZE_ERR);
+
+ ok(Math.abs(analyser.smoothingTimeConstant - 0.8) < 0.001, "Correct default value for smoothingTimeConstant");
+ expectException(function() {
+ analyser.smoothingTimeConstant = -0.1;
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser.smoothingTimeConstant = 1.1;
+ }, DOMException.INDEX_SIZE_ERR);
+ analyser.smoothingTimeConstant = 0;
+ analyser.smoothingTimeConstant = 1;
+}
+
+function testConstructor() {
+ var context = new AudioContext();
+
+ var analyser = new AnalyserNode(context);
+ is(analyser.channelCount, 2, "analyser node has 2 input channels by default");
+ is(analyser.channelCountMode, "max", "Correct channelCountMode for the analyser node");
+  is(analyser.channelInterpretation, "speakers", "Correct channelInterpretation for the analyser node");
+
+ is(analyser.fftSize, 2048, "Correct default value for fftSize");
+ is(analyser.frequencyBinCount, 1024, "Correct default value for frequencyBinCount");
+ is(analyser.minDecibels, -100, "Correct default value for minDecibels");
+ is(analyser.maxDecibels, -30, "Correct default value for maxDecibels");
+ ok(Math.abs(analyser.smoothingTimeConstant - 0.8) < 0.001, "Correct default value for smoothingTimeConstant");
+
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 0 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 1 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 8 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 100 }); // non-power of two
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 2049 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 4097 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 8193 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 16385 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 32769 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { fftSize: 65536 });
+ }, DOMException.INDEX_SIZE_ERR);
+ analyser = new AnalyserNode(context, { fftSize: 1024 });
+ is(analyser.frequencyBinCount, 512, "Correct new value for frequencyBinCount");
+
+ expectException(function() {
+ analyser = new AnalyserNode(context, { minDecibels: -30 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { minDecibels: -29 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { maxDecibels: -100 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ analyser = new AnalyserNode(context, { maxDecibels: -101 });
+ }, DOMException.INDEX_SIZE_ERR);
+
+ expectException(function() {
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: -0.1 });
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+    analyser = new AnalyserNode(context, { smoothingTimeConstant: 1.1 });
+ }, DOMException.INDEX_SIZE_ERR);
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: 0 });
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: 1 });
+}
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ testNode();
+ testConstructor();
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_analyserNodeMinimum.html b/dom/media/webaudio/test/test_analyserNodeMinimum.html
new file mode 100644
index 0000000000..950fd1812b
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserNodeMinimum.html
@@ -0,0 +1,51 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AnalyserNode when the input is silent</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var ac = new AudioContext();
+ var analyser = ac.createAnalyser();
+ var constant = ac.createConstantSource();
+ var sp = ac.createScriptProcessor(2048, 1, 0);
+
+ constant.offset.value = 0.0;
+
+ constant.connect(analyser)
+ .connect(ac.destination);
+
+ constant.connect(sp);
+
+ var buf = new Float32Array(analyser.frequencyBinCount);
+ var iteration_count = 10;
+ sp.onaudioprocess = function() {
+ analyser.getFloatFrequencyData(buf);
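+    // A zero-magnitude bin converts to 20 * log10(0) = -Infinity dB, so a
+    // fully silent input should leave every frequency bin at -Infinity.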
+    var correct = true;
+    for (var i = 0; i < buf.length; i++) {
+      correct = correct && buf[i] == -Infinity;
+    }
+    ok(correct, "Silent input produces -Infinity in all decibel bins");
+    if (!(iteration_count--)) {
+ sp.onaudioprocess = null;
+ constant.stop();
+ ac.close();
+ SimpleTest.finish();
+ }
+ }
+
+ constant.start();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_analyserNodeOutput.html b/dom/media/webaudio/test/test_analyserNodeOutput.html
new file mode 100644
index 0000000000..27b354e92d
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserNodeOutput.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AnalyserNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var analyser = context.createAnalyser();
+
+ source.buffer = this.buffer;
+
+ source.connect(analyser);
+
+ source.start(0);
+ return analyser;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_analyserNodePassThrough.html b/dom/media/webaudio/test/test_analyserNodePassThrough.html
new file mode 100644
index 0000000000..50ff94a8c7
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserNodePassThrough.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AnalyserNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var analyser = context.createAnalyser();
+
+ source.buffer = this.buffer;
+
+ source.connect(analyser);
+
+ var analyserWrapped = SpecialPowers.wrap(analyser);
+ ok("passThrough" in analyserWrapped, "AnalyserNode should support the passThrough API");
+ analyserWrapped.passThrough = true;
+
+ source.start(0);
+ return analyser;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_analyserNodeWithGain.html b/dom/media/webaudio/test/test_analyserNodeWithGain.html
new file mode 100644
index 0000000000..fa0a2caa75
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserNodeWithGain.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<title>Test effect of AnalyserNode on GainNode output</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ // fftSize <= bufferSize so that the time domain data is full of input after
+ // processing the buffer.
+ const fftSize = 32;
+ const bufferSize = 128;
+
+ var context = new OfflineAudioContext(1, bufferSize, 48000);
+
+ var analyser1 = context.createAnalyser();
+ analyser1.fftSize = fftSize;
+ analyser1.connect(context.destination);
+ var analyser2 = context.createAnalyser();
+ analyser2.fftSize = fftSize;
+
+ var gain = context.createGain();
+ gain.gain.value = 2.0;
+ gain.connect(analyser1);
+ gain.connect(analyser2);
+
+ // Create a DC input to make getFloatTimeDomainData() output consistent at
+ // any time.
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0 / gain.gain.value;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.loop = true;
+ source.connect(gain);
+ source.start();
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.getChannelData(0)[0], 1.0,
+ "analyser1 output");
+
+ var data = new Float32Array(1);
+ analyser1.getFloatTimeDomainData(data);
+ assert_equals(data[0], 1.0, "analyser1 time domain data");
+ analyser2.getFloatTimeDomainData(data);
+ assert_equals(data[0], 1.0, "analyser2 time domain data");
+ });
+});
+</script>
diff --git a/dom/media/webaudio/test/test_analyserScale.html b/dom/media/webaudio/test/test_analyserScale.html
new file mode 100644
index 0000000000..f11e4f2b28
--- /dev/null
+++ b/dom/media/webaudio/test/test_analyserScale.html
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AnalyserNode when the input is scaled</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var context = new AudioContext();
+
+ var gain = context.createGain();
+ var analyser = context.createAnalyser();
+ var osc = context.createOscillator();
+
+
+ osc.connect(gain);
+ gain.connect(analyser);
+
+ osc.start();
+
+ var array = new Uint8Array(analyser.frequencyBinCount);
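+
+  // getByteTimeDomainData maps a sample s in [-1, 1] to roughly 128 * (1 + s),
+  // clamped to [0, 255]; 128 represents silence, so for a unit sine scaled by
+  // a gain g the deviation from 128 should stay within about g * 128.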
+
+ function getAnalyserData() {
+ gain.gain.setValueAtTime(currentGain, context.currentTime);
+ analyser.getByteTimeDomainData(array);
+    var max = 0;
+    for (var i = 0; i < array.length; i++) {
+      var deviation = Math.abs(array[i] - 128);
+      if (deviation > max) {
+        max = deviation;
+      }
+    }
+ if (max <= currentGain * 128) {
+ ok(true, "Analyser got scaled data for " + currentGain);
+ currentGain = tests.shift();
+ if (currentGain == undefined) {
+ SimpleTest.finish();
+ return;
+ }
+ }
+ requestAnimationFrame(getAnalyserData);
+ }
+
+ var tests = [1.0, 0.5, 0.0];
+ var currentGain = tests.shift();
+ requestAnimationFrame(getAnalyserData);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNode.html b/dom/media/webaudio/test/test_audioBufferSourceNode.html
new file mode 100644
index 0000000000..fc7c0b48d1
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNode.html
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.start(0);
+ source.buffer = buffer;
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var buffers = [];
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ buffer.getChannelData(1)[i] = buffer.getChannelData(0)[i];
+ }
+ buffers.push(buffer);
+ buffers.push(getEmptyBuffer(context, 2048));
+ return buffers;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html b/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html
new file mode 100644
index 0000000000..e84c33e585
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioBufferSourceNode when an AudioBuffer's getChannelData buffer is detached</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function createGarbage() {
+ var s = [];
+ for (var i = 0; i < 10000000; ++i) {
+ s.push(i);
+ }
+ var sum = 0;
+ for (var i = 0; i < s.length; ++i) {
+ sum += s[i];
+ }
+ return sum;
+}
+
+var worker = new Worker("audioBufferSourceNodeDetached_worker.js");
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 10000000, context.sampleRate);
+ var data = buffer.getChannelData(0);
+ for (var i = 0; i < data.length; ++i) {
+ data[i] = (i%100)/100 - 0.5;
+ }
+
+ // Detach the buffer now
+  var arrayBuffer = buffer.getChannelData(0).buffer;
+  worker.postMessage(arrayBuffer, [arrayBuffer]);
+ // Create garbage and GC to replace the buffer data with garbage
+ SpecialPowers.gc();
+ createGarbage();
+ SpecialPowers.gc();
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.start();
+ // This should play silence
+ return source;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html b/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html
new file mode 100644
index 0000000000..a11bb880a2
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ended event on AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+
+ source.onended = function(e) {
+ is(e.target, source, "Correct target for the ended event");
+ SimpleTest.finish();
+ };
+
+ source.start(0);
+ source.buffer = buffer;
+ source.connect(context.destination);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html
new file mode 100644
index 0000000000..757d4487c4
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+    // Silence for the first half of the buffer, ones after that.
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 1024; i < 2048; i++) {
+ buffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+
+    // We start at frame 1024, so we should only see ones.
+ source.loop = true;
+ source.loopStart = 1024 / context.sampleRate;
+ source.loopEnd = 2048 / context.sampleRate;
+ source.buffer = buffer;
+ source.start(0, 1024 / context.sampleRate, 2048 / context.sampleRate);
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate);
+ for (var i = 0; i < 2048; i++) {
+ expectedBuffer.getChannelData(0)[i] = 1;
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html
new file mode 100644
index 0000000000..10d5d99108
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode looping</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048 * 4,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ source.start(0);
+ source.loop = true;
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048 * 4, context.sampleRate);
+ for (var i = 0; i < 4; ++i) {
+ for (var j = 0; j < 2048; ++j) {
+ expectedBuffer.getChannelData(0)[i * 2048 + j] = Math.sin(440 * 2 * Math.PI * j / context.sampleRate);
+ }
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html
new file mode 100644
index 0000000000..1ef08e0b83
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode looping</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048 * 4,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = new AudioBufferSourceNode(context, {buffer, loop: true, loopStart: buffer.duration * 0.25, loopEnd: buffer.duration * 0.75 });
+ source.start(0);
+ return source;
+ },
+ createExpectedBuffers(context) {
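+    // Playback runs straight through frames [0, 1536), then keeps repeating
+    // the 1024-frame loop region [512, 1536) until the 4 * 2048-frame
+    // expected buffer is filled.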
+ var expectedBuffer = context.createBuffer(1, 2048 * 4, context.sampleRate);
+ for (var i = 0; i < 1536; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ for (var i = 0; i < 6; ++i) {
+ for (var j = 512; j < 1536; ++j) {
+ expectedBuffer.getChannelData(0)[1536 + i * 1024 + j - 512] = Math.sin(440 * 2 * Math.PI * j / context.sampleRate);
+ }
+ }
+ for (var j = 7680; j < 2048 * 4; ++j) {
+ expectedBuffer.getChannelData(0)[j] = Math.sin(440 * 2 * Math.PI * (j - 7168) / context.sampleRate);
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html
new file mode 100644
index 0000000000..cfe054f838
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html
@@ -0,0 +1,44 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode looping</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ source.loop = true;
+ source.loopStart = source.loopEnd = 1 / context.sampleRate;
+ source.start(0);
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ return buffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html b/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html
new file mode 100644
index 0000000000..e9a0472e2a
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html
@@ -0,0 +1,33 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode when start() is not called</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ var data = buffer.getChannelData(0);
+ for (var i = 0; i < data.length; ++i) {
+ data[i] = (i%100)/100 - 0.5;
+ }
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ return source;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html b/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html
new file mode 100644
index 0000000000..b0b405b366
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ source.start(0);
+ source.buffer = null;
+ is(source.buffer, null, "Try playing back a null buffer");
+ return source;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html
new file mode 100644
index 0000000000..0411b74ce5
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the offset property on AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var fuzz = 0.3;
+
+if (navigator.platform.startsWith("Mac")) {
+ // bug 895720
+ fuzz = 0.6;
+}
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var samplesFromSource = 0;
+ var context = new AudioContext();
+ var sp = context.createScriptProcessor(256);
+
+ sp.onaudioprocess = function(e) {
+ samplesFromSource += e.inputBuffer.length;
+ }
+
+ var buffer = context.createBuffer(1, context.sampleRate, context.sampleRate);
+ for (var i = 0; i < context.sampleRate; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+
+ source.onended = function(e) {
+ // The timing at which the audioprocess and ended listeners are called can
+ // change, hence the fuzzy equal here.
+ var errorRatio = samplesFromSource / (0.5 * context.sampleRate);
+ ok(errorRatio > (1.0 - fuzz) && errorRatio < (1.0 + fuzz),
+ "Correct number of samples received (expected: " +
+ (0.5 * context.sampleRate) + ", actual: " + samplesFromSource + ").");
+ SimpleTest.finish();
+ };
+
+ source.buffer = buffer;
+ source.connect(sp);
+ source.start(0, 0.5);
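+  // start(0, 0.5) begins playback half a second into the one-second buffer,
+  // so about 0.5 * sampleRate samples should reach the script processor.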
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html b/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html
new file mode 100644
index 0000000000..6cb0cccf99
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+
+ source.buffer = buffer;
+
+ var srcWrapped = SpecialPowers.wrap(source);
+ ok("passThrough" in srcWrapped, "AudioBufferSourceNode should support the passThrough API");
+ srcWrapped.passThrough = true;
+
+ source.start(0);
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ return [expectedBuffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html b/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html
new file mode 100644
index 0000000000..85049bfd6d
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioBufferSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var rate = 44100;
+var off = new OfflineAudioContext(1, rate, rate);
+var off2 = new OfflineAudioContext(1, rate, rate);
+
+var source = off.createBufferSource();
+var source2 = off2.createBufferSource();
+
+// A buffer containing a 440Hz sine at half the context length. If we detune
+// by -1200 cents or set the playbackRate to 0.5, we should get 44100 samples
+// back with a sine at 220Hz.
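+// detune is specified in cents: the effective rate multiplier is
+// 2^(detune / 1200), so -1200 cents gives 2^-1 = 0.5, the same slowdown as
+// the playbackRate of 0.5 applied to the first source.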
+var buf = off.createBuffer(1, rate / 2, rate);
+var bufarray = buf.getChannelData(0);
+for (var i = 0; i < bufarray.length; i++) {
+ bufarray[i] = Math.sin(i * 440 * 2 * Math.PI / rate);
+}
+
+source.buffer = buf;
+source.playbackRate.value = 0.5; // 50% slowdown
+source.connect(off.destination);
+source.start(0);
+
+source2.buffer = buf;
+source2.detune.value = -1200; // one octave -> 50% slowdown
+source2.connect(off2.destination);
+source2.start(0);
+
+off.startRendering().then((renderedPlaybackRate) => {
+ // we don't care about comparing the value here, we just want to know whether
+ // the second part is noisy.
+ var rmsValue = rms(renderedPlaybackRate, 0, 22050);
+ ok(rmsValue != 0, "Resampling happened (rms of the second part " + rmsValue + ")");
+
+ off2.startRendering().then((renderedDetune) => {
+ var rmsValue = rms(renderedDetune, 0, 22050);
+ ok(rmsValue != 0, "Resampling happened (rms of the second part " + rmsValue + ")");
+ // The two buffers should be the same: detune of -1200 is a 50% slowdown
+ compareBuffers(renderedPlaybackRate, renderedDetune);
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioContextGC.html b/dom/media/webaudio/test/test_audioContextGC.html
new file mode 100644
index 0000000000..be9990cfad
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioContextGC.html
@@ -0,0 +1,162 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test inactive AudioContext is garbage collected</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+let ids;
+
+const observer = (subject, topic, data) => {
+ const id = parseInt(data);
+ if (ids) {
+ ok(ids.delete(id), "Collected AudioNode id " + id);
+ }
+}
+SpecialPowers.addObserver(observer, "webaudio-node-demise");
+
+SimpleTest.registerCleanupFunction(function() {
+ if (observer) {
+ SpecialPowers.removeObserver(observer, "webaudio-node-demise");
+ }
+});
+
+function id(node) {
+ return SpecialPowers.getPrivilegedProps(node, "id");
+}
+
+let tests = [{
+ name: "Bare running AudioContext", setup: () => {
+ const ac = new AudioContext();
+ ids.add(id(ac.destination));
+ // Await state change notification before collection.
+ return new Promise((resolve) => {
+ ac.onstatechange = () => {
+ is(ac.state, "running", "ac.state");
+ resolve();
+ };
+ });
+ }
+}, {
+ name: "Stopped source", setup: () => {
+ const ac = new AudioContext();
+ ids.add(id(ac.destination));
+ const source = new ConstantSourceNode(ac);
+ ids.add(id(source));
+ source.start();
+ source.stop();
+ // Await ended notification before collection.
+ return new Promise((resolve) => {
+ source.onended = () => {
+ is(ac.state, "running", "ac.state");
+ resolve();
+ };
+ });
+ }
+}, {
+ name: "OfflineAudioContext not started", setup: () => {
+ const ac = new OfflineAudioContext({
+ numberOfChannels: 1, length: 1, sampleRate: 48000
+ });
+ ids.add(id(ac.destination));
+ const source = new ConstantSourceNode(ac);
+ ids.add(id(source));
+ source.start();
+ }
+}, {
+ name: "Completed OfflineAudioContext", setup: async () => {
+ const ac = new OfflineAudioContext({
+ numberOfChannels: 1, length: 1, sampleRate: 48000
+ });
+ ids.add(id(ac.destination));
+ const sourceBeforeStart = new ConstantSourceNode(ac);
+ ids.add(id(sourceBeforeStart));
+ sourceBeforeStart.start();
+ ac.startRendering();
+ await new Promise((resolve) => {
+ ac.oncomplete = () => {
+ resolve();
+ };
+ });
+ const sourceAfterComplete = new ConstantSourceNode(ac);
+ ids.add(id(sourceAfterComplete));
+ sourceAfterComplete.start();
+ }
+}, {
+ name: "suspended AudioContext", setup: async () => {
+ const ac = new AudioContext();
+ ids.add(id(ac.destination));
+ const sourceBeforeSuspend = new ConstantSourceNode(ac);
+ ids.add(id(sourceBeforeSuspend));
+ sourceBeforeSuspend.start();
+ ac.suspend();
+ const sourceAfterSuspend = new ConstantSourceNode(ac);
+ ids.add(id(sourceAfterSuspend));
+ sourceAfterSuspend.start();
+ await new Promise((resolve) => {
+ ac.onstatechange = () => {
+ if (ac.state == "suspended") {
+ resolve();
+ }
+ };
+ });
+ const sourceAfterSuspended = new ConstantSourceNode(ac);
+ ids.add(id(sourceAfterSuspended));
+ sourceAfterSuspended.start();
+ }
+}, {
+ name: "closed AudioContext", setup: async () => {
+ const ac = new AudioContext();
+ ids.add(id(ac.destination));
+ const sourceBeforeClose = new ConstantSourceNode(ac);
+ ids.add(id(sourceBeforeClose));
+ sourceBeforeClose.start();
+ ac.close();
+ const sourceAfterClose = new ConstantSourceNode(ac);
+ ids.add(id(sourceAfterClose));
+ sourceAfterClose.start();
+ await new Promise((resolve) => {
+ ac.onstatechange = () => {
+ if (ac.state == "closed") {
+ resolve();
+ }
+ };
+ });
+ const sourceAfterClosed = new ConstantSourceNode(ac);
+ ids.add(id(sourceAfterClosed));
+ sourceAfterClosed.start();
+ }
+}];
+
+const start_next_test = async () => {
+ const test = tests.shift();
+ if (!test) {
+ SimpleTest.finish();
+ return;
+ }
+ // Collect all audio nodes from previous tests.
+ if (!ids) {
+ await new Promise(resolve => {
+ SpecialPowers.exactGC(resolve);
+ });
+ }
+ ids = new Set();
+ await test.setup();
+ SpecialPowers.exactGC(() => {
+ is(ids.size, 0,
+ `All expected nodes for "${test.name}" should be collected`);
+ start_next_test();
+ });
+}
+
+start_next_test();
+
+</script>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioContextParams_recordNonDefaultSampleRate.html b/dom/media/webaudio/test/test_audioContextParams_recordNonDefaultSampleRate.html
new file mode 100644
index 0000000000..8177202117
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioContextParams_recordNonDefaultSampleRate.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ <script type="text/javascript" src="manifest.js"></script>
+</head>
+<body>
+<pre id="test">
+
+<script class="testbody" type="text/javascript">
+function startTest() {
+ let ctx = new AudioContext({sampleRate: 32000});
+  let oscillator = ctx.createOscillator();
+ let dest = ctx.createMediaStreamDestination();
+ oscillator.connect(dest);
+ oscillator.start();
+ let stream = dest.stream;
+
+  let recorder = new MediaRecorder(stream);
+ recorder.ondataavailable = (e) => {
+ ok(true, 'recorder ondataavailable event');
+ if (recorder.state == 'recording') {
+ ok(e.data.size > 0, 'check blob has data');
+ recorder.stop();
+ }
+ }
+
+ recorder.onstop = () => {
+ ok(true, 'recorder stop event');
+ SimpleTest.finish();
+ }
+
+ try {
+ recorder.start(1000);
+ ok(true, 'recorder started');
+    is(recorder.state, 'recording', 'check record state is recording');
+ } catch (e) {
+    ok(false, 'Cannot record audio context');
+ }
+}
+
+startTest();
+SimpleTest.waitForExplicitFinish();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioContextParams_sampleRate.html b/dom/media/webaudio/test/test_audioContextParams_sampleRate.html
new file mode 100644
index 0000000000..280452403d
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioContextParams_sampleRate.html
@@ -0,0 +1,81 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+</head>
+<body>
+<pre id="test">
+
+<script>
+createHTML({
+ title: "Parallel MTG by setting AudioContextParam sample rate",
+ bug: "1387454",
+ visible: true
+});
+
+runTest(async () => {
+ // Test an AudioContext of specific sample rate.
+ // Verify that the oscillator produces a tone.
+ const rate1 = 500;
+ const ac1 = new AudioContext({sampleRate: 44100});
+ const dest_ac1 = ac1.createMediaStreamDestination();
+ const osc_ac1 = ac1.createOscillator();
+ osc_ac1.frequency.value = rate1;
+ osc_ac1.connect(dest_ac1);
+ osc_ac1.start(0);
+
+ const analyser = new AudioStreamAnalyser(ac1, dest_ac1.stream);
+ analyser.enableDebugCanvas();
+ await analyser.waitForAnalysisSuccess( array => {
+    const freq_50Hz = array[analyser.binIndexForFrequency(50)];
+    const freq_rate1 = array[analyser.binIndexForFrequency(rate1)];
+    const freq_4000Hz = array[analyser.binIndexForFrequency(4000)];
+
+    info("Analysing audio frequency - low:target1:high = "
+      + freq_50Hz + ':' + freq_rate1 + ':' + freq_4000Hz);
+    return freq_50Hz < 50 && freq_rate1 > 200 && freq_4000Hz < 50;
+ })
+ osc_ac1.stop();
+
+ // Same test using a new AudioContext of different sample rate.
+ const rate2 = 1500;
+ const ac2 = new AudioContext({sampleRate: 48000});
+ const dest_ac2 = ac2.createMediaStreamDestination();
+ const osc_ac2 = ac2.createOscillator();
+ osc_ac2.frequency.value = rate2;
+ osc_ac2.connect(dest_ac2);
+ osc_ac2.start(0);
+
+ const analyser2 = new AudioStreamAnalyser(ac2, dest_ac2.stream);
+ analyser2.enableDebugCanvas();
+ await analyser2.waitForAnalysisSuccess( array => {
+    const freq_50Hz = array[analyser2.binIndexForFrequency(50)];
+    const freq_rate2 = array[analyser2.binIndexForFrequency(rate2)];
+    const freq_4000Hz = array[analyser2.binIndexForFrequency(4000)];
+
+    info("Analysing audio frequency - low:target2:high = "
+      + freq_50Hz + ':' + freq_rate2 + ':' + freq_4000Hz);
+    return freq_50Hz < 50 && freq_rate2 > 200 && freq_4000Hz < 50;
+ })
+ osc_ac2.stop();
+
+ // Two AudioContexts with different sample rate cannot communicate.
+ mustThrowWith("Connect nodes with different sample rate", "NotSupportedError",
+ () => ac2.createMediaStreamSource(dest_ac1.stream));
+
+ // Two AudioContext with the same sample rate can communicate.
+ const ac3 = new AudioContext({sampleRate: 48000});
+ const dest_ac3 = ac3.createMediaStreamDestination();
+ const source_ac2 = ac2.createMediaStreamSource(dest_ac3.stream);
+ ok(true, "Connect nodes with the same sample rate is ok");
+
+ mustThrowWith("Invalid zero samplerate", "NotSupportedError",
+ () => new AudioContext({sampleRate: 0}));
+
+ mustThrowWith("Invalid negative samplerate", "NotSupportedError",
+ () => new AudioContext({sampleRate: -1}));
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
new file mode 100644
index 0000000000..36cf8f720c
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html
@@ -0,0 +1,419 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test suspend, resume and close method of the AudioContext</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function tryToCreateNodeOnClosedContext(ctx) {
+ is(ctx.state, "closed", "The context is in closed state");
+
+ [ { name: "createBufferSource" },
+ { name: "createMediaStreamDestination",
+ onOfflineAudioContext: false},
+ { name: "createScriptProcessor" },
+ { name: "createStereoPanner" },
+ { name: "createAnalyser" },
+ { name: "createGain" },
+ { name: "createDelay" },
+ { name: "createBiquadFilter" },
+ { name: "createWaveShaper" },
+ { name: "createPanner" },
+ { name: "createConvolver" },
+ { name: "createChannelSplitter" },
+ { name: "createChannelMerger" },
+ { name: "createDynamicsCompressor" },
+ { name: "createOscillator" },
+ { name: "createMediaElementSource",
+ args: [new Audio()],
+ onOfflineAudioContext: false },
+ { name: "createMediaStreamSource",
+ args: [(new AudioContext()).createMediaStreamDestination().stream],
+ onOfflineAudioContext: false } ].forEach(function(e) {
+
+ if (e.onOfflineAudioContext == false &&
+ ctx instanceof OfflineAudioContext) {
+ return;
+ }
+
+ expectNoException(function() {
+ ctx[e.name].apply(ctx, e.args);
+    });
+ });
+}
+
+function loadFile(url, callback) {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function() {
+ callback(xhr.response);
+ };
+ xhr.send();
+}
+
+// createBuffer, createPeriodicWave and decodeAudioData should work on a context
+// that has `state` == "closed"
+function tryLegalOperationsOnClosedContext(ctx) {
+ is(ctx.state, "closed", "The context is in closed state");
+
+ [ { name: "createBuffer",
+ args: [1, 44100, 44100] },
+ { name: "createPeriodicWave",
+ args: [new Float32Array(10), new Float32Array(10)] }
+ ].forEach(function(e) {
+ expectNoException(function() {
+ ctx[e.name].apply(ctx, e.args);
+ });
+ });
+ loadFile("ting-44.1k-1ch.ogg", function(buf) {
+ ctx.decodeAudioData(buf).then(function(decodedBuf) {
+ ok(true, "decodeAudioData on a closed context should work, it did.")
+ finish();
+ }).catch(function(e){
+ ok(false, "decodeAudioData on a closed context should work, it did not");
+ finish();
+ });
+ });
+}
+
+// Test that MediaStreams that are the output of a suspended AudioContext
+// produce silence.
+// ac1 produces a sine fed to a MediaStreamAudioDestinationNode.
+// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and checks that
+// there is silence when ac1 is suspended.
+function testMultiContextOutput() {
+ var ac1 = new AudioContext(),
+ ac2 = new AudioContext();
+
+ ac1.onstatechange = function() {
+ ac1.onstatechange = null;
+
+ var osc1 = ac1.createOscillator(),
+ mediaStreamDestination1 = ac1.createMediaStreamDestination();
+
+ var mediaStreamAudioSourceNode2 =
+ ac2.createMediaStreamSource(mediaStreamDestination1.stream),
+ sp2 = ac2.createScriptProcessor(),
+ silentBuffersInARow = 0;
+
+
+ sp2.onaudioprocess = function(e) {
+ ac1.suspend().then(function() {
+ is(ac1.state, "suspended", "ac1 is suspended");
+ sp2.onaudioprocess = checkSilence;
+ });
+ sp2.onaudioprocess = null;
+ }
+
+ function checkSilence(e) {
+ var input = e.inputBuffer.getChannelData(0);
+ var silent = true;
+ for (var i = 0; i < input.length; i++) {
+ if (input[i] != 0.0) {
+ silent = false;
+ }
+ }
+
+ if (silent) {
+ silentBuffersInARow++;
+ if (silentBuffersInARow == 10) {
+ ok(true,
+ "MediaStreams produce silence when their input is blocked.");
+ sp2.onaudioprocess = null;
+ ac1.close();
+ ac2.close();
+ finish();
+ }
+ } else {
+ is(silentBuffersInARow, 0,
+ "No non silent buffer inbetween silent buffers.");
+ }
+ }
+
+ osc1.connect(mediaStreamDestination1);
+
+ mediaStreamAudioSourceNode2.connect(sp2);
+ osc1.start();
+ }
+}
+
+
+// Test that there is no buffering between contexts when connecting a running
+// AudioContext to a suspended AudioContext. Our ScriptProcessorNode does some
+// buffering internally, so we ensure this by using a very low frequency
+// sine and observing that the phase has changed by a big enough margin.
+function testMultiContextInput() {
+ var ac1 = new AudioContext(),
+ ac2 = new AudioContext();
+
+ ac1.onstatechange = function() {
+ ac1.onstatechange = null;
+
+ var osc1 = ac1.createOscillator(),
+ mediaStreamDestination1 = ac1.createMediaStreamDestination(),
+ sp1 = ac1.createScriptProcessor();
+
+ var mediaStreamAudioSourceNode2 =
+ ac2.createMediaStreamSource(mediaStreamDestination1.stream),
+ sp2 = ac2.createScriptProcessor(),
+ eventReceived = 0;
+
+
+ osc1.frequency.value = 0.0001;
+
+ function checkDiscontinuity(e) {
+ var inputBuffer = e.inputBuffer.getChannelData(0);
+ if (eventReceived++ == 3) {
+ var delta = Math.abs(inputBuffer[1] - sp2.value),
+ theoreticalIncrement = 2048 * 3 * Math.PI * 2 * osc1.frequency.value / ac1.sampleRate;
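+        // The sine's phase advances by 2 * PI * f / sampleRate per frame;
+        // across three 2048-frame buffers the input must have advanced at
+        // least this much if no extra buffering occurred between contexts.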
+ ok(delta >= theoreticalIncrement,
+ "Buffering did not occur when the context was suspended (delta:" + delta + " increment: " + theoreticalIncrement+")");
+ ac1.close();
+ ac2.close();
+ sp1.onaudioprocess = null;
+ sp2.onaudioprocess = null;
+ finish();
+ }
+ }
+
+ sp2.onaudioprocess = function(e) {
+ var inputBuffer = e.inputBuffer.getChannelData(0);
+ sp2.value = inputBuffer[inputBuffer.length - 1];
+ ac2.suspend().then(function() {
+ ac2.resume().then(function() {
+ sp2.onaudioprocess = checkDiscontinuity;
+ });
+ });
+ }
+
+ osc1.connect(mediaStreamDestination1);
+ osc1.connect(sp1);
+
+ mediaStreamAudioSourceNode2.connect(sp2);
+ osc1.start();
+ }
+}
+
+// Test that ScriptProcessorNode's onaudioprocess doesn't get called while the
+// context is suspended/closed. It is possible that we get the handler called
+// exactly once after suspend, because the event has already been sent to the
+// event loop.
+function testScriptProcessNodeSuspended() {
+ var ac = new AudioContext();
+ var sp = ac.createScriptProcessor();
+ var remainingIterations = 30;
+ var afterResume = false;
+ ac.onstatechange = function() {
+ ac.onstatechange = null;
+ sp.onaudioprocess = function() {
+ ok(ac.state == "running", "If onaudioprocess is called, the context" +
+ " must be running (was " + ac.state + ", remainingIterations:" + remainingIterations +")");
+ remainingIterations--;
+ if (!afterResume) {
+ if (remainingIterations == 0) {
+ ac.suspend().then(function() {
+ ac.resume().then(function() {
+ remainingIterations = 30;
+ afterResume = true;
+ });
+ });
+ }
+ } else {
+ sp.onaudioprocess = null;
+ finish();
+ }
+ }
+ }
+ sp.connect(ac.destination);
+}
+
+// Take an AudioContext, make sure it switches to running when the audio starts
+// flowing, and then, call suspend, resume and close on it, tracking its state.
+function testAudioContext() {
+ var ac = new AudioContext();
+ is(ac.state, "suspended", "AudioContext should start in suspended state.");
+ var stateTracker = {
+ previous: ac.state,
+ // no promise for the initial suspended -> running
+ initial: { handler: false },
+ suspend: { promise: false, handler: false },
+ resume: { promise: false, handler: false },
+ close: { promise: false, handler: false }
+ };
+
+ function initialSuspendToRunning() {
+ ok(stateTracker.previous == "suspended" &&
+ ac.state == "running",
+ "AudioContext should switch to \"running\" when the audio hardware is" +
+ " ready.");
+
+ stateTracker.previous = ac.state;
+ ac.onstatechange = afterSuspend;
+ stateTracker.initial.handler = true;
+
+ ac.suspend().then(function() {
+ ok(!stateTracker.suspend.promise && !stateTracker.suspend.handler,
+ "Promise should be resolved before the callback, and only once.")
+ stateTracker.suspend.promise = true;
+ });
+ }
+
+ function afterSuspend() {
+ ok(stateTracker.previous == "running" &&
+ ac.state == "suspended",
+ "AudioContext should switch to \"suspend\" when the audio stream is" +
+ "suspended.");
+ ok(stateTracker.suspend.promise && !stateTracker.suspend.handler,
+ "Handler should be called after the callback, and only once");
+
+ stateTracker.suspend.handler = true;
+ stateTracker.previous = ac.state;
+ ac.onstatechange = afterResume;
+
+ ac.resume().then(function() {
+ ok(!stateTracker.resume.promise && !stateTracker.resume.handler,
+ "Promise should be called before the callback, and only once");
+ stateTracker.resume.promise = true;
+ });
+ }
+
+ function afterResume() {
+ ok(stateTracker.previous == "suspended" &&
+ ac.state == "running",
+ "AudioContext should switch to \"running\" when the audio stream resumes.");
+
+ ok(stateTracker.resume.promise && !stateTracker.resume.handler,
+ "Handler should be called after the callback, and only once");
+
+ stateTracker.resume.handler = true;
+ stateTracker.previous = ac.state;
+ ac.onstatechange = afterClose;
+
+ ac.close().then(function() {
+ ok(!stateTracker.close.promise && !stateTracker.close.handler,
+ "Promise should be called before the callback, and only once");
+ stateTracker.close.promise = true;
+ tryToCreateNodeOnClosedContext(ac);
+      tryLegalOperationsOnClosedContext(ac);
+ });
+ }
+
+ function afterClose() {
+ ok(stateTracker.previous == "running" &&
+ ac.state == "closed",
+ "AudioContext should switch to \"closed\" when the audio stream is" +
+ " closed.");
+ ok(stateTracker.close.promise && !stateTracker.close.handler,
+ "Handler should be called after the callback, and only once");
+ }
+
+ ac.onstatechange = initialSuspendToRunning;
+}
+
+function testOfflineAudioContext() {
+ var o = new OfflineAudioContext(1, 44100, 44100);
+ is(o.state, "suspended", "OfflineAudioContext should start in suspended state.");
+
+ expectRejectedPromise(o, "resume", "NotSupportedError");
+
+ var previousState = o.state,
+ finishedRendering = false;
+ function beforeStartRendering() {
+ ok(previousState == "suspended" && o.state == "running", "onstatechanged" +
+ "handler is called on state changed, and the new state is running");
+ previousState = o.state;
+ o.onstatechange = onRenderingFinished;
+ }
+
+ function onRenderingFinished() {
+ ok(previousState == "running" && o.state == "closed",
+ "onstatechanged handler is called when rendering finishes, " +
+ "and the new state is closed");
+    ok(finishedRendering, "The Promise that is resolved when the rendering is " +
+       "done should be resolved earlier than the state change.");
+ previousState = o.state;
+ o.onstatechange = afterRenderingFinished;
+
+ tryToCreateNodeOnClosedContext(o);
+    tryLegalOperationsOnClosedContext(o);
+ }
+
+ function afterRenderingFinished() {
+ ok(false, "There should be no transition out of the closed state.");
+ }
+
+ o.onstatechange = beforeStartRendering;
+
+ o.startRendering().then(function(buffer) {
+ finishedRendering = true;
+ });
+}
+
+function testSuspendResumeEventLoop() {
+ var ac = new AudioContext();
+ var source = ac.createBufferSource();
+ source.buffer = ac.createBuffer(1, 44100, 44100);
+ source.onended = function() {
+ ok(true, "The AudioContext did resume.");
+ finish();
+ }
+ ac.onstatechange = function() {
+ ac.onstatechange = null;
+
+ ok(ac.state == "running", "initial state is running");
+ ac.suspend();
+ source.start();
+ ac.resume();
+ }
+}
+
+function testResumeInStateChangeForResumeCallback() {
+ // Regression test for bug 1468085.
+ var ac = new AudioContext;
+ ac.onstatechange = function() {
+ ac.resume().then(() => {
+ ok(true, "resume promise resolved as expected.");
+ finish();
+ });
+ }
+}
+
+var remaining = 0;
+function finish() {
+ remaining--;
+ if (remaining == 0) {
+ SimpleTest.finish();
+ }
+}
+
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var tests = [
+ testOfflineAudioContext,
+ testScriptProcessNodeSuspended,
+ testMultiContextOutput,
+ testMultiContextInput,
+ testSuspendResumeEventLoop,
+ testResumeInStateChangeForResumeCallback
+ ];
+
+ // See Bug 1305136, many intermittent failures on Linux
+ if (!navigator.platform.startsWith("Linux")) {
+ tests.push(testAudioContext);
+ }
+
+ remaining = tests.length;
+ tests.forEach(function(f) { f() });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioDestinationNode.html b/dom/media/webaudio/test/test_audioDestinationNode.html
new file mode 100644
index 0000000000..e7a8b091ba
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioDestinationNode.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioDestinationNode as EventTarget</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var ac = new AudioContext();
+ac.destination.addEventListener("foo", function() {
+ ok(true, "Event received!");
+ SimpleTest.finish();
+});
+ac.destination.dispatchEvent(new CustomEvent("foo"));
+
+</script>
+</pre>
+</body>
+</html>
+
diff --git a/dom/media/webaudio/test/test_audioParamChaining.html b/dom/media/webaudio/test/test_audioParamChaining.html
new file mode 100644
index 0000000000..85b8099e2e
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamChaining.html
@@ -0,0 +1,77 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test chaining of AudioParam automation methods</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish()
+
+function frameToTime(frame, rate)
+{
+ return frame / rate;
+}
+
+const RATE = 44100;
+
+var oc = new OfflineAudioContext(1, 44100, RATE);
+// This allows us to have a source that is simply a DC offset.
+var source = oc.createBufferSource();
+var buf = oc.createBuffer(1, 1, RATE);
+buf.getChannelData(0)[0] = 1;
+source.loop = true;
+source.buffer = buf;
+
+source.start(0);
+
+var gain = oc.createGain();
+
+source.connect(gain).connect(oc.destination);
+
+var gain2 = oc.createGain();
+var rv2 = gain2.gain.linearRampToValueAtTime(0.1, 0.5);
+ok(rv2 instanceof AudioParam, "linearRampToValueAtTime returns an AudioParam.");
+ok(rv2 == gain2.gain, "linearRampToValueAtTime returns the right AudioParam.");
+
+rv2 = gain2.gain.exponentialRampToValueAtTime(0.01, 1.0);
+ok(rv2 instanceof AudioParam,
+ "exponentialRampToValueAtTime returns an AudioParam.");
+ok(rv2 == gain2.gain,
+ "exponentialRampToValueAtTime returns the right AudioParam.");
+
+rv2 = gain2.gain.setTargetAtTime(1.0, 2.0, 0.1);
+ok(rv2 instanceof AudioParam, "setTargetAtTime returns an AudioParam.");
+ok(rv2 == gain2.gain, "setTargetAtTime returns the right AudioParam.");
+
+var array = new Float32Array(10);
+rv2 = gain2.gain.setValueCurveAtTime(array, 10, 11);
+ok(rv2 instanceof AudioParam, "setValueCurveAtTime returns an AudioParam.");
+ok(rv2 == gain2.gain, "setValueCurveAtTime returns the right AudioParam.");
+
+// We chain three automation methods, making a gain step.
+var rv = gain.gain.setValueAtTime(0, frameToTime(0, RATE))
+ .setValueAtTime(0.5, frameToTime(22000, RATE))
+ .setValueAtTime(1, frameToTime(44000, RATE));
+
+ok(rv instanceof AudioParam, "setValueAtTime returns an AudioParam.");
+ok(rv == gain.gain, "setValueAtTime returns the right AudioParam.");
+
+oc.startRendering().then(function(rendered) {
+ is(rendered.getChannelData(0)[0], 0,
+ "The value of the first step is correct.");
+ is(rendered.getChannelData(0)[22050], 0.5,
+ "The value of the second step is correct");
+ is(rendered.getChannelData(0)[44099], 1,
+ "The value of the third step is correct.");
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamExponentialRamp.html b/dom/media/webaudio/test/test_audioParamExponentialRamp.html
new file mode 100644
index 0000000000..2416b5de14
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamExponentialRamp.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam.exponentialRampToValue</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.1;
+var V1 = 0.9;
+var T0 = 0;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(V0, 0);
+ gain.gain.exponentialRampToValueAtTime(V1, 2048/context.sampleRate);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
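+    // exponentialRampToValueAtTime follows
+    // v(t) = V0 * (V1 / V0)^((t - T0) / (T1 - T0)); the loop below evaluates
+    // this curve once per sample frame.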
+ var T1 = 2048 / context.sampleRate;
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ var t = i / context.sampleRate;
+ expectedBuffer.getChannelData(0)[i] = V0 * Math.pow(V1 / V0, (t - T0) / (T1 - T0));
+ }
+ return expectedBuffer;
+ },
+};
+
+
+SimpleTest.waitForExplicitFinish();
+// Comparing different AudioContexts may result in different timing related information being reported
+// when we jitter time, as they are on different Relative Timelines.
+SpecialPowers.pushPrefEnv({"set": [["privacy.resistFingerprinting.reduceTimerPrecision.jitter", false]]}, runTest);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamGain.html b/dom/media/webaudio/test/test_audioParamGain.html
new file mode 100644
index 0000000000..3977b94703
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamGain.html
@@ -0,0 +1,61 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam with pre-gain </title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var ctx = new AudioContext();
+var source = ctx.createOscillator();
+var lfo = ctx.createOscillator();
+var lfoIntensity = ctx.createGain();
+var effect = ctx.createGain();
+var sp = ctx.createScriptProcessor(2048, 1);
+
+source.frequency.value = 440;
+lfo.frequency.value = 2;
+// Very low gain, so the LFO should have very little influence
+// on the source, its RMS value should be close to the nominal value
+// for a sine wave.
+lfoIntensity.gain.value = 0.0001;
+
+lfo.connect(lfoIntensity);
+lfoIntensity.connect(effect.gain);
+source.connect(effect);
+effect.connect(sp);
+
+sp.onaudioprocess = function(e) {
+ var buffer = e.inputBuffer.getChannelData(0);
+ var rms = 0;
+ for (var i = 0; i < buffer.length; i++) {
+ rms += buffer[i] * buffer[i];
+ }
+
+ rms /= buffer.length;
+ rms = Math.sqrt(rms);
+
+ // 1 / Math.sqrt(2) is the theoretical RMS value for a sine wave.
+ ok(fuzzyCompare(rms, 1 / Math.sqrt(2)),
+ "Gain correctly applied to the AudioParam.");
+
+ ctx = null;
+ sp.onaudioprocess = null;
+ lfo.stop(0);
+ source.stop(0);
+
+ SimpleTest.finish();
+}
+
+lfo.start(0);
+source.start(0);
+
+</script>
+</pre>
+</body>
diff --git a/dom/media/webaudio/test/test_audioParamLinearRamp.html b/dom/media/webaudio/test/test_audioParamLinearRamp.html
new file mode 100644
index 0000000000..5ec26467e8
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamLinearRamp.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam.linearRampToValue</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.1;
+var V1 = 0.9;
+var T0 = 0;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(V0, 0);
+ gain.gain.linearRampToValueAtTime(V1, 2048/context.sampleRate);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
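+    // linearRampToValueAtTime interpolates linearly:
+    // v(t) = V0 + (V1 - V0) * (t - T0) / (T1 - T0), evaluated per frame below.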
+ var T1 = 2048 / context.sampleRate;
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ var t = i / context.sampleRate;
+ expectedBuffer.getChannelData(0)[i] = V0 + (V1 - V0) * ((t - T0) / (T1 - T0));
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html b/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html
new file mode 100644
index 0000000000..e21b58bb19
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioParam.setValueCurveAtTime</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var T0 = 0;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createConstantSource();
+
+ var gain = context.createGain();
+ gain.gain.setValueCurveAtTime(this.curve, T0, this.duration);
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ this.duration = 1024 / context.sampleRate;
+ this.curve = new Float32Array([1.0, 0.5, 0.75, 0.25]);
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ var data = expectedBuffer.getChannelData(0);
+ var step = 1024 / 3;
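+    // The 4-point curve covers `duration` (1024 frames) with N - 1 = 3 equal
+    // linear segments; once the curve ends, its last value (0.25) holds.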
+ for (var i = 0; i < 2048; ++i) {
+ if (i < step) {
+ data[i] = 1.0 - 0.5*i/step;
+ } else if (i < 2*step) {
+ data[i] = 0.5 + 0.25*(i - step)/step;
+ } else if (i < 3*step) {
+ data[i] = 0.75 - 0.5*(i - 2*step)/step;
+ } else {
+ data[i] = 0.25;
+ }
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html b/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html
new file mode 100644
index 0000000000..3328519f12
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam.setTargetAtTime</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.9;
+var V1 = 0.1;
+var T0 = 0;
+var TimeConstant = 10;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.value = V0;
+ gain.gain.setTargetAtTime(V1, T0, TimeConstant);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ var T1 = 2048 / context.sampleRate;
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
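+    // setTargetAtTime decays exponentially toward the target value:
+    // v(t) = V1 + (V0 - V1) * exp(-(t - T0) / TimeConstant).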
+ for (var i = 0; i < 2048; ++i) {
+ var t = i / context.sampleRate;
+ expectedBuffer.getChannelData(0)[i] = V1 + (V0 - V1) * Math.exp(-(t - T0) / TimeConstant);
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html b/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html
new file mode 100644
index 0000000000..9982023c21
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam.setTargetAtTime with zero time constant</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.9;
+var V1 = 0.1;
+var T0 = 0;
+var TimeConstant = 0;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.value = V0;
+ gain.gain.setTargetAtTime(V1, T0, TimeConstant);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+    var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+    // With a zero time constant the target value is reached immediately, so
+    // every output sample is simply V1.
+    for (var i = 0; i < 2048; ++i) {
+      expectedBuffer.getChannelData(0)[i] = V1;
+ }
+ return expectedBuffer;
+ },
+};
+
+SimpleTest.waitForExplicitFinish();
+// Comparing different AudioContexts may result in different timing related information being reported
+// when we jitter time, as they are on different Relative Timelines.
+SpecialPowers.pushPrefEnv({"set": [["privacy.resistFingerprinting.reduceTimerPrecision.jitter", false]]}, runTest);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamSetValueAtTime.html b/dom/media/webaudio/test/test_audioParamSetValueAtTime.html
new file mode 100644
index 0000000000..18c02837e6
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamSetValueAtTime.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioParam.setValueAtTime</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.1;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.value = 0;
+ gain.gain.setValueAtTime(V0, 1024/context.sampleRate);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 1024; i < 2048; ++i) {
+      expectedBuffer.getChannelData(0)[i] = V0;
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html b/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html
new file mode 100644
index 0000000000..76db12f88a
--- /dev/null
+++ b/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html
@@ -0,0 +1,45 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam timeline events scheduled after the destination stream has started playback</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.requestFlakyTimeout("This test needs to wait until the AudioDestinationNode's stream's timer starts.");
+
+var gTest = {
+ length: 16384,
+ numberOfChannels: 1,
+ createGraphAsync(context, callback) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ setTimeout(function() {
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+ source.start(context.currentTime);
+ source.stop(context.currentTime + sourceBuffer.duration);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(0, context.currentTime);
+ gain.gain.setTargetAtTime(0, context.currentTime + sourceBuffer.duration, 1);
+ source.connect(gain);
+
+ callback(gain);
+ }, 100);
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_badConnect.html b/dom/media/webaudio/test/test_badConnect.html
new file mode 100644
index 0000000000..51e83f6088
--- /dev/null
+++ b/dom/media/webaudio/test/test_badConnect.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that invalid AudioNode connections throw the expected exceptions</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context1 = new OfflineAudioContext(1, 128, 44100);
+ var context2 = new OfflineAudioContext(1, 128, 44100);
+
+ var destination1 = context1.destination;
+ var destination2 = context2.destination;
+ var gain1 = new GainNode(context2);
+
+ isnot(destination1, destination2, "Destination nodes should not be the same");
+ isnot(destination1.context, destination2.context, "Destination nodes should not have the same context");
+
+ var source1 = context1.createBufferSource();
+
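+  // Out-of-range output/input indices should throw IndexSizeError; connecting
+  // to a node or AudioParam that belongs to a different AudioContext should
+  // throw InvalidAccessError.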
+ expectException(function() {
+ source1.connect(destination1, 1);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ source1.connect(destination1, 0, 1);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ source1.connect(destination2);
+ }, DOMException.INVALID_ACCESS_ERR);
+ expectException(function() {
+ source1.connect(gain1.gain);
+ }, DOMException.INVALID_ACCESS_ERR);
+
+ source1.connect(destination1);
+
+ expectException(function() {
+ source1.disconnect(1);
+ }, DOMException.INDEX_SIZE_ERR);
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_biquadFilterNode.html b/dom/media/webaudio/test/test_biquadFilterNode.html
new file mode 100644
index 0000000000..561198c491
--- /dev/null
+++ b/dom/media/webaudio/test/test_biquadFilterNode.html
@@ -0,0 +1,86 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+function near(a, b, msg) {
+ ok(Math.abs(a - b) < 1e-3, msg);
+}
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var filter = context.createBiquadFilter();
+
+ source.buffer = buffer;
+
+ source.connect(filter);
+ filter.connect(destination);
+
+ // Verify default values
+ is(filter.type, "lowpass", "Correct default value for type");
+ near(filter.frequency.defaultValue, 350, "Correct default value for filter frequency");
+ near(filter.detune.defaultValue, 0, "Correct default value for filter detune");
+ near(filter.Q.defaultValue, 1, "Correct default value for filter Q");
+ near(filter.gain.defaultValue, 0, "Correct default value for filter gain");
+ is(filter.channelCount, 2, "Biquad filter node has 2 input channels by default");
+ is(filter.channelCountMode, "max", "Correct channelCountMode for the biquad filter node");
+  is(filter.channelInterpretation, "speakers", "Correct channelInterpretation for the biquad filter node");
+
+ // Make sure that we can set all of the valid type values
+ var types = [
+ "lowpass",
+ "highpass",
+ "bandpass",
+ "lowshelf",
+ "highshelf",
+ "peaking",
+ "notch",
+ "allpass",
+ ];
+ for (var i = 0; i < types.length; ++i) {
+ filter.type = types[i];
+ }
+
+ // Make sure getFrequencyResponse handles invalid frequencies properly
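+  // Only frequencies in [0, sampleRate/2] are in range; out-of-range entries
+  // should produce NaN in both the magnitude and phase arrays.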
+ var frequencies = new Float32Array([-1.0, context.sampleRate*0.5 - 1.0, context.sampleRate]);
+ var magResults = new Float32Array(3);
+ var phaseResults = new Float32Array(3);
+ filter.getFrequencyResponse(frequencies, magResults, phaseResults);
+ ok(isNaN(magResults[0]), "Invalid input frequency should give NaN magnitude response");
+ ok(!isNaN(magResults[1]), "Valid input frequency should not give NaN magnitude response");
+ ok(isNaN(magResults[2]), "Invalid input frequency should give NaN magnitude response");
+ ok(isNaN(phaseResults[0]), "Invalid input frquency should give NaN phase response");
+ ok(!isNaN(phaseResults[1]), "Valid input frquency should not give NaN phase response");
+ ok(isNaN(phaseResults[2]), "Invalid input frquency should give NaN phase response");
+
+ source.start(0);
+ SimpleTest.executeSoon(function() {
+ source.stop(0);
+ source.disconnect();
+ filter.disconnect();
+
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html b/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html
new file mode 100644
index 0000000000..4db01b0b14
--- /dev/null
+++ b/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var filter = context.createBiquadFilter();
+
+ source.buffer = this.buffer;
+
+ source.connect(filter);
+
+ var filterWrapped = SpecialPowers.wrap(filter);
+ ok("passThrough" in filterWrapped, "BiquadFilterNode should support the passThrough API");
+ filterWrapped.passThrough = true;
+
+ source.start(0);
+ return filter;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html b/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html
new file mode 100644
index 0000000000..d97a753de9
--- /dev/null
+++ b/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html
@@ -0,0 +1,61 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test BiquadFilterNode after a GainNode and tail - Bugs 924286 and 924288</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const signalLength = 2048;
+
+var gTest = {
+ length: signalLength,
+ numberOfChannels: 1,
+ createGraph(context) {
+ // Two oscillators scheduled sequentially
+ var signalDuration = signalLength / context.sampleRate;
+ var osc1 = context.createOscillator();
+ osc1.type = "square";
+ osc1.start(0);
+ osc1.stop(signalDuration / 2);
+ var osc2 = context.createOscillator();
+ osc2.start(signalDuration / 2);
+ osc2.stop(signalDuration);
+
+ // Comparing a biquad on each source with one on both sources checks that
+ // the biquad on the first source doesn't shut down early.
+ var biquad1 = context.createBiquadFilter();
+ osc1.connect(biquad1);
+ var biquad2 = context.createBiquadFilter();
+ osc2.connect(biquad2);
+
+ var gain = context.createGain();
+ gain.gain.value = -1;
+ osc1.connect(gain);
+ osc2.connect(gain);
+
+ var biquadWithGain = context.createBiquadFilter();
+ gain.connect(biquadWithGain);
+
+ // The output of biquadWithGain should be the inverse of the sum of the
+ // outputs of biquad1 and biquad2, so blend them together and expect
+ // silence.
+ var blend = context.createGain();
+ biquad1.connect(blend);
+ biquad2.connect(blend);
+ biquadWithGain.connect(blend);
+
+ return blend;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1027864.html b/dom/media/webaudio/test/test_bug1027864.html
new file mode 100644
index 0000000000..847485ff88
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1027864.html
@@ -0,0 +1,74 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test bug 1027864</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+function observer(subject, topic, data) {
+ var id = parseInt(data);
+ var index = ids.indexOf(id);
+ if (index != -1) {
+ ok(true, "Dropping id " + id + " at index " + index);
+ ids.splice(index, 1);
+ if (!ids.length) {
+ SimpleTest.executeSoon(function() {
+ SimpleTest.finish();
+ });
+ }
+ }
+}
+
+function id(node) {
+ return SpecialPowers.getPrivilegedProps(node, "id");
+}
+
+SpecialPowers.addAsyncObserver(observer, "webaudio-node-demise", false);
+
+SimpleTest.registerCleanupFunction(function() {
+ SpecialPowers.removeAsyncObserver(observer, "webaudio-node-demise");
+});
+
+var ac = new AudioContext();
+var ids;
+
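+// The graph is built inside a closure so that, once the source stops, nothing
+// keeps the nodes alive; "webaudio-node-demise" then fires for each node as it
+// is destroyed, and the test finishes when all three ids have been observed.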
+(function() {
+ var delay = ac.createDelay();
+ delay.delayTime.value = 0.03;
+
+ var gain = ac.createGain();
+ gain.gain.value = 0.6;
+
+ delay.connect(gain);
+ gain.connect(delay);
+
+ gain.connect(ac.destination);
+
+ var source = ac.createOscillator();
+
+ source.connect(gain);
+ source.start(ac.currentTime);
+ source.stop(ac.currentTime + 0.1);
+
+ ids = [ id(delay), id(gain), id(source) ];
+})();
+
+setInterval(function() {
+ forceCC();
+}, 200);
+
+function forceCC() {
+ SpecialPowers.DOMWindowUtils.cycleCollect();
+ SpecialPowers.DOMWindowUtils.garbageCollect();
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1056032.html b/dom/media/webaudio/test/test_bug1056032.html
new file mode 100644
index 0000000000..ba38267e19
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1056032.html
@@ -0,0 +1,35 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset=utf-8>
+<head>
+ <title>Test that we can decode an mp3 (bug 1056032)</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+var filename = "small-shot.mp3";
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", filename);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function() {
+ var context = new AudioContext();
+ context.decodeAudioData(xhr.response, function(b) {
+ ok(true, "We can decode an mp3 using decodeAudioData");
+ SimpleTest.finish();
+ }, function() {
+ ok(false, "We should be able to decode an mp3 using decodeAudioData but couldn't");
+ SimpleTest.finish();
+ });
+ };
+ xhr.send(null);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1113634.html b/dom/media/webaudio/test/test_bug1113634.html
new file mode 100644
index 0000000000..acdcba7c25
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1113634.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioParam.setTargetAtTime where the target time is the same as the time of a previous event</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var V0 = 0.9;
+var V1 = 0.1;
+var T0 = 0;
+var TimeConstant = 0.1;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(V0, T0);
+ gain.gain.setTargetAtTime(V1, T0, TimeConstant);
+
+ source.connect(gain);
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
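+    // The setTargetAtTime event shares its start time T0 with the
+    // setValueAtTime event, so the exponential approach starts from V0.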
+ for (var i = 0; i < 2048; ++i) {
+ var t = i / context.sampleRate;
+ expectedBuffer.getChannelData(0)[i] = V1 + (V0 - V1) * Math.exp(-(t - T0) / TimeConstant);
+ }
+ return expectedBuffer;
+ },
+};
+
+SimpleTest.waitForExplicitFinish();
+// Comparing different AudioContexts may result in different timing related information being reported
+// when we jitter time, as they are on different Relative Timelines.
+SpecialPowers.pushPrefEnv({"set": [["privacy.resistFingerprinting.reduceTimerPrecision.jitter", false]]}, runTest);
+
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1118372.html b/dom/media/webaudio/test/test_bug1118372.html
new file mode 100644
index 0000000000..f049b221e8
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1118372.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that a WaveShaperNode keeps the scaling of a preceding GainNode (bug 1118372)</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ var context = new OfflineAudioContext(1, 2048, 44100);
+
+  var osc = context.createOscillator();
+  var gain = context.createGain();
+  var shaper = context.createWaveShaper();
+  gain.gain.value = 0.1;
+  shaper.curve = new Float32Array([-0.5, -0.5, 1, 1]);
+
+ osc.connect(gain);
+ gain.connect(shaper);
+ shaper.connect(context.destination);
+ osc.start(0);
+
+ context.startRendering().then(function(buffer) {
+ var samples = buffer.getChannelData(0);
+    // The gain of 0.1 keeps the shaper input within [-0.1, 0.1], which this
+    // curve maps to values strictly below 0.5.
+ var failures = 0;
+ for (var i = 0; i < 2048; ++i) {
+ if (samples[i] > 0.5) {
+ failures = failures + 1;
+ }
+ }
+ ok(failures == 0, "signal should have been rescaled by gain: found " + failures + " points too loud.");
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1255618.html b/dom/media/webaudio/test/test_bug1255618.html
new file mode 100644
index 0000000000..15e7351995
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1255618.html
@@ -0,0 +1,41 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test sync XHR does not crash unlinked AudioContext</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script>
+SimpleTest.waitForExplicitFinish();
+
+const filename = "test_bug1255618.html";
+
+function collect_and_fetch() {
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", filename, false);
+ var ended = false;
+  xhr.onloadend = function() { ended = true; };
+ // Sync XHR will suspend timeouts, which involves any AudioContexts still
+ // registered with the window.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=1255618#c0
+ xhr.send(null);
+
+ ok(ended, "No crash during fetch");
+ SimpleTest.finish();
+}
+
+var ac = new AudioContext();
+
+ac.onstatechange = function () {
+ ac.onstatechange = null;
+ is(ac.state, "running", "statechange to running");
+ ac = null;
+ SimpleTest.executeSoon(collect_and_fetch);
+};
+
+</script>
+</body>
diff --git a/dom/media/webaudio/test/test_bug1267579.html b/dom/media/webaudio/test/test_bug1267579.html
new file mode 100644
index 0000000000..7003b345f5
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1267579.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that PeriodicWave handles fundamental frequency of zero</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// This is the smallest value that the test framework will accept
+const testLength = 256;
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ runTest();
+});
+
+var gTest = {
+ numberOfChannels: 1,
+ createGraph(context) {
+ var osc = context.createOscillator();
+    osc.setPeriodicWave(
+      context.createPeriodicWave(new Float32Array([0.0, 1.0]),
+                                 new Float32Array(2)));
+ osc.frequency.value = 0.0;
+ osc.start();
+ return osc;
+ },
+ createExpectedBuffers(context) {
+ var buffer = context.createBuffer(1, testLength, context.sampleRate);
+
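+    // With real = [0, 1] the waveform is cos(phase); at 0 Hz the phase never
+    // advances, so the oscillator should hold a constant cos(0) = 1.0.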
+ for (var i = 0; i < buffer.length; ++i) {
+ buffer.getChannelData(0)[i] = 1.0;
+ }
+ return buffer;
+ },
+};
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1355798.html b/dom/media/webaudio/test/test_bug1355798.html
new file mode 100644
index 0000000000..9b46322bbc
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1355798.html
@@ -0,0 +1,30 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that PannerNode produces output even when the distance from the
+    listener is zero and cone gain is present; regression test for bug
+    1355798.</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+var off = new OfflineAudioContext(1, 128, 44100);
+var panner = off.createPanner();
+var osc = off.createOscillator();
+panner.setPosition(1, 1, 1);
+off.listener.setPosition(1, 1, 1);
+osc.connect(panner).connect(off.destination);
+panner.coneOuterAngle = 359;
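+// The source and the listener share the same position, so the distance is
+// zero; combined with a non-default cone this used to produce NaN in the
+// panning computation (bug 1355798).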
+osc.start();
+off.startRendering().then(function(b) {
+  is(b.getChannelData(0).filter(x => isNaN(x)).length, 0, "PannerNode output should contain no NaN values");
+ SimpleTest.finish();
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug1447273.html b/dom/media/webaudio/test/test_bug1447273.html
new file mode 100644
index 0000000000..f4b473d8ab
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug1447273.html
@@ -0,0 +1,175 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test bug 1447273 - GainNode with a stereo input and changing volume</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <script type="text/javascript" src="head.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+/**
+ * Sets up a stereo BufferSource and plumbs it through different gain node
+ * configurations. A control gain path with no changes to gain is used along
+ * with 2 other paths which should increase their gain. The result should be
+ * that audio travelling along the increased gain paths is louder than the
+ * control path.
+ */
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout(
+ "This test uses a live audio context and uses a setTimeout to schedule a " +
+ "change to the graph.");
+addLoadEvent(function() {
+ let context = new AudioContext();
+
+ let numChannels = 2;
+ let sampleRate = context.sampleRate;
+ // 60 seconds to mitigate timing issues on slow test machines
+ let recordingLength = 60;
+ let bufferLength = sampleRate * recordingLength;
+ let gainExplicitlyIncreased = false;
+ let sourceFinished = false;
+
+ // Create source buffer
+ let sourceBuffer = context.createBuffer(numChannels, bufferLength, sampleRate);
+ for (let i = 0; i < bufferLength; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ sourceBuffer.getChannelData(1)[i] = 1;
+ }
+ let source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ let gainNoChange = context.createGain();
+ let gainExplicitAssignment = context.createGain();
+ let gainSetValueAtTime = context.createGain();
+
+  // All gain nodes start off with the same gain
+ gainNoChange.gain.value = 0.25;
+ gainExplicitAssignment.gain.value = 0.25;
+ gainSetValueAtTime.gain.value = 0.25;
+
+ // Connect source to gain nodes:
+ // source--> gainNoChange
+ // |-> gainExplicitAssignment
+ // \-> gainSetValueAtTime
+ source.connect(gainNoChange);
+ source.connect(gainExplicitAssignment);
+ source.connect(gainSetValueAtTime);
+
+ // Create intermediate media streams (required to repro bug 1447273)
+ let destNoChange = context.createMediaStreamDestination();
+ let destExplicitAssignement = context.createMediaStreamDestination();
+ let destSetValueAtTime = context.createMediaStreamDestination();
+
+ let sourceNoChange = context.createMediaStreamSource(destNoChange.stream);
+ let sourceExplicitAssignement = context.createMediaStreamSource(destExplicitAssignement.stream);
+ let sourceSetValueAtTime = context.createMediaStreamSource(destSetValueAtTime.stream);
+
+ // Connect gain nodes to our intermediate streams:
+ // source--> gainNoChange -> destNoChange -> sourceNoChange
+ // |-> gainExplicitAssignment -> destExplicitAssignement -> sourceExplicitAssignement
+ // \-> gainSetValueAtTime -> destSetValueAtTime -> sourceSetValueAtTime
+ gainNoChange.connect(destNoChange);
+ gainExplicitAssignment.connect(destExplicitAssignement);
+ gainSetValueAtTime.connect(destSetValueAtTime);
+
+ // Create analysers for each path
+ let analyserNoChange = context.createAnalyser();
+ let analyserExplicitAssignment = context.createAnalyser();
+ let analyserSetValueAtTime = context.createAnalyser();
+
+ // Connect our intermediate media streams to analysers:
+ // source--> gainNoChange -> destNoChange -> sourceNoChange -> analyserNoChange
+ // |-> gainExplicitAssignment -> destExplicitAssignement -> sourceExplicitAssignement -> analyserExplicitAssignment
+ // \-> gainSetValueAtTime -> destSetValueAtTime -> sourceSetValueAtTime -> analyserSetValueAtTime
+ sourceNoChange.connect(analyserNoChange);
+ sourceExplicitAssignement.connect(analyserExplicitAssignment);
+ sourceSetValueAtTime.connect(analyserSetValueAtTime);
+
+ // Two seconds in, increase gain for setValueAt path
+ gainSetValueAtTime.gain.setValueAtTime(0.5, 2);
+
+ // Maximum values seen at each analyser node, will be updated by
+ // checkAnalysersForMaxValues() during test.
+ let maxNoGainChange = 0;
+ let maxExplicitAssignment = 0;
+ let maxSetValueAtTime = 0;
+
+ // Poll analysers and check for max values
+ function checkAnalysersForMaxValues() {
+ let findMaxValue =
+ (array) => array.reduce((a, b) => Math.max(Math.abs(a), Math.abs(b)));
+
+ let dataArray = new Float32Array(analyserNoChange.fftSize);
+ analyserNoChange.getFloatTimeDomainData(dataArray);
+ maxNoGainChange = Math.max(maxNoGainChange, findMaxValue(dataArray));
+
+ analyserExplicitAssignment.getFloatTimeDomainData(dataArray);
+ maxExplicitAssignment = Math.max(maxExplicitAssignment, findMaxValue(dataArray));
+
+ analyserSetValueAtTime.getFloatTimeDomainData(dataArray);
+ maxSetValueAtTime = Math.max(maxSetValueAtTime, findMaxValue(dataArray));
+
+ // End test if we've met our conditions
+ // Add a small amount to initial gain to make sure we're not getting
+ // passes due to rounding errors.
+ let epsilon = 0.01;
+ if (maxExplicitAssignment > (maxNoGainChange + epsilon) &&
+ maxSetValueAtTime > (maxNoGainChange + epsilon)) {
+ source.stop();
+ }
+ }
+
+ source.onended = () => {
+ sourceFinished = true;
+ info(`maxNoGainChange: ${maxNoGainChange}`);
+ info(`maxExplicitAssignment: ${maxExplicitAssignment}`);
+ info(`maxSetValueAtTime: ${maxSetValueAtTime}`);
+  ok(gainExplicitlyIncreased,
+       "Gain should be explicitly assigned during test!");
+ // Add a small amount to initial gain to make sure we're not getting
+ // passes due to rounding errors.
+ let epsilon = 0.01;
+ ok(maxExplicitAssignment > (maxNoGainChange + epsilon),
+ "Volume should increase due to explicit assignment to gain.value");
+ ok(maxSetValueAtTime > (maxNoGainChange + epsilon),
+ "Volume should increase due to setValueAtTime on gain.value");
+ SimpleTest.finish();
+ };
+
+ // Start the graph
+ source.start(0);
+
+ // We'll use this callback to check our analysers for gain
+ function animationFrameCb() {
+ if (sourceFinished) {
+ return;
+ }
+ requestAnimationFrame(animationFrameCb);
+ checkAnalysersForMaxValues();
+ }
+
+ // Using timers is gross, but as of writing there doesn't appear to be a
+ // nicer way to perform gain.value = 0.5 on our node. When/if we support
+ // suspend(time) on offline contexts we could potentially use that instead.
+
+  // Roughly 2 seconds into our source buffer (modulo setTimeout flakiness),
+  // increase the gain on the gainExplicitAssignment path.
+ window.setTimeout(() => {
+ gainExplicitAssignment.gain.value = 0.5;
+ // Make debugging flaky timeouts in test easier
+ info("Gain explicitly set!")
+ gainExplicitlyIncreased = true;
+ // Start checking analysers, we do this only after changing volume to avoid
+ // possible starvation of this timeout from requestAnimationFrame calls.
+ animationFrameCb();
+ }, 2000);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug808374.html b/dom/media/webaudio/test/test_bug808374.html
new file mode 100644
index 0000000000..b255c0b9c3
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug808374.html
@@ -0,0 +1,22 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 808374</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+try {
+ var ctx = new AudioContext();
+ ctx.createBuffer(0, 1, ctx.sampleRate);
+} catch (e) {
+ ok(true, "The test should not crash during CC");
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug827541.html b/dom/media/webaudio/test/test_bug827541.html
new file mode 100644
index 0000000000..f205c7edf9
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug827541.html
@@ -0,0 +1,24 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Tell the cycle collector about the audio contexts owned by nsGlobalWindow</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+ var iframe = document.createElementNS("http://www.w3.org/1999/xhtml", "iframe");
+ document.body.appendChild(iframe);
+ var frameWin = iframe.contentWindow;
+ new frameWin.AudioContext();
+ document.body.removeChild(iframe);
+ expectException(() => new frameWin.AudioContext(),
+ DOMException.INVALID_STATE_ERR);
+
+ // This test should not leak.
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug839753.html b/dom/media/webaudio/test/test_bug839753.html
new file mode 100644
index 0000000000..f3e6598116
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug839753.html
@@ -0,0 +1,18 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 839753</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+(new AudioContext()).destination.expando = null;
+ok(true, "The test should not trigger wrapper cache assertions");
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug845960.html b/dom/media/webaudio/test/test_bug845960.html
new file mode 100644
index 0000000000..17bf9a5700
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug845960.html
@@ -0,0 +1,18 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 845960</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+(new AudioContext()).decodeAudioData(new ArrayBuffer(0), function() {});
+ok(true, "Should not crash when the optional failure callback is not specified");
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug856771.html b/dom/media/webaudio/test/test_bug856771.html
new file mode 100644
index 0000000000..24a527ccd5
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug856771.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test for bug 856771</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+
+ var source = context.createBufferSource();
+ source.connect(context.destination);
+ ok(true, "Nothing should leak");
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug866570.html b/dom/media/webaudio/test/test_bug866570.html
new file mode 100644
index 0000000000..90bd8f6985
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug866570.html
@@ -0,0 +1,18 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Crashtest for bug 866570</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+(new AudioContext()).foo = null;
+ok(true, "The test should not fatally assert");
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug866737.html b/dom/media/webaudio/test/test_bug866737.html
new file mode 100644
index 0000000000..e8db6b76e8
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug866737.html
@@ -0,0 +1,36 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test for bug 866737</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var context = new AudioContext();
+
+(function() {
+ var d = context.createDelay();
+ var panner = context.createPanner();
+ d.connect(panner);
+ var gain = context.createGain();
+ panner.connect(gain);
+ gain.connect(context.destination);
+ gain.disconnect(0);
+})();
+
+SpecialPowers.forceGC();
+SpecialPowers.forceCC();
+
+var gain = context.createGain();
+gain.connect(context.destination);
+gain.disconnect(0);
+
+ok(true, "No crashes should happen!");
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug867089.html b/dom/media/webaudio/test/test_bug867089.html
new file mode 100644
index 0000000000..e5a5179530
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug867089.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 867089</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ctx = new AudioContext();
+
+ // Test invalid playbackRate values for AudioBufferSourceNode.
+ var source = ctx.createBufferSource();
+ var buffer = ctx.createBuffer(2, 2048, 8000);
+ source.buffer = buffer;
+ source.playbackRate.value = 0.0;
+ source.connect(ctx.destination);
+ source.start(0);
+
+ var source2 = ctx.createBufferSource();
+ source2.buffer = buffer;
+ source2.playbackRate.value = -1.0;
+ source2.connect(ctx.destination);
+ source2.start(0);
+
+ var source3 = ctx.createBufferSource();
+ source3.buffer = buffer;
+ source3.playbackRate.value = 3000000.0;
+ source3.connect(ctx.destination);
+ source3.start(0);
+ ok(true, "We did not crash.");
+ SimpleTest.finish();
+});
+
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug867174.html b/dom/media/webaudio/test/test_bug867174.html
new file mode 100644
index 0000000000..e949bcec41
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug867174.html
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 867174</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ctx = new AudioContext();
+
+ var source = ctx.createBufferSource();
+ var buffer = ctx.createBuffer(2, 2048, 8000);
+ source.playbackRate.setTargetAtTime(0, 2, 3);
+ var sp = ctx.createScriptProcessor();
+ source.connect(sp);
+ sp.connect(ctx.destination);
+ source.start(0);
+
+ sp.onaudioprocess = function(e) {
+ // Now set the buffer
+ source.buffer = buffer;
+
+ ok(true, "We did not crash.");
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+ };
+});
+
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug873335.html b/dom/media/webaudio/test/test_bug873335.html
new file mode 100644
index 0000000000..19fd6d4dae
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug873335.html
@@ -0,0 +1,22 @@
+<html>
+<head>
+<meta charset="UTF-8">
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<script>
+
+function boom()
+{
+ (new AudioContext()).createScriptProcessor().hamster = {};
+ SpecialPowers.forceCC();
+ SpecialPowers.forceGC();
+ ok(true, "test finished");
+ SimpleTest.finish();
+}
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</head>
+
+<body onload="boom();"></body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug875221.html b/dom/media/webaudio/test/test_bug875221.html
new file mode 100644
index 0000000000..5eb017d011
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug875221.html
@@ -0,0 +1,239 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 875221</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test is generated by a fuzzer, so we leave these setTimeouts untouched.");
+
+try { o0 = document.createElement('audio'); } catch(e) { }
+try { (document.body || document.documentElement).appendChild(o0); } catch(e) { }
+try { o1 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o1.listener.dopplerFactor = 1; } catch(e) { }
+try { o2 = o1.createScriptProcessor(); } catch(e) { }
+try { o3 = o1.createChannelMerger(4); } catch(e) { }
+try { o1.listener.dopplerFactor = 3; } catch(e) { }
+try { o1.listener.setPosition(0, 134217728, 64) } catch(e) { }
+try { o1.listener.dopplerFactor = 15; } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o4 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { }
+try { o4.listener.speedOfSound = 2048; } catch(e) { }
+try { o4.listener.setPosition(32768, 1, 1) } catch(e) { }
+try { o5 = o1.createChannelSplitter(4); } catch(e) { }
+try { o4.listener.setVelocity(4, 1, 0) } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+try { o4.listener.setPosition(64, 1, 0) } catch(e) { }
+try { o1.listener.setOrientation(4194304, 15, 8388608, 15, 1, 1) } catch(e) { }
+try { o1.listener.dopplerFactor = 256; } catch(e) { }
+try { o6 = o4.createDelay(16); } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+try { o4.listener.setOrientation(0, 1, 0, 0, 31, 1073741824) } catch(e) { }
+try { o4.listener.speedOfSound = 1; } catch(e) { }
+try { o1.listener.speedOfSound = 0; } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o6.connect(o3, 1, 0) } catch(e) { }
+try { o1.listener.setPosition(4294967296, 32, 1) } catch(e) { }
+try { o1.listener.speedOfSound = 0; } catch(e) { }
+try { o1.listener.speedOfSound = 0; } catch(e) { }
+try { o1.listener.setVelocity(1, 256, 0) } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+try { o3.disconnect() } catch(e) { }
+setTimeout("try { o4.startRendering(); } catch(e) { }",50)
+try { o4.listener.setOrientation(0, 0, 2048, 128, 16384, 127) } catch(e) { }
+try { o4.listener.setVelocity(0, 4, 1) } catch(e) { }
+try { o7 = o4.createScriptProcessor(1024, 4, 1); } catch(e) { }
+try { o8 = o4.createDynamicsCompressor(); } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+SpecialPowers.forceCC();
+SpecialPowers.forceGC();
+try { o4.listener.setOrientation(8192, 1, 1, 512, 0, 15) } catch(e) { }
+setTimeout("try { o7.onaudioprocess = function() {}; } catch(e) { }",50)
+try { o1.startRendering(); } catch(e) { }
+try { o1.listener.speedOfSound = 1073741824; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o9 = o4.createScriptProcessor(1024, 1, 4); } catch(e) { }
+try { o10 = o4.createAnalyser(); } catch(e) { }
+try { o4.listener.speedOfSound = 0; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o4.listener.setVelocity(524288, 1, 65536) } catch(e) { }
+setTimeout("try { o2.connect(o9); } catch(e) { } setTimeout(done, 0);",1000)
+try { o7.connect(o4); } catch(e) { }
+try { o1.listener.setVelocity(1, 127, 31) } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+setTimeout("try { o5.disconnect() } catch(e) { }",100)
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+setTimeout("try { o1.listener.dopplerFactor = 1; } catch(e) { }",100)
+try { o5.disconnect() } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o10.disconnect() } catch(e) { }
+try { o1.startRendering(); } catch(e) { }
+try { o11 = o1.createGain(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o4.listener.setOrientation(31, 0, 15, 0, 33554432, 1) } catch(e) { }
+try { o4.listener.dopplerFactor = 1; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+setTimeout("try { o9.connect(o4); } catch(e) { }",50)
+try { o2.connect(o9); } catch(e) { }
+setTimeout("try { o9.connect(o1); } catch(e) { }",200)
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o12 = o4.createDynamicsCompressor(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o9.onaudioprocess = function() {}; } catch(e) { }
+try { o1.listener.speedOfSound = 262144; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+setTimeout("try { o7.connect(o4); } catch(e) { }",50)
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o13 = o4.createGain(); } catch(e) { }
+try { o4.listener.dopplerFactor = 31; } catch(e) { }
+try { o11.gain.value = 268435456; } catch(e) { }
+try { o1.listener.setOrientation(63, 3, 1, 63, 1, 2147483648) } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o4.listener.setVelocity(1, 0, 1) } catch(e) { }
+try { o11.gain.value = 65536; } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+setTimeout("try { o7.connect(o4); } catch(e) { }",200)
+try { o14 = o4.createDynamicsCompressor(); } catch(e) { }
+setTimeout("try { o2.connect(o9); } catch(e) { }",50)
+try { o7.connect(o1); } catch(e) { }
+try { o15 = o1.createWaveShaper(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o16 = o1.createWaveShaper(); } catch(e) { }
+try { o11.gain.value = 1; } catch(e) { }
+try { o1.listener.speedOfSound = 16; } catch(e) { }
+try { o4.listener.setVelocity(0, 127, 15) } catch(e) { }
+try { o1.listener.setVelocity(0, 2048, 16777216) } catch(e) { }
+try { o13.gain.value = 0; } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o17 = document.createElement('audio'); } catch(e) { }
+try { (document.body || document.documentElement).appendChild(o0); } catch(e) { }
+try { o4.listener.setVelocity(3, 1, 256) } catch(e) { }
+try { o11.gain.cancelScheduledValues(1) } catch(e) { }
+try { o1.listener.dopplerFactor = 524288; } catch(e) { }
+try { o9.onaudioprocess = function() {}; } catch(e) { }
+setTimeout("try { o7.connect(o13, 0, 0) } catch(e) { }",50)
+try { o1.listener.speedOfSound = 0; } catch(e) { }
+try { o10.disconnect() } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o1.listener.speedOfSound = 1; } catch(e) { }
+try { o15.disconnect() } catch(e) { }
+try { o11.gain.exponentialRampToValueAtTime(0, 15) } catch(e) { }
+try { o15.curve = new Float32Array(15); } catch(e) { }
+try { o4.listener.setVelocity(1, 1, 1) } catch(e) { }
+try { o14.connect(o6, 0, 0) } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+setTimeout("try { o7.connect(o1); } catch(e) { }",100)
+try { o4.listener.setVelocity(1, 7, 1) } catch(e) { }
+try { o18 = document.createElement('audio'); } catch(e) { }
+try { (document.body || document.documentElement).appendChild(o18); } catch(e) { }
+try { o19 = o4.createGain(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o4.listener.dopplerFactor = 0; } catch(e) { }
+try { o1.listener.setPosition(256, 16, 1) } catch(e) { }
+setTimeout("try { o2.connect(o9); } catch(e) { }",50)
+try { o7.connect(o1); } catch(e) { }
+try { o4.listener.speedOfSound = 31; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+setTimeout("try { o9.connect(o4); } catch(e) { }",1000)
+try { o11.gain.value = 127; } catch(e) { }
+try { o7.connect(o7, 0, 0) } catch(e) { }
+try { o4.listener.speedOfSound = 63; } catch(e) { }
+try { o11.gain.value = 33554432; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o4.listener.speedOfSound = 16; } catch(e) { }
+try { o4.listener.setVelocity(1048576, 0, 127) } catch(e) { }
+try { o1.listener.dopplerFactor = 0; } catch(e) { }
+try { o6.connect(o2, 0, 1) } catch(e) { }
+try { o5.disconnect() } catch(e) { }
+try { o3.disconnect() } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o16.disconnect() } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o9.disconnect() } catch(e) { }
+try { o4.listener.speedOfSound = 1; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o11.gain.setValueCurveAtTime(new Float32Array(3), 2048, 3) } catch(e) { }
+try { o13.gain.value = 8; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o4.listener.setOrientation(1, 2048, 1, 1, 0, 31) } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o1); } catch(e) { }
+try { o1.listener.speedOfSound = 256; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o4); } catch(e) { }
+try { o4.listener.setVelocity(1, 67108864, 128) } catch(e) { }
+setTimeout("try { o1.listener.setVelocity(0, 1, 1) } catch(e) { }",100)
+try { o2.connect(o9); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+setTimeout("try { o20 = o1.createBiquadFilter(); } catch(e) { }",200)
+try { o13.gain.value = 4096; } catch(e) { }
+try { o1.listener.dopplerFactor = 0; } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+setTimeout("try { o2.connect(o9); } catch(e) { }",200)
+try { o7.connect(o1); } catch(e) { }
+try { o3.connect(o15, 1, 1) } catch(e) { }
+try { o2.connect(o12, 0, 0) } catch(e) { }
+try { o19.gain.exponentialRampToValueAtTime(1, 0) } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+
+function done() {
+ ok(true, "We did not crash.");
+ SimpleTest.finish();
+}
+
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug875402.html b/dom/media/webaudio/test/test_bug875402.html
new file mode 100644
index 0000000000..95c8e0e236
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug875402.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Crashtest for bug 875402</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+SimpleTest.requestFlakyTimeout("This test is generated by a fuzzer, so we leave these setTimeouts untouched.");
+
+try { o1 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { }
+try { o2 = o1.createScriptProcessor(); } catch(e) { }
+try { o4 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { }
+try { o5 = o1.createChannelSplitter(4); } catch(e) { }
+try { o7 = o4.createScriptProcessor(1024, 4, 1); } catch(e) { }
+SpecialPowers.forceCC();
+SpecialPowers.forceGC();
+try { o1.startRendering(); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o7.connect(o4); } catch(e) { }
+try { o9 = o4.createScriptProcessor(1024, 1, 4); } catch(e) { }
+try { o2.connect(o7); } catch(e) { }
+try { o9.connect(o1); } catch(e) { }
+setTimeout("try { o2.connect(o9); } catch(e) { } done();",1000)
+try { o7.connect(o4); } catch(e) { }
+setTimeout("try { o5.disconnect() } catch(e) { }",100)
+try { o2.connect(o9); } catch(e) { }
+try { o4.startRendering(); } catch(e) { }
+try { o2.connect(o9); } catch(e) { }
+setTimeout("try { o7.connect(o4); } catch(e) { }",50)
+try { o13 = o4.createGain(); } catch(e) { }
+setTimeout("try { o7.connect(o13, 0, 0) } catch(e) { }",50)
+
+function done() {
+ ok(true, "We did not crash.");
+ SimpleTest.finish();
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug894150.html b/dom/media/webaudio/test/test_bug894150.html
new file mode 100644
index 0000000000..4577232d71
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug894150.html
@@ -0,0 +1,21 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Crashtest for bug 894150</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script>
+
+var ac = new AudioContext();
+ac.createPanner();
+var listener = ac.listener;
+SpecialPowers.forceGC();
+SpecialPowers.forceCC();
+listener.setOrientation(0, 0, -1, 0, 0, 0);
+
+ok(true, "No crashes should happen!");
+
+</script>
+</body>
diff --git a/dom/media/webaudio/test/test_bug956489.html b/dom/media/webaudio/test/test_bug956489.html
new file mode 100644
index 0000000000..f0ae559b05
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug956489.html
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test when and currentTime are in the same coordinate system</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test needs to wait a while for the AudioContext's timer to start.");
+addLoadEvent(function() {
+ var freq = 330;
+
+ var context = new AudioContext();
+
+ var buffer = context.createBuffer(1, context.sampleRate / freq, context.sampleRate);
+ for (var i = 0; i < buffer.length; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(2 * Math.PI * i / buffer.length);
+ }
+
+ var source = context.createBufferSource();
+ source.loop = true;
+ source.buffer = buffer;
+
+ setTimeout(function () {
+ var finished = false;
+
+ source.start(context.currentTime);
+ var processor = context.createScriptProcessor(256, 1, 1);
+ processor.onaudioprocess = function (e) {
+ if (finished) return;
+ var c = e.inputBuffer.getChannelData(0);
+ var result = true;
+
+ for (var i = 0; i < buffer.length; ++i) {
+ if (Math.abs(c[i] - buffer.getChannelData(0)[i]) > 1e-9) {
+ result = false;
+ break;
+ }
+ }
+ finished = true;
+ ok(result, "when and currentTime are in same time coordinate system");
+ SimpleTest.finish();
+    };
+ processor.connect(context.destination);
+ source.connect(processor);
+ }, 500);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug964376.html b/dom/media/webaudio/test/test_bug964376.html
new file mode 100644
index 0000000000..bc9d167dcd
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug964376.html
@@ -0,0 +1,64 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test repeating audio is not distorted</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function gcd(a, b) {
+ if (b === 0) {
+ return a;
+ }
+ return gcd(b, a % b);
+}
+
+var SAMPLE_PLACEMENT = 128;
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+
+ createGraph(context) {
+ var freq = Math.round(context.sampleRate / SAMPLE_PLACEMENT);
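+    // sampleRate / gcd(freq, sampleRate) frames contain a whole number of
+    // cycles of freq, so looping a buffer of that length is phase-continuous.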
+ var dur = context.sampleRate / gcd(freq, context.sampleRate);
+ var buffer = context.createBuffer(1, dur, context.sampleRate);
+
+    for (var i = 0; i < buffer.length; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(freq * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.loop = true;
+ source.playbackRate.setValueAtTime(0.5, SAMPLE_PLACEMENT / context.sampleRate);
+ source.start(0);
+
+ return source;
+ },
+
+ createExpectedBuffers(context) {
+ var freq = Math.round(context.sampleRate / SAMPLE_PLACEMENT);
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ var c = expectedBuffer.getChannelData(0);
+ for (var i = 0; i < c.length; ++i) {
+ if (i < SAMPLE_PLACEMENT) {
+ c[i] = Math.sin(freq * 2 * Math.PI * i / context.sampleRate);
+ } else {
+ c[i] = Math.sin(freq / 2 * 2 * Math.PI * (i + SAMPLE_PLACEMENT) / context.sampleRate);
+ }
+ }
+
+ return expectedBuffer;
+ },
+};
+
+runTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug966247.html b/dom/media/webaudio/test/test_bug966247.html
new file mode 100644
index 0000000000..69831c33b6
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug966247.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test whether an audio file played with a volume set to 0 plays silence</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<audio preload=none src="ting-48k-1ch.ogg" controls> </audio>
+<script>
+ SimpleTest.waitForExplicitFinish();
+
+ var count = 20;
+
+ function isSilent(b) {
+ for (var i = 0; i < b.length; i++) {
+ if (b[i] != 0.0) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ var a = document.getElementsByTagName("audio")[0];
+ a.volume = 0.0;
+ var ac = new AudioContext();
+ var measn = ac.createMediaElementSource(a);
+ var sp = ac.createScriptProcessor();
+
+ sp.onaudioprocess = function(e) {
+ var inputBuffer = e.inputBuffer.getChannelData(0);
+ ok(isSilent(inputBuffer), "The volume is set to 0, so all the elements of the buffer are supposed to be equal to 0.0");
+ }
+ // Connect the MediaElementAudioSourceNode to the ScriptProcessorNode to check
+ // the audio volume.
+ measn.connect(sp);
+ a.play();
+
+ a.addEventListener("ended", function() {
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+ });
+
+</script>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_bug972678.html b/dom/media/webaudio/test/test_bug972678.html
new file mode 100644
index 0000000000..1450c19645
--- /dev/null
+++ b/dom/media/webaudio/test/test_bug972678.html
@@ -0,0 +1,62 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test buffers do not interfere when scheduled in sequence</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+var OFFSETS = [0.005, 0.01, 0.02, 0.03];
+var LENGTH = 128;
+
+var gTest = {
+ length: 128 * OFFSETS.length,
+ numberOfChannels: 1,
+
+ createGraph(context) {
+ var gain = context.createGain();
+
+ // create a repeating sample
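+    // A longer buffer scheduled below carries the inverted pattern at the
+    // same offsets, so the two signals should cancel and render the silent
+    // expected buffer.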
+ var repeatingSample = context.createBuffer(1, 2, context.sampleRate);
+ var c = repeatingSample.getChannelData(0);
+ for (var i = 0; i < repeatingSample.length; ++i) {
+ c[i] = i % 2 == 0 ? 1 : -1;
+ }
+
+ OFFSETS.forEach(function (offset, offsetIdx) {
+ // Schedule a set of nodes to repeat the sample.
+ for (var i = 0; i < LENGTH; i += repeatingSample.length) {
+ var source = context.createBufferSource();
+ source.buffer = repeatingSample;
+ source.connect(gain);
+ source.start((offsetIdx * LENGTH + i + offset) / context.sampleRate);
+ }
+
+      var buffer = context.createBuffer(1, LENGTH, context.sampleRate);
+ c = buffer.getChannelData(0);
+ for (var i = 0; i < buffer.length; ++i) {
+ c[i] = i % 2 == 0 ? -1 : 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(gain);
+ source.start((offsetIdx * LENGTH + offset) / context.sampleRate);
+ });
+
+ return gain;
+ },
+
+ createExpectedBuffers(context) {
+ return context.createBuffer(1, gTest.length, context.sampleRate);
+ },
+};
+
+runTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_channelMergerNode.html b/dom/media/webaudio/test/test_channelMergerNode.html
new file mode 100644
index 0000000000..b62d34d6ba
--- /dev/null
+++ b/dom/media/webaudio/test/test_channelMergerNode.html
@@ -0,0 +1,57 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ChannelMergerNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 6,
+ createGraph(context) {
+ var buffers = [];
+ for (var j = 0; j < 6; ++j) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate);
+ // Second channel is silent
+ }
+ buffers.push(buffer);
+ }
+
+ var merger = new ChannelMergerNode(context);
+    is(merger.channelCount, 1, "merger node has 1 input channel");
+    is(merger.channelCountMode, "explicit", "Correct channelCountMode for the merger node");
+    is(merger.channelInterpretation, "speakers", "Correct channelInterpretation for the merger node");
+
+ for (var i = 0; i < 6; ++i) {
+ var source = context.createBufferSource();
+ source.buffer = buffers[i];
+ source.connect(merger, 0, i);
+ source.start(0);
+ }
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
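+    // Each merger input takes one channel, so the stereo sources are
+    // down-mixed to mono as 0.5 * (left + right); with a silent second
+    // channel this halves the sine, hence the 0.5 factor below.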
+ var expectedBuffer = context.createBuffer(6, 2048, context.sampleRate);
+ for (var i = 0; i < 6; ++i) {
+ for (var j = 0; j < 2048; ++j) {
+ expectedBuffer.getChannelData(i)[j] = 0.5 * Math.sin(440 * 2 * (i + 1) * Math.PI * j / context.sampleRate);
+ }
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html b/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html
new file mode 100644
index 0000000000..55b9ec0c0b
--- /dev/null
+++ b/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test ChannelMergerNode with volume</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 6,
+ createGraph(context) {
+ var buffers = [];
+ for (var j = 0; j < 6; ++j) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate);
+ // Second channel is silent
+ }
+ buffers.push(buffer);
+ }
+
+ var merger = context.createChannelMerger();
+    is(merger.channelCount, 1, "merger node has 1 input channel");
+    is(merger.channelCountMode, "explicit", "Correct channelCountMode for the merger node");
+    is(merger.channelInterpretation, "speakers", "Correct channelInterpretation for the merger node");
+
+ for (var i = 0; i < 6; ++i) {
+ var source = context.createBufferSource();
+ source.buffer = buffers[i];
+ var gain = context.createGain();
+ gain.gain.value = 0.5;
+ source.connect(gain);
+ gain.connect(merger, 0, i);
+ source.start(0);
+ }
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(6, 2048, context.sampleRate);
+ for (var i = 0; i < 6; ++i) {
+ for (var j = 0; j < 2048; ++j) {
+ expectedBuffer.getChannelData(i)[j] = 0.5 * 0.5 * Math.sin(440 * 2 * (i + 1) * Math.PI * j / context.sampleRate);
+ }
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_channelSplitterNode.html b/dom/media/webaudio/test/test_channelSplitterNode.html
new file mode 100644
index 0000000000..d74845f821
--- /dev/null
+++ b/dom/media/webaudio/test/test_channelSplitterNode.html
@@ -0,0 +1,71 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ChannelSplitterNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the splitter node is special in that it creates multiple
+// output ports.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(4, 2048, context.sampleRate);
+ for (var j = 0; j < 4; ++j) {
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate);
+ }
+ }
+ var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var splitter = new ChannelSplitterNode(context);
+  is(splitter.channelCount, 6, "splitter node has 6 input channels by default");
+  is(splitter.channelCountMode, "explicit", "Correct channelCountMode for the splitter node");
+  is(splitter.channelInterpretation, "discrete", "Correct channelInterpretation for the splitter node");
+
+ source.buffer = buffer;
+ source.connect(splitter);
+
+ var channelsSeen = 0;
+ function createHandler(i) {
+ return function(e) {
+ is(e.inputBuffer.numberOfChannels, 1, "Correct input channel count");
+ if (i < 4) {
+ compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(i));
+ } else {
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
+ }
+ e.target.onaudioprocess = null;
+ ++channelsSeen;
+
+ if (channelsSeen == 6) {
+ SimpleTest.finish();
+ }
+ };
+ }
+
+ for (var i = 0; i < 6; ++i) {
+ var sp = context.createScriptProcessor(2048, 1);
+ splitter.connect(sp, i);
+ sp.onaudioprocess = createHandler(i);
+ sp.connect(destination);
+ }
+
+ source.start(0);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html b/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html
new file mode 100644
index 0000000000..c03f6deeaf
--- /dev/null
+++ b/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html
@@ -0,0 +1,76 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test ChannelSplitterNode with volume</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the splitter node is special in that it creates multiple
+// output ports.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(4, 2048, context.sampleRate);
+ var expectedBuffer = context.createBuffer(4, 2048, context.sampleRate);
+ for (var j = 0; j < 4; ++j) {
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate);
+ expectedBuffer.getChannelData(j)[i] = buffer.getChannelData(j)[i] / 2;
+ }
+ }
+ var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var splitter = context.createChannelSplitter();
+  is(splitter.channelCount, 6, "splitter node has 6 input channels by default");
+  is(splitter.channelCountMode, "explicit", "Correct channelCountMode for the splitter node");
+  is(splitter.channelInterpretation, "discrete", "Correct channelInterpretation for the splitter node");
+
+ source.buffer = buffer;
+ var gain = context.createGain();
+ gain.gain.value = 0.5;
+ source.connect(gain);
+ gain.connect(splitter);
+
+ var channelsSeen = 0;
+ function createHandler(i) {
+ return function(e) {
+ is(e.inputBuffer.numberOfChannels, 1, "Correct input channel count");
+ if (i < 4) {
+        compareChannels(e.inputBuffer.getChannelData(0), expectedBuffer.getChannelData(i));
+      } else {
+        compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
+ }
+ e.target.onaudioprocess = null;
+ ++channelsSeen;
+
+ if (channelsSeen == 6) {
+ SimpleTest.finish();
+ }
+ };
+ }
+
+ for (var i = 0; i < 6; ++i) {
+ var sp = context.createScriptProcessor(2048, 1);
+ splitter.connect(sp, i);
+ sp.onaudioprocess = createHandler(i);
+ sp.connect(destination);
+ }
+
+ source.start(0);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolver-upmixing-1-channel-response.html b/dom/media/webaudio/test/test_convolver-upmixing-1-channel-response.html
new file mode 100644
index 0000000000..50bd594821
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolver-upmixing-1-channel-response.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<title>Test that up-mixing signals in ConvolverNode processing is linear</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const EPSILON = 3.0 * Math.pow(2, -22);
+// sampleRate is a power of two so that delay times are exact in base-2
+// floating point arithmetic.
+const SAMPLE_RATE = 32768;
+// Length of stereo convolver input in frames (arbitrary):
+const STEREO_FRAMES = 256;
+// Length of mono signal in frames. This is more than two blocks to ensure
+// that at least one block will be mono, even if interpolation in the
+// DelayNode means that stereo is output one block earlier and later than
+// if frames are delayed without interpolation.
+const MONO_FRAMES = 384;
+// Length of response buffer:
+const RESPONSE_FRAMES = 256;
+
+function test_linear_upmixing(channelInterpretation, initial_mono_frames)
+{
+ let stereo_input_end = initial_mono_frames + STEREO_FRAMES;
+ // Total length:
+ let length = stereo_input_end + RESPONSE_FRAMES + MONO_FRAMES + STEREO_FRAMES;
+ // The first two channels contain signal where some up-mixing occurs
+ // internally to a ConvolverNode when a stereo signal is added and removed.
+ // The last two channels are expected to contain the same signal, but mono
+ // and stereo signals are convolved independently before up-mixing the mono
+ // output to mix with the stereo output.
+ let context = new OfflineAudioContext({numberOfChannels: 4,
+ length,
+ sampleRate: SAMPLE_RATE});
+
+ let response = new AudioBuffer({numberOfChannels: 1,
+ length: RESPONSE_FRAMES,
+ sampleRate: context.sampleRate});
+
+ // Two stereo channel splitters will collect test and reference outputs.
+ let destinationMerger = new ChannelMergerNode(context, {numberOfInputs: 4});
+ destinationMerger.connect(context.destination);
+ let testSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ let referenceSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ testSplitter.connect(destinationMerger, 0, 0);
+ testSplitter.connect(destinationMerger, 1, 1);
+ referenceSplitter.connect(destinationMerger, 0, 2);
+ referenceSplitter.connect(destinationMerger, 1, 3);
+
+ // A GainNode mixes reference stereo and mono signals because up-mixing
+ // cannot be performed at a channel splitter.
+ let referenceGain = new GainNode(context);
+ referenceGain.connect(referenceSplitter);
+ referenceGain.channelInterpretation = channelInterpretation;
+
+ // The impulse response for convolution contains two impulses so as to test
+ // effects in at least two processing blocks.
+ response.getChannelData(0)[0] = 0.5;
+ response.getChannelData(0)[response.length - 1] = 0.5;
+
+ let testConvolver = new ConvolverNode(context, {disableNormalization: true,
+ buffer: response});
+ testConvolver.channelInterpretation = channelInterpretation;
+ let referenceMonoConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ let referenceStereoConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ // No need to set referenceStereoConvolver.channelInterpretation because
+ // input is either silent or stereo.
+ testConvolver.connect(testSplitter);
+ // Mix reference convolver output.
+ referenceMonoConvolver.connect(referenceGain);
+ referenceStereoConvolver.connect(referenceGain);
+
+ // The DelayNode initially has a single channel of silence, which is used to
+ // switch the stereo signal in and out. The output of the delay node is
+ // first mono silence (if there is a non-zero initial_mono_frames), then
+ // stereo, then mono silence, and finally stereo again. maxDelayTime is
+ // used to generate the middle mono silence period from the initial silence
+ // in the DelayNode and then generate the final period of stereo from its
+ // initial input.
+ let maxDelayTime = (length - STEREO_FRAMES) / context.sampleRate;
+ let delay =
+ new DelayNode(context,
+ {maxDelayTime,
+ delayTime: initial_mono_frames / context.sampleRate});
+ // Schedule an increase in the delay to return to mono silence.
+ delay.delayTime.setValueAtTime(maxDelayTime,
+ stereo_input_end / context.sampleRate);
+ delay.connect(testConvolver);
+ delay.connect(referenceStereoConvolver);
+
+ let stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ stereoMerger.connect(delay);
+
+ // Three independent signals
+ let monoSignal = new OscillatorNode(context, {frequency: 440});
+ let leftSignal = new OscillatorNode(context, {frequency: 450});
+ let rightSignal = new OscillatorNode(context, {frequency: 460});
+ monoSignal.connect(testConvolver);
+ monoSignal.connect(referenceMonoConvolver);
+ leftSignal.connect(stereoMerger, 0, 0);
+ rightSignal.connect(stereoMerger, 0, 1);
+ monoSignal.start();
+ leftSignal.start();
+ rightSignal.start();
+
+ return context.startRendering().
+ then((buffer) => {
+ let maxDiff = -1.0;
+ let frameIndex = 0;
+ let channelIndex = 0;
+ for (let c = 0; c < 2; ++c) {
+ let testOutput = buffer.getChannelData(0 + c);
+ let referenceOutput = buffer.getChannelData(2 + c);
+ for (var i = 0; i < buffer.length; ++i) {
+ var diff = Math.abs(testOutput[i] - referenceOutput[i]);
+ if (diff > maxDiff) {
+ maxDiff = diff;
+ frameIndex = i;
+ channelIndex = c;
+ }
+ }
+ }
+ assert_approx_equals(buffer.getChannelData(0 + channelIndex)[frameIndex],
+ buffer.getChannelData(2 + channelIndex)[frameIndex],
+ EPSILON,
+ `output at ${frameIndex} ` +
+ `in channel ${channelIndex}` );
+ });
+}
+
+promise_test(() => test_linear_upmixing("speakers", MONO_FRAMES),
+ "speakers, initially mono");
+promise_test(() => test_linear_upmixing("discrete", MONO_FRAMES),
+ "discrete");
+// Gecko uses a separate path for "speakers" up-mixing when the convolver's
+// first input is stereo, so test that separately.
+promise_test(() => test_linear_upmixing("speakers", 0),
+ "speakers, initially stereo");
+</script>
diff --git a/dom/media/webaudio/test/test_convolverNode.html b/dom/media/webaudio/test/test_convolverNode.html
new file mode 100644
index 0000000000..c1677aafab
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNode.html
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the ConvolverNode interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var conv = new ConvolverNode(context);
+
+ is(conv.channelCount, 2, "Convolver node has 2 input channels by default");
+ is(conv.channelCountMode, "clamped-max", "Correct channelCountMode for the Convolver node");
+  is(conv.channelInterpretation, "speakers", "Correct channelInterpretation for the Convolver node");
+
+ is(conv.buffer, null, "Default buffer value");
+ is(conv.normalize, true, "Default normalize value");
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolverNodeChannelCount.html b/dom/media/webaudio/test/test_convolverNodeChannelCount.html
new file mode 100644
index 0000000000..03824578ea
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeChannelCount.html
@@ -0,0 +1,61 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ConvolverNode channel count</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const signalLength = 2048;
+const responseLength = 1000;
+const outputLength = 2048; // < signalLength + responseLength to test bug 910171
+
+var gTest = {
+ length: outputLength,
+ numberOfChannels: 2,
+ createGraph(context) {
+ var buffer = context.createBuffer(2, signalLength, context.sampleRate);
+ for (var i = 0; i < signalLength; ++i) {
+ var sample = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ // When mixed into a single channel, this produces silence
+ buffer.getChannelData(0)[i] = sample;
+ buffer.getChannelData(1)[i] = -sample;
+ }
+
+ var response = context.createBuffer(2, responseLength, context.sampleRate);
+ for (var i = 0; i < responseLength; ++i) {
+ response.getChannelData(0)[i] = i / responseLength;
+ response.getChannelData(1)[i] = 1 - (i / responseLength);
+ }
+
+ var convolver = context.createConvolver();
+ convolver.buffer = response;
+ convolver.channelCount = 1;
+
+ expectException(function() { convolver.channelCount = 3; },
+ DOMException.NOT_SUPPORTED_ERR);
+ convolver.channelCountMode = "explicit";
+ expectException(function() { convolver.channelCountMode = "max"; },
+ DOMException.NOT_SUPPORTED_ERR);
+ convolver.channelInterpretation = "discrete";
+ convolver.channelInterpretation = "speakers";
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(convolver);
+ source.start(0);
+
+ return convolver;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html b/dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html
new file mode 100644
index 0000000000..bede517b2e
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeChannelInterpretationChanges.html
@@ -0,0 +1,169 @@
+<!DOCTYPE html>
+<title>Test up-mixing in ConvolverNode after ChannelInterpretation change</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+// This test is not in wpt because it requires that multiple changes to the
+// nodes in an AudioContext during a single event will be processed by the
+// audio thread in a single transaction. Gecko provides that, but this is not
+// currently required by the Web Audio API.
+
+const EPSILON = Math.pow(2, -23);
+// sampleRate is a power of two so that delay times are exact in base-2
+// floating point arithmetic.
+const SAMPLE_RATE = 32768;
+// Length of initial mono signal in frames, if the test has an initial mono
+// signal. This is more than one block to ensure that at least one block
+// will be mono, even if interpolation in the DelayNode means that stereo is
+// output one block earlier than if frames are delayed without interpolation.
+const MONO_FRAMES = 256;
+// Length of response buffer. This is greater than 1 to ensure that the
+// convolver has stereo output at least one block after stereo input is
+// disconnected.
+const RESPONSE_FRAMES = 2;
+
+function test_interpretation_change(t, initialInterpretation, initialMonoFrames)
+{
+ let context = new AudioContext({sampleRate: SAMPLE_RATE});
+
+ // Three independent signals. These are constant so that results are
+ // independent of the timing of the `ended` event.
+  let monoOffset = 0.25;
+ let monoSource = new ConstantSourceNode(context, {offset: monoOffset});
+ let leftOffset = 0.125;
+ let rightOffset = 0.5;
+ let leftSource = new ConstantSourceNode(context, {offset: leftOffset});
+ let rightSource = new ConstantSourceNode(context, {offset: rightOffset});
+ monoSource.start();
+ leftSource.start();
+ rightSource.start();
+
+ let stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ leftSource.connect(stereoMerger, 0, 0);
+ rightSource.connect(stereoMerger, 0, 1);
+
+ // The DelayNode initially has a single channel of silence, and so the
+ // output of the delay node is first mono silence (if there is a non-zero
+ // initialMonoFrames), then stereo. In Gecko, this triggers a convolver
+ // configuration that is different for different channelInterpretations.
+ let delay =
+ new DelayNode(context,
+ {maxDelayTime: MONO_FRAMES / context.sampleRate,
+ delayTime: initialMonoFrames / context.sampleRate});
+ stereoMerger.connect(delay);
+
+ // Two convolvers with the same impulse response. The test convolver will
+ // process a mix of stereo and mono signals. The reference convolver will
+ // always process stereo, including the up-mixed mono signal.
+ let response = new AudioBuffer({numberOfChannels: 1,
+ length: RESPONSE_FRAMES,
+ sampleRate: context.sampleRate});
+ response.getChannelData(0)[response.length - 1] = 1;
+
+ let testConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ testConvolver.channelInterpretation = initialInterpretation;
+ let referenceConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ // No need to set referenceConvolver.channelInterpretation because
+ // input is always stereo, due to up-mixing at gain node.
+ let referenceMixer = new GainNode(context);
+ referenceMixer.channelCount = 2;
+ referenceMixer.channelCountMode = "explicit";
+ referenceMixer.channelInterpretation = initialInterpretation;
+ referenceMixer.connect(referenceConvolver);
+
+ delay.connect(testConvolver);
+ delay.connect(referenceMixer);
+
+ monoSource.connect(testConvolver);
+ monoSource.connect(referenceMixer);
+
+ // A timer sends 'ended' when the convolvers are known to be processing
+ // stereo.
+ let timer = new ConstantSourceNode(context);
+ timer.start();
+ timer.stop((initialMonoFrames + 1) / context.sampleRate);
+
+ timer.onended = t.step_func(() => {
+ let changedInterpretation =
+ initialInterpretation == "speakers" ? "discrete" : "speakers";
+
+ // Switch channelInterpretation in test and reference paths.
+ testConvolver.channelInterpretation = changedInterpretation;
+ referenceMixer.channelInterpretation = changedInterpretation;
+
+ // Disconnect the stereo input from both test and reference convolvers.
+ // The disconnected convolvers will continue to output stereo for at least
+ // one frame. The test convolver will up-mix its mono input into its two
+ // buffers.
+ delay.disconnect();
+
+ // Capture the outputs in a script processor.
+ //
+ // The first two channels contain signal where some up-mixing occurs
+ // internally to the test convolver.
+ //
+ // The last two channels are expected to contain the same signal, but
+ // up-mixing was performed at a GainNode prior to convolution.
+ //
+ // Two stereo splitters will collect test and reference outputs.
+ let testSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ let referenceSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ testConvolver.connect(testSplitter);
+ referenceConvolver.connect(referenceSplitter);
+
+ let outputMerger = new ChannelMergerNode(context, {numberOfInputs: 4});
+ testSplitter.connect(outputMerger, 0, 0);
+ testSplitter.connect(outputMerger, 1, 1);
+ referenceSplitter.connect(outputMerger, 0, 2);
+ referenceSplitter.connect(outputMerger, 1, 3);
+
+ let processor = context.createScriptProcessor(256, 4, 0);
+ outputMerger.connect(processor);
+
+ processor.onaudioprocess = t.step_func_done((e) => {
+ e.target.onaudioprocess = null;
+ outputMerger.disconnect();
+
+ // The test convolver output is stereo for the first block.
+ let length = 128;
+
+ let buffer = e.inputBuffer;
+ let maxDiff = -1.0;
+ let frameIndex = 0;
+ let channelIndex = 0;
+ for (let c = 0; c < 2; ++c) {
+ let testOutput = buffer.getChannelData(0 + c);
+ let referenceOutput = buffer.getChannelData(2 + c);
+ for (var i = 0; i < length; ++i) {
+ var diff = Math.abs(testOutput[i] - referenceOutput[i]);
+ if (diff > maxDiff) {
+ maxDiff = diff;
+ frameIndex = i;
+ channelIndex = c;
+ }
+ }
+ }
+ assert_approx_equals(buffer.getChannelData(0 + channelIndex)[frameIndex],
+ buffer.getChannelData(2 + channelIndex)[frameIndex],
+ EPSILON,
+ `output at ${frameIndex} ` +
+ `in channel ${channelIndex}` );
+ });
+ });
+}
+
+async_test((t) => test_interpretation_change(t, "speakers", MONO_FRAMES),
+ "speakers to discrete, initially mono");
+async_test((t) => test_interpretation_change(t, "discrete", MONO_FRAMES),
+ "discrete to speakers");
+// Gecko uses a separate path for "speakers" initial up-mixing when the
+// convolver's first input is stereo, so test that separately.
+async_test((t) => test_interpretation_change(t, "speakers", 0),
+ "speakers to discrete, initially stereo");
+</script>
diff --git a/dom/media/webaudio/test/test_convolverNodeDelay.html b/dom/media/webaudio/test/test_convolverNodeDelay.html
new file mode 100644
index 0000000000..2e8caf8027
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeDelay.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<title>Test convolution to delay a triangle pulse</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const sampleRate = 48000;
+const LENGTH = 12800;
+// tolerate 16-bit math.
+const EPSILON = 1.0 / Math.pow(2, 15);
+
+// Triangle pulse
+var sourceBuffer = new OfflineAudioContext(1, 1, sampleRate).
+ createBuffer(1, 2 * 128, sampleRate);
+var channelData = sourceBuffer.getChannelData(0);
+for (var i = 0; i < 128; ++i) {
+ channelData[i] = i/128;
+ channelData[128 + i] = 1.0 - i/128;
+}
+
+function test_delay_index(delayIndex) {
+
+ var context = new OfflineAudioContext(2, LENGTH, sampleRate);
+
+ var merger = context.createChannelMerger(2);
+ merger.connect(context.destination);
+
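+  // An impulse response containing a single 1.0 at index delayIndex makes the
+  // unnormalized convolver act as a pure delay of delayIndex frames, which
+  // the DelayNode below reproduces for comparison.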
+ var impulse = context.createBuffer(1, delayIndex + 1, sampleRate);
+ impulse.getChannelData(0)[delayIndex] = 1.0;
+ var convolver = context.createConvolver();
+ convolver.normalize = false;
+ convolver.buffer = impulse;
+ convolver.connect(merger, 0, 0);
+
+ var delayTime = delayIndex/sampleRate;
+ var delay = context.createDelay(delayTime || 1/sampleRate);
+ delay.delayTime.value = delayTime;
+ delay.connect(merger, 0, 1);
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+ source.connect(convolver);
+ source.connect(delay);
+ source.start(0);
+
+ return context.startRendering().
+ then((buffer) => {
+ var convolverOutput = buffer.getChannelData(0);
+ var delayOutput = buffer.getChannelData(1);
+ var maxDiff = 0.0;
+ var maxIndex = 0;
+ for (var i = 0; i < buffer.length; ++i) {
+ var diff = Math.abs(convolverOutput[i] - delayOutput[i]);
+ if (diff > maxDiff) {
+ maxDiff = diff;
+ maxIndex = i;
+ }
+ }
+ // The convolver should produce similar output to the delay.
+ assert_approx_equals(convolverOutput[maxIndex], delayOutput[maxIndex],
+ EPSILON, "output at " + maxIndex);
+ });
+}
+
+// The 5/4 ratio provides sampling across a range of delays and offsets within
+// blocks.
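+// The resulting sequence of tested delays starts 0, 1, 2, 3, 5, 7, 10, 13,
+// 17, 22, 28, ...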
+for (var delayIndex = 0;
+ delayIndex < LENGTH;
+ delayIndex = Math.floor((5 * (delayIndex + 1)) / 4)) {
+ promise_test(test_delay_index.bind(null, delayIndex),
+ "Delay " + delayIndex);
+}
+</script>
diff --git a/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html b/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html
new file mode 100644
index 0000000000..1cfb51ce8a
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<title>Test convolution effect has finite duration</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+
+ const responseLength = 256;
+ // Accept an influence period of twice the responseLength to accept FFT
+ // implementations.
+ const tolerancePeriod = 2 * responseLength;
+ const totalSize = tolerancePeriod + responseLength;
+
+ var context = new OfflineAudioContext(1, totalSize, 48000);
+
+ var responseBuffer =
+ context.createBuffer(1, responseLength, context.sampleRate);
+ var responseChannelData = responseBuffer.getChannelData(0);
+ responseChannelData[0] = 1;
+ responseChannelData[responseLength - 1] = 1;
+ var convolver = context.createConvolver();
+ convolver.buffer = responseBuffer;
+ convolver.connect(context.destination);
+
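+  // A NaN input poisons every output sample it influences (NaN propagates
+  // through addition and multiplication), so once the response's finite
+  // influence has passed, the rendered channel must be exactly zero again.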
+ var sourceBuffer = context.createBuffer(1, totalSize, context.sampleRate);
+ sourceBuffer.getChannelData(0)[0] = NaN;
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+ source.connect(convolver);
+ source.start();
+
+ return context.startRendering().
+ then((buffer) => {
+ var convolverOutput = buffer.getChannelData(0);
+ // There should be no non-zeros after the tolerance period.
+ var testIndex = tolerancePeriod;
+ for (;
+ testIndex < buffer.length - 1 && convolverOutput[testIndex] == 0;
+ ++testIndex) {
+ }
+ assert_equals(convolverOutput[testIndex], 0, "output at " + testIndex);
+ });
+});
+</script>
diff --git a/dom/media/webaudio/test/test_convolverNodeNormalization.html b/dom/media/webaudio/test/test_convolverNodeNormalization.html
new file mode 100644
index 0000000000..24cb7d1670
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeNormalization.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html>
+<title>Test normalization of convolution buffers</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+// Constants from
+// https://www.w3.org/TR/2015/WD-webaudio-20151208/#widl-ConvolverNode-normalize
+const GainCalibration = 0.00125;
+const GainCalibrationSampleRate = 44100;
+
+const sampleRate = GainCalibrationSampleRate;
+const LENGTH = 12800;
+// tolerate 16-bit math.
+const EPSILON = 1.0 / Math.pow(2, 15);
+
+function test_normalization_via_response_concat(delayIndex)
+{
+ var context = new OfflineAudioContext(1, LENGTH, sampleRate);
+
+ var impulse = context.createBuffer(1, 1, sampleRate);
+ impulse.getChannelData(0)[0] = 1.0;
+ var source = context.createBufferSource();
+ source.buffer = impulse;
+ source.start(0);
+
+ // Construct a set of adjacent responses in such a way that, when each is
+ // convolved with the impulse, they can be merged to produce a constant.
+
+ // The 5/4 ratio provides a range of lengths with different offsets within
+ // blocks.
+ var i = 0;
+ for (var responseEnd = 1;
+ i < LENGTH;
+ responseEnd = Math.floor((5 * responseEnd) / 4) + 1) {
+ var responseBuffer = context.createBuffer(1, responseEnd, sampleRate);
+ var response = responseBuffer.getChannelData(0);
+ var responseStart = i;
+ // The values in the response should be normalized, and so the output
+ // should not be dependent on the value. Pick a changing value to test
+ // this.
+ var value = responseStart + 1;
+ for (; i < responseEnd; ++i) {
+ response[i] = value;
+ }
+ var convolver = context.createConvolver();
+ convolver.normalize = true;
+ convolver.buffer = responseBuffer;
+ convolver.connect(context.destination);
+ // Undo the normalization calibration by scaling the impulse so as to
+ // expect unit pulse output from the convolver.
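+  // A sketch of the arithmetic, assuming the RMS-based normalization in the
+  // spec draft linked above: with matching sample rates the convolver scales
+  // its response by GainCalibration / rms, and a constant value spanning
+  // (responseEnd - responseStart) of responseEnd frames has an RMS carrying
+  // a factor of sqrt((responseEnd - responseStart) / responseEnd). The gain
+  // below cancels both factors, so each pulse should contribute exactly 1.0.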
+ var gain = context.createGain();
+ gain.gain.value =
+ Math.sqrt((responseEnd - responseStart) / responseEnd) / GainCalibration;
+ gain.connect(convolver);
+ source.connect(gain);
+ }
+
+ return context.startRendering().
+ then((buffer) => {
+ var output = buffer.getChannelData(0);
+ var max = output[0];
+ var maxIndex = 0;
+ var min = max;
+ var minIndex = 0;
+ for (var i = 1; i < buffer.length; ++i) {
+ if (output[i] > max) {
+ max = output[i];
+ maxIndex = i;
+ } else if (output[i] < min) {
+ min = output[i];
+ minIndex = i;
+ }
+ }
+ assert_approx_equals(output[maxIndex], 1.0, EPSILON,
+ "max output at " + maxIndex);
+ assert_approx_equals(output[minIndex], 1.0, EPSILON,
+ "min output at " + minIndex);
+ });
+}
+
+promise_test(test_normalization_via_response_concat,
+ "via response concatenation");
+</script>
diff --git a/dom/media/webaudio/test/test_convolverNodeOOM.html b/dom/media/webaudio/test/test_convolverNodeOOM.html
new file mode 100644
index 0000000000..2983d2f65c
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeOOM.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ConvolverNode with very large buffer that triggers an OOM</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ skipOfflineContextTests: true,
+ createGraph(context) {
+ var source = context.createOscillator();
+ var convolver = context.createConvolver();
+ // Very big buffer that results in an OOM
+ try {
+      var buffer = context.createBuffer(2, 300000000, context.sampleRate);
+      // Touch the channel data to force the allocation.
+      var channel = buffer.getChannelData(0);
+    } catch(e) {
+      // OOM when attempting to create the buffer; this can happen on 32-bit
+      // OSes. Simply return here.
+ return convolver;
+ }
+ source.connect(convolver);
+ try {
+ convolver.buffer = buffer;
+ } catch (e) {
+ // This can also OOM.
+ return convolver;
+ }
+ source.start();
+ return convolver;
+ }
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolverNodePassThrough.html b/dom/media/webaudio/test/test_convolverNodePassThrough.html
new file mode 100644
index 0000000000..54682aee0c
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodePassThrough.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ConvolverNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var convolver = context.createConvolver();
+
+ source.buffer = this.buffer;
+ convolver.buffer = this.buffer;
+
+ source.connect(convolver);
+
+ var convolverWrapped = SpecialPowers.wrap(convolver);
+ ok("passThrough" in convolverWrapped, "ConvolverNode should support the passThrough API");
+ convolverWrapped.passThrough = true;
+
+ source.start(0);
+ return convolver;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolverNodeWithGain.html b/dom/media/webaudio/test/test_convolverNodeWithGain.html
new file mode 100644
index 0000000000..0762f16329
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNodeWithGain.html
@@ -0,0 +1,62 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ConvolverNode after a GainNode - Bug 891254 </title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const signalLength = 2048;
+const responseLength = 100;
+const outputLength = 4096; // > signalLength + responseLength
+
+var gTest = {
+ length: outputLength,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, signalLength, context.sampleRate);
+ for (var i = 0; i < signalLength; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(2 * Math.PI * i / signalLength);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.start(0);
+
+ var response = context.createBuffer(1, responseLength, context.sampleRate);
+ for (var i = 0; i < responseLength; ++i) {
+ response.getChannelData(0)[i] = i / responseLength;
+ }
+
+ var gain = context.createGain();
+ gain.gain.value = -1;
+ source.connect(gain);
+
+ var convolver1 = context.createConvolver();
+ convolver1.buffer = response;
+ gain.connect(convolver1);
+
+ var convolver2 = context.createConvolver();
+ convolver2.buffer = response;
+ source.connect(convolver2);
+
+ // The output of convolver1 should be the inverse of convolver2, so blend
+ // them together and expect silence.
+ var blend = context.createGain();
+ convolver1.connect(blend);
+ convolver2.connect(blend);
+
+ return blend;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_convolverNode_mono_mono.html b/dom/media/webaudio/test/test_convolverNode_mono_mono.html
new file mode 100644
index 0000000000..1585e1b619
--- /dev/null
+++ b/dom/media/webaudio/test/test_convolverNode_mono_mono.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+
+<html>
+<head>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<script type="text/javascript" src="webaudio.js"></script>
+<script type="text/javascript" src="layouttest-glue.js"></script>
+<script type="text/javascript" src="blink/audio-testing.js"></script>
+<script type="text/javascript" src="blink/convolution-testing.js"></script>
+<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+
+<body>
+
+<div id="description"></div>
+<div id="console"></div>
+
+<script>
+description("Tests ConvolverNode processing a mono channel with mono impulse response.");
+SimpleTest.waitForExplicitFinish();
+
+// To test the convolver, we convolve two square pulses together to
+// produce a triangular pulse. To verify the result is correct we
+// check several parts of the result. First, we make sure the initial
+// part of the result is zero (due to the latency in the convolver).
+// Next, the triangular pulse should match the theoretical result to
+// within some roundoff. After the triangular pulse, the result
+// should be exactly zero, but round-off prevents that. We make sure
+// the part after the pulse is sufficiently close to zero. Finally,
+// the result should be exactly zero because the inputs are exactly
+// zero.
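+// As a small worked example, convolving the square pulse [1,1,1,1] with
+// itself gives the triangle [1,2,3,4,3,2,1], whose peak equals the pulse
+// length.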
+function runTest() {
+ if (window.testRunner) {
+ testRunner.dumpAsText();
+ testRunner.waitUntilDone();
+ }
+
+ window.jsTestIsAsync = true;
+
+ // Create offline audio context.
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+
+ var squarePulse = createSquarePulseBuffer(context, pulseLengthFrames);
+ var trianglePulse = createTrianglePulseBuffer(context, 2 * pulseLengthFrames);
+
+ var bufferSource = context.createBufferSource();
+ bufferSource.buffer = squarePulse;
+
+ var convolver = context.createConvolver();
+ convolver.normalize = false;
+ convolver.buffer = squarePulse;
+
+ bufferSource.connect(convolver);
+ convolver.connect(context.destination);
+
+ bufferSource.start(0);
+
+ context.oncomplete = checkConvolvedResult(trianglePulse);
+ context.startRendering();
+}
+
+function finishJSTest() {
+ SimpleTest.finish();
+}
+
+runTest();
+successfullyParsed = true;
+
+</script>
+
+<script src="../fast/js/resources/js-test-post.js"></script>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_currentTime.html b/dom/media/webaudio/test/test_currentTime.html
new file mode 100644
index 0000000000..66fdf42653
--- /dev/null
+++ b/dom/media/webaudio/test/test_currentTime.html
@@ -0,0 +1,27 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioContext.currentTime</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test needs to wait a while for the AudioContext's timer to start.");
+addLoadEvent(function() {
+ var ac = new AudioContext();
+ is(ac.currentTime, 0, "AudioContext.currentTime should be 0 initially");
+ ac.onstatechange = function () {
+ ok(ac.state == "running", "AudioContext.currentTime should eventually start");
+ ok(ac.currentTime > 0, "AudioContext.currentTime should have increased by now");
+ SimpleTest.finish();
+ }
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html b/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html
new file mode 100644
index 0000000000..e7c6d2db0c
--- /dev/null
+++ b/dom/media/webaudio/test/test_decodeAudioDataOnDetachedBuffer.html
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML>
+<html>
+ <meta charset=utf-8>
+<head>
+ <title>Bug 1308434 - Test DecodeAudioData on detached buffer</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script type="text/javascript">
+var testDecodeAudioDataOnDetachedBuffer = function(buffer) {
+ var context = new AudioContext();
+
+ // make the buffer detached
+ context.decodeAudioData(buffer);
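+  // decodeAudioData takes ownership of the ArrayBuffer and detaches it, so a
+  // second decode attempt on the same buffer must reject.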
+
+ // check that the buffer is detached
+ is(buffer.byteLength, 0, "Buffer should be detached");
+
+ // call decodeAudioData on detached buffer
+ context.decodeAudioData(buffer).then(function(b) {
+ ok(false, "We should not be able to decode the detached buffer but we do");
+ SimpleTest.finish();
+ }, function(r) {
+ SimpleTest.isa(r, TypeError);
+ is(r.message, "BaseAudioContext.decodeAudioData: Buffer argument can't be a detached buffer", "Incorrect message");
+ SimpleTest.finish();
+ });
+};
+
+var filename = "small-shot.mp3";
+
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", filename);
+ xhr.responseType = "arraybuffer";
+
+ xhr.onload = function() {
+ testDecodeAudioDataOnDetachedBuffer(xhr.response);
+ };
+
+ xhr.send();
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_decodeAudioDataPromise.html b/dom/media/webaudio/test/test_decodeAudioDataPromise.html
new file mode 100644
index 0000000000..139a686db1
--- /dev/null
+++ b/dom/media/webaudio/test/test_decodeAudioDataPromise.html
@@ -0,0 +1,62 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the decodeAudioData API with Promise</title>
+
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ <script src="webaudio.js"></script>
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+var finished = 0;
+
+function finish() {
+ if (++finished == 2) {
+ SimpleTest.finish();
+ }
+}
+
+var ac = new AudioContext();
+// Test that the promise is rejected with an invalid source buffer.
+expectNoException(function() {
+ var p = ac.decodeAudioData(" ");
+ ok(p instanceof Promise, "AudioContext.decodeAudioData should return a Promise");
+ p.then(function(data) {
+ ok(false, "Promise should not resolve with an invalid source buffer.");
+ finish();
+ }).catch(function(e) {
+ ok(true, "Promise should be rejected with an invalid source buffer.");
+ ok(e.name == "TypeError", "The error should be TypeError");
+ finish();
+ })
+});
+
+// Test that the promise is resolved with a valid source buffer.
+var xhr = new XMLHttpRequest();
+xhr.open("GET", "ting-44.1k-1ch.ogg", true);
+xhr.responseType = "arraybuffer";
+xhr.onload = function() {
+ var p = ac.decodeAudioData(xhr.response);
+ ok(p instanceof Promise, "AudioContext.decodeAudioData should return a Promise");
+ p.then(function(data) {
+ ok(data instanceof AudioBuffer, "Promise should resolve, passing an AudioBuffer");
+ ok(true, "Promise should resolve with a valid source buffer.");
+ finish();
+ }).catch(function() {
+ ok(false, "Promise should not be rejected with a valid source buffer.");
+ finish();
+ });
+};
+xhr.send();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_decodeAudioError.html b/dom/media/webaudio/test/test_decodeAudioError.html
new file mode 100644
index 0000000000..f18b971ac4
--- /dev/null
+++ b/dom/media/webaudio/test/test_decodeAudioError.html
@@ -0,0 +1,74 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the decodeAudioData Errors</title>
+
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ <script src="webaudio.js"></script>
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+var finished = 0;
+
+var ctx = new AudioContext();
+
+function errorExpectedWithFile(file, errorMsg) {
+ var xhr = new XMLHttpRequest();
+ function test(e) {
+ ok(e instanceof DOMException,
+ "The exception should be an instance of DOMException");
+ ok(e.name == "EncodingError",
+ "The exception name should be EncodingError");
+ ok(e.message == errorMsg,
+ "The exception message is not the one intended.\n" +
+ "\tExpected : " + errorMsg + "\n" +
+ "\tGot : " + e.message );
+ finish();
+ }
+ xhr.open("GET", file, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function() {
+ ctx.decodeAudioData(xhr.response, buffer => {
+ ok(false, "You should not be able to decode that");
+ finish();
+ }, e => test(e))
+ .then(buffer => {
+ ok(false, "You should not be able to decode that");
+ finish();
+ })
+ .catch(e => test(e));
+ };
+ xhr.send();
+}
+
+function finish() {
+ if (++finished == 4) {
+ SimpleTest.finish();
+ }
+}
+
+// Unknown Content
+errorExpectedWithFile("404", "The buffer passed to decodeAudioData contains an unknown content type.");
+
+// Invalid Content
+errorExpectedWithFile("invalidContent.flac", "The buffer passed to decodeAudioData contains invalid content which cannot be decoded successfully.");
+
+// No Audio
+// # Bug 1656032
+// Think about increasing the finish counter to 6 when activating this line
+// errorExpectedWithFile("noaudio.webm", "The buffer passed to decodeAudioData does not contain any audio.");
+
+// Unknown Error
+// errorExpectedWithFile("There is no file we can't handle", "An unknown error occurred while processing decodeAudioData.");
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_decodeMultichannel.html b/dom/media/webaudio/test/test_decodeMultichannel.html
new file mode 100644
index 0000000000..a799c641ee
--- /dev/null
+++ b/dom/media/webaudio/test/test_decodeMultichannel.html
@@ -0,0 +1,75 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset=utf-8>
+<head>
+  <title>Test that we can decode multichannel files with webaudio and &lt;audio&gt;</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+var testcases = [
+ {
+ filename: "audio-quad.wav",
+ channels: 4
+ },
+ {
+ filename: "8kHz-320kbps-6ch.aac",
+ channels: 6
+ }
+];
+
+SimpleTest.waitForExplicitFinish();
+
+function decodeUsingAudioElement(filename, resolve) {
+ var a = new Audio();
+ a.addEventListener("error", function() {
+ ok(false, "Error loading metadata");
+ resolve();
+ });
+ a.addEventListener("loadedmetadata", function() {
+ ok(true, "Metadata Loaded");
+ resolve();
+ });
+
+ a.src = filename;
+ a.load();
+}
+
+function testOne({filename, channels}) {
+ return new Promise(resolve => {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", filename);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function() {
+ var context = new AudioContext();
+ context.decodeAudioData(xhr.response, function(b) {
+ ok(true, "Decoding of a wave file with four channels succeded.");
+ is(b.numberOfChannels,
+ channels,
+ `The AudioBuffer decoded from ${filename} should have ${channels} channels.`);
+ decodeUsingAudioElement(filename, resolve);
+ }, function() {
+        ok(false, `Decoding ${filename} failed.`);
+ decodeUsingAudioElement(filename, resolve);
+ });
+ };
+ xhr.send(null);
+ });
+}
+
+async function runTest() {
+ for (var testcase of testcases) {
+ await testOne(testcase);
+ }
+
+ SimpleTest.finish();
+}
+
+addLoadEvent(runTest);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_decodeOpusTail.html b/dom/media/webaudio/test/test_decodeOpusTail.html
new file mode 100644
index 0000000000..451b2b6a23
--- /dev/null
+++ b/dom/media/webaudio/test/test_decodeOpusTail.html
@@ -0,0 +1,28 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Regression test to check that opus files don't have a tail at the end.</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+// This fetches a 1 second Opus file and decodes it to a buffer. The Opus file
+// is decoded at 48kHz and the OfflineAudioContext also runs at 48kHz, so no
+// resampling takes place.
+fetch('sweep-300-330-1sec.opus')
+.then(function(response) { return response.arrayBuffer(); })
+.then(function(buffer) {
+ var off = new OfflineAudioContext(1, 128, 48000);
+ off.decodeAudioData(buffer, function(decoded) {
+ var pcm = decoded.getChannelData(0);
+ is(pcm.length, 48000, "The length of the decoded file is correct.");
+ SimpleTest.finish();
+ });
+});
+
+</script>
diff --git a/dom/media/webaudio/test/test_decoderDelay.html b/dom/media/webaudio/test/test_decoderDelay.html
new file mode 100644
index 0000000000..d0fbfbed29
--- /dev/null
+++ b/dom/media/webaudio/test/test_decoderDelay.html
@@ -0,0 +1,144 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8" />
+ <title>Test that decoder delay is handled</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+ <script class="testbody" type="text/javascript">
+ SimpleTest.waitForExplicitFinish();
+ const {AppConstants} =
+ SpecialPowers.ChromeUtils.import("resource://gre/modules/AppConstants.jsm");
+
+ var tests_half_a_second = [
+ "half-a-second-1ch-44100-aac.mp4",
+ "half-a-second-1ch-44100-flac.flac",
+ "half-a-second-1ch-44100-libmp3lame.mp3",
+ "half-a-second-1ch-44100-libopus.opus",
+ "half-a-second-1ch-44100-libopus.webm",
+ "half-a-second-1ch-44100-libvorbis.ogg",
+ "half-a-second-1ch-44100.wav",
+ "half-a-second-1ch-48000-aac.mp4",
+ "half-a-second-1ch-48000-flac.flac",
+ "half-a-second-1ch-48000-libmp3lame.mp3",
+ "half-a-second-1ch-48000-libopus.opus",
+ "half-a-second-1ch-48000-libopus.webm",
+ "half-a-second-1ch-48000-libvorbis.ogg",
+ "half-a-second-1ch-48000.wav",
+ "half-a-second-2ch-44100-aac.mp4",
+ "half-a-second-2ch-44100-flac.flac",
+ "half-a-second-2ch-44100-libmp3lame.mp3",
+ "half-a-second-2ch-44100-libopus.opus",
+ "half-a-second-2ch-44100-libopus.webm",
+ "half-a-second-2ch-44100-libvorbis.ogg",
+ "half-a-second-2ch-44100.wav",
+ "half-a-second-2ch-48000-aac.mp4",
+ "half-a-second-2ch-48000-flac.flac",
+ "half-a-second-2ch-48000-libmp3lame.mp3",
+ "half-a-second-2ch-48000-libopus.opus",
+ "half-a-second-2ch-48000-libopus.webm",
+ "half-a-second-2ch-48000-libvorbis.ogg",
+ "half-a-second-2ch-48000.wav",
+ ];
+
+ // These files are almost exactly half a second long, but either don't carry
+ // enough pre-roll/padding information in their container, or the container
+ // isn't parsed properly, so they aren't trimmed appropriately:
+ // vorbis in webm, opus in mp4, aac in adts.
+ var tests_adts = [
+ "half-a-second-1ch-44100-aac.aac",
+ "half-a-second-1ch-44100-libopus.mp4",
+ "half-a-second-1ch-44100-libvorbis.webm",
+ "half-a-second-1ch-48000-aac.aac",
+ "half-a-second-1ch-48000-libopus.mp4",
+ "half-a-second-1ch-48000-libvorbis.webm",
+ "half-a-second-2ch-44100-aac.aac",
+ "half-a-second-2ch-44100-libopus.mp4",
+ "half-a-second-2ch-44100-libvorbis.webm",
+ "half-a-second-2ch-48000-aac.aac",
+ "half-a-second-2ch-48000-libopus.mp4",
+ "half-a-second-2ch-48000-libvorbis.webm",
+ ];
+
+ // Other files that have interesting characteristics.
+ var tests_others = [
+ {
+ // Very short VBR file, 16 frames of audio at 44100. Padding spanning two
+ // packets.
+ "path": "sixteen-frames.mp3",
+ "frameCount": 16,
+ "samplerate": 44100,
+ "fuzz": {}
+ },
+ {
+ // This is incorrect (the duration should be 0.5s exactly)
+ // This is tracked in https://github.com/mozilla/mp4parse-rust/issues/404
+ "path":"half-a-second-1ch-44100-aac-afconvert.mp4",
+ "frameCount": 22464,
+ "samplerate": 44100,
+ "fuzz": {
+ "android": 2
+ }
+ }
+ ];
+
+ var all_tests = [tests_half_a_second, tests_adts, tests_others].flat();
+
+ var count = 0;
+ function checkDone() {
+ if (++count == all_tests.length) {
+ SimpleTest.finish();
+ }
+ }
+
+ async function doit() {
+ var context = new OfflineAudioContext(1, 128, 48000);
+ tests_half_a_second.forEach(async testfile => {
+ var response = await fetch(testfile);
+ var buffer = await response.arrayBuffer();
+ var decoded = await context.decodeAudioData(buffer);
+ is(
+ decoded.duration,
+ 0.5,
+ "The file " + testfile + " is half a second."
+ );
+ // Value found empirically by inspecting the files. The initial amplitude
+ // should be 0 at phase 0 because these files contain sine waves, but the
+ // compression is sometimes lossy and the first sample is not always
+ // exactly 0.0.
+ ok(
+ Math.abs(decoded.getChannelData(0)[0]) <= 0.022,
+ `The start point for ${testfile} is correct (first sample: ${decoded.getChannelData(0)[0]})`
+ );
+ checkDone();
+ });
+ tests_adts.forEach(async testfile => {
+ var response = await fetch(testfile);
+ var buffer = await response.arrayBuffer();
+ var decoded = await context.decodeAudioData(buffer);
+ // Value found empirically after looking at the files. ADTS containers
+ // don't have encoder delay / padding info so we can't trim correctly.
+ ok(
+ Math.abs(decoded.duration - 0.5) < 0.02,
+ `The ADTS file ${testfile} is about half a second (${decoded.duration}, error: ${Math.abs(decoded.duration-0.5)}).`
+ );
+ checkDone();
+ });
+ tests_others.forEach(async test => {
+ // Get a context at a specific rate to avoid duration changes due to resampling.
+ var contextAtRate = new OfflineAudioContext(1, 128, test.samplerate);
+ var response = await fetch(test.path);
+ var buffer = await response.arrayBuffer();
+ var decoded = await contextAtRate.decodeAudioData(buffer);
+ const fuzz = test.fuzz[AppConstants.platform] ?? 0;
+ ok(Math.abs(decoded.length - test.frameCount) <= fuzz, `${test.path} is ${decoded.length} frames long`);
+ checkDone();
+ });
+ }
+
+ doit();
+ </script>
+</body>
+</html>
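The half-second expectation depends on the demuxer finding the encoder delay and padding in container metadata (an MP4 edit list, a LAME/Xing header in MP3, codec delay fields in WebM/Ogg) and trimming both ends. A sketch of that arithmetic, with hypothetical field names; ADTS carries no such metadata, which is why the `tests_adts` group only asserts an approximate duration:

    // Sketch: recovering the source frame count from raw decoder output.
    // delayFrames/paddingFrames stand in for whatever the demuxer reads
    // from the container; for a 0.5 s file at 44100 Hz this should be 22050.
    function trimmedFrameCount(rawFrames, delayFrames, paddingFrames) {
      return rawFrames - delayFrames - paddingFrames;
    }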
diff --git a/dom/media/webaudio/test/test_delayNode.html b/dom/media/webaudio/test/test_delayNode.html
new file mode 100644
index 0000000000..89172aa86f
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNode.html
@@ -0,0 +1,101 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DelayNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var delay = new DelayNode(context);
+ ok(delay.delayTime, "The audioparam member must exist");
+ is(delay.delayTime.value, 0, "Correct initial value");
+ is(delay.delayTime.defaultValue, 0, "Correct default value");
+ delay.delayTime.value = 0.5;
+ is(delay.delayTime.value, 0.5, "Correct value after setting");
+ is(delay.delayTime.defaultValue, 0, "Correct default value");
+
+ delay = new DelayNode(context, { delayTime: 0.5 });
+ ok(delay.delayTime, "The audioparam member must exist");
+ is(delay.delayTime.value, 0.5, "Correct initial value");
+ is(delay.delayTime.defaultValue, 0, "Correct default value");
+
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+
+ delay = context.createDelay();
+
+ source.buffer = buffer;
+
+ source.connect(delay);
+
+ ok(delay.delayTime, "The audioparam member must exist");
+ is(delay.delayTime.value, 0, "Correct initial value");
+ is(delay.delayTime.defaultValue, 0, "Correct default value");
+ delay.delayTime.value = 0.5;
+ is(delay.delayTime.value, 0.5, "Correct value after setting");
+ is(delay.delayTime.defaultValue, 0, "Correct default value");
+ is(delay.channelCount, 2, "delay node has 2 input channels by default");
+ is(delay.channelCountMode, "max", "Correct channelCountMode for the delay node");
+ is(delay.channelInterpretation, "speakers", "Correct channelInterpretation for the delay node");
+
+ expectException(function() {
+ context.createDelay(0);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ context.createDelay(180);
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectTypeError(function() {
+ context.createDelay(NaN);
+ });
+ expectException(function() {
+ context.createDelay(-1);
+ }, DOMException.NOT_SUPPORTED_ERR);
+
+ expectException(function() {
+ new DelayNode(context, { maxDelayTime: 0 });
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() {
+ new DelayNode(context, { maxDelayTime: 180 });
+ }, DOMException.NOT_SUPPORTED_ERR);
+ expectTypeError(function() {
+ new DelayNode(context, { maxDelayTime: NaN });
+ });
+ expectException(function() {
+ new DelayNode(context, { maxDelayTime: -1 });
+ }, DOMException.NOT_SUPPORTED_ERR);
+
+ context.createDelay(1); // should not throw
+
+ // Delay the source stream by 2048 frames
+ delay.delayTime.value = 2048 / context.sampleRate;
+
+ source.start(0);
+ return delay;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate);
+ for (var i = 2048; i < 2048 * 2; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate);
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
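This test and several others below follow the `gTest`/`runTest()` convention from the shared `webaudio.js` helper, which is not part of this diff. In rough outline (a sketch of the contract these tests imply, not the real helper, which also handles pass-through checks and arrays of expected buffers): `runTest()` builds an offline context of `gTest.length` frames, asks `createExpectedBuffers` for the reference output, renders the node returned by `createGraph`, and compares the two:

    // Sketch of the runTest() contract implied by the gTest objects here.
    function runTestSketch() {
      var context = new OfflineAudioContext(
        gTest.numberOfChannels, gTest.length, 48000);
      var expected = gTest.createExpectedBuffers(context); // may set this.buffer
      gTest.createGraph(context).connect(context.destination);
      context.startRendering().then(function(rendered) {
        compareBuffers(rendered, expected); // helper defined in webaudio.js
        SimpleTest.finish();
      });
    }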
diff --git a/dom/media/webaudio/test/test_delayNodeAtMax.html b/dom/media/webaudio/test/test_delayNodeAtMax.html
new file mode 100644
index 0000000000..3d0afba0ac
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeAtMax.html
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DelayNode with maxDelayTime delay - bug 890528</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const signalLength = 2048;
+const delayLength = 1000; // Not on a block boundary
+const outputLength = 4096; // > signalLength + 2 * delayLength
+
+function applySignal(buffer, offset) {
+ for (var i = 0; i < signalLength; ++i) {
+ buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+ }
+}
+
+var gTest = {
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, signalLength, context.sampleRate);
+ applySignal(buffer, 0);
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ const delayTime = delayLength / context.sampleRate;
+ var delay = context.createDelay(delayTime);
+ delay.delayTime.value = delayTime;
+
+ source.connect(delay);
+
+ source.start(0);
+ return delay;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, outputLength, context.sampleRate);
+ applySignal(expectedBuffer, delayLength);
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeChannelChanges.html b/dom/media/webaudio/test/test_delayNodeChannelChanges.html
new file mode 100644
index 0000000000..d40c792ef7
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeChannelChanges.html
@@ -0,0 +1,98 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>test DelayNode channel count changes</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestCompleteLog();
+
+const bufferSize = 4096;
+
+var ctx;
+var testDelay;
+var stereoDelay;
+var invertor;
+
+function compareOutputs(callback) {
+ var processor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ testDelay.connect(processor);
+ invertor.connect(processor);
+ processor.onaudioprocess =
+ function(e) {
+ compareBuffers(e.inputBuffer,
+ ctx.createBuffer(2, bufferSize, ctx.sampleRate));
+ e.target.onaudioprocess = null;
+ callback();
+ };
+}
+
+function startTest() {
+ // Create a two-channel signal and connect it to the test and stereo delay nodes.
+ var merger = ctx.createChannelMerger();
+ merger.connect(testDelay);
+ merger.connect(stereoDelay);
+ var oscL = ctx.createOscillator();
+ oscL.connect(merger, 0, 0);
+ oscL.start(0);
+ var oscR = ctx.createOscillator();
+ oscR.type = "sawtooth";
+ oscR.connect(merger, 0, 1);
+ oscR.start(0);
+
+ compareOutputs(
+ function () {
+ // Disconnect the two-channel signal and test again
+ merger.disconnect();
+ compareOutputs(SimpleTest.finish);
+ });
+}
+
+function prepareTest() {
+ ctx = new AudioContext();
+
+ // The output of a test delay node with mono and stereo input will be
+ // compared with that of separate mono and stereo delay nodes.
+ const delayTime = 0.3 * bufferSize / ctx.sampleRate;
+ testDelay = ctx.createDelay(delayTime);
+ testDelay.delayTime.value = delayTime;
+ var monoDelay = ctx.createDelay(delayTime);
+ monoDelay.delayTime.value = delayTime;
+ stereoDelay = ctx.createDelay(delayTime);
+ stereoDelay.delayTime.value = delayTime;
+
+ // Create a one-channel signal and connect to the delay nodes
+ var monoOsc = ctx.createOscillator();
+ monoOsc.frequency.value = 110;
+ monoOsc.connect(testDelay);
+ monoOsc.connect(monoDelay);
+ monoOsc.start(0);
+
+ // Invert the expected signals so that mixing them with the test output leaves only the difference.
+ invertor = ctx.createGain();
+ invertor.gain.value = -1.0;
+ monoDelay.connect(invertor);
+ stereoDelay.connect(invertor);
+
+ // Start the test after the delay nodes have begun processing.
+ var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ processor.connect(ctx.destination);
+
+ processor.onaudioprocess =
+ function(e) {
+ e.target.onaudioprocess = null;
+ processor.disconnect();
+ startTest();
+ };
+}
+prepareTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeCycles.html b/dom/media/webaudio/test/test_delayNodeCycles.html
new file mode 100644
index 0000000000..82c5f62504
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeCycles.html
@@ -0,0 +1,157 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the support of cycles.</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const sampleRate = 48000;
+const inputLength = 2048;
+
+addLoadEvent(function() {
+ function addSine(b) {
+ for (var i = 0; i < b.length; i++) {
+ b[i] += Math.sin(440 * 2 * Math.PI * i / sampleRate);
+ }
+ }
+
+ function getSineBuffer(ctx) {
+ var buffer = ctx.createBuffer(1, inputLength, ctx.sampleRate);
+ addSine(buffer.getChannelData(0));
+ return buffer;
+ }
+
+ function createAndPlayWithCycleAndDelayNode(ctx, delayFrames) {
+ var source = ctx.createBufferSource();
+ source.buffer = getSineBuffer(ctx);
+
+ var gain = ctx.createGain();
+ var delay = ctx.createDelay();
+ delay.delayTime.value = delayFrames/ctx.sampleRate;
+
+ source.connect(gain);
+ gain.connect(delay);
+ delay.connect(ctx.destination);
+ // cycle
+ delay.connect(gain);
+
+ source.start(0);
+ }
+
+ function createAndPlayWithCycleAndNoDelayNode(ctx) {
+ var source = ctx.createBufferSource();
+ source.loop = true;
+ source.buffer = getSineBuffer(ctx);
+
+ var gain = ctx.createGain();
+ var gain2 = ctx.createGain();
+
+ source.connect(gain);
+ gain.connect(gain2);
+ // cycle
+ gain2.connect(gain);
+ gain2.connect(ctx.destination);
+
+ source.start(0);
+ }
+
+ function createAndPlayWithCycleAndNoDelayNodeInCycle(ctx) {
+ var source = ctx.createBufferSource();
+ source.loop = true;
+ source.buffer = getSineBuffer(ctx);
+
+ var delay = ctx.createDelay();
+ var gain = ctx.createGain();
+ var gain2 = ctx.createGain();
+
+ // There is a cycle and a DelayNode, but the delay is not in the cycle.
+ source.connect(delay);
+ delay.connect(gain);
+ gain.connect(gain2);
+ // cycle
+ gain2.connect(gain);
+ gain2.connect(ctx.destination);
+
+ source.start(0);
+ }
+
+ var remainingTests = 0;
+ function finish() {
+ if (--remainingTests == 0) {
+ SimpleTest.finish();
+ }
+ }
+
+ function getOfflineContext(oncomplete) {
+ var ctx = new OfflineAudioContext(1, sampleRate, sampleRate);
+ ctx.oncomplete = oncomplete;
+ return ctx;
+ }
+
+ function checkSilentBuffer(e) {
+ var buffer = e.renderedBuffer.getChannelData(0);
+ for (var i = 0; i < buffer.length; i++) {
+ if (buffer[i] != 0.0) {
+ ok(false, "buffer should be silent.");
+ finish();
+ return;
+ }
+ }
+ ok(true, "buffer should be silent.");
+ finish();
+ }
+
+ function checkNoisyBuffer(e, aDelayFrames) {
+ var delayFrames = Math.max(128, aDelayFrames);
+
+ var expected = new Float32Array(e.renderedBuffer.length);
+ for (var i = delayFrames; i < expected.length; i += delayFrames) {
+ addSine(expected.subarray(i, i + inputLength));
+ }
+
+ compareChannels(e.renderedBuffer.getChannelData(0), expected);
+ finish();
+ }
+
+ function expectSilentOutput(f) {
+ remainingTests++;
+ var ctx = getOfflineContext(checkSilentBuffer);
+ f(ctx);
+ ctx.startRendering();
+ }
+
+ function expectNoisyOutput(delayFrames) {
+ remainingTests++;
+ var ctx = getOfflineContext();
+ ctx.oncomplete = function(e) { checkNoisyBuffer(e, delayFrames); };
+ createAndPlayWithCycleAndDelayNode(ctx, delayFrames);
+ ctx.startRendering();
+ }
+
+ // This makes a graph with a cycle and no DelayNode anywhere in the graph.
+ // The cycle subgraph should be muted, so the output should be silent.
+ expectSilentOutput(createAndPlayWithCycleAndNoDelayNode);
+ // This makes a graph with a cycle and a DelayNode in the graph, but the
+ // DelayNode is not part of the cycle.
+ // The cycle subgraph should be muted, so the output should be silent.
+ expectSilentOutput(createAndPlayWithCycleAndNoDelayNodeInCycle);
+ // These build legal graphs, each with at least one DelayNode in the cycle.
+ // There should be some non-silent output.
+ expectNoisyOutput(sampleRate/4);
+ // DelayNode.delayTime will be clamped to 128/ctx.sampleRate.
+ // There should be some non-silent output.
+ expectNoisyOutput(0);
+});
+
+</script>
+</pre>
+</body>
+</html>
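As the final case checks, a delay that participates in a cycle cannot act on less than one render quantum; the in-cycle delay time is effectively clamped. A one-line sketch of the rule the comment above refers to, not Gecko's actual implementation:

    // Inside a cycle the effective delay is at least one 128-frame block:
    var effectiveDelay = Math.max(delay.delayTime.value, 128 / ctx.sampleRate);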
diff --git a/dom/media/webaudio/test/test_delayNodePassThrough.html b/dom/media/webaudio/test/test_delayNodePassThrough.html
new file mode 100644
index 0000000000..0c2d1db30a
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodePassThrough.html
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DelayNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var delay = context.createDelay();
+
+ source.buffer = this.buffer;
+
+ source.connect(delay);
+
+ delay.delayTime.value = 0.5;
+
+ // Delay the source stream by 2048 frames
+ delay.delayTime.value = 2048 / context.sampleRate;
+
+ var delayWrapped = SpecialPowers.wrap(delay);
+ ok("passThrough" in delayWrapped, "DelayNode should support the passThrough API");
+ delayWrapped.passThrough = true;
+
+ source.start(0);
+ return delay;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ var silence = context.createBuffer(1, 2048, context.sampleRate);
+
+ return [this.buffer, silence];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html b/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html
new file mode 100644
index 0000000000..235a55a4c8
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DelayNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var delay = context.createDelay(0.02);
+
+ source.buffer = this.buffer;
+
+ source.connect(delay);
+
+ source.start(0);
+ return delay;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ this.buffer = expectedBuffer;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeTailIncrease.html b/dom/media/webaudio/test/test_delayNodeTailIncrease.html
new file mode 100644
index 0000000000..a511a4a3ff
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeTailIncrease.html
@@ -0,0 +1,71 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test increasing delay of DelayNode after input finishes</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const signalLength = 100;
+const bufferSize = 1024;
+// Delay should be long enough to allow CC to run
+const delayBufferCount = 50;
+const delayLength = delayBufferCount * bufferSize + 700;
+
+var count = 0;
+
+function applySignal(buffer, offset) {
+ for (var i = 0; i < signalLength; ++i) {
+ buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+ }
+}
+
+function onAudioProcess(e) {
+ switch(count) {
+ case 5:
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ break;
+ case delayBufferCount:
+ var offset = delayLength - count * bufferSize;
+ var ctx = e.target.context;
+ var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
+ applySignal(expected, offset);
+ compareBuffers(e.inputBuffer, expected);
+ SimpleTest.finish();
+ }
+ count++;
+}
+
+function startTest() {
+ var ctx = new AudioContext();
+ var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ processor.onaudioprocess = onAudioProcess;
+
+ // Switch on delay at a time in the future.
+ var delayDuration = delayLength / ctx.sampleRate;
+ var delayStartTime = (delayLength - bufferSize) / ctx.sampleRate;
+ var delay = ctx.createDelay(delayDuration);
+ delay.delayTime.setValueAtTime(delayDuration, delayStartTime);
+ delay.connect(processor);
+
+ // Short signal that finishes before switching to long delay
+ var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
+ applySignal(buffer, 0);
+ var source = ctx.createBufferSource();
+ source.buffer = buffer;
+ source.start();
+ source.connect(delay);
+}
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html b/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html
new file mode 100644
index 0000000000..c6723f643d
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html
@@ -0,0 +1,95 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test tail time lifetime of DelayNode after input is disconnected</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// Web Audio doesn't provide a means to precisely time disconnect()s but we
+// can test that the output of delay nodes matches the output from their
+// sources before they are disconnected.
+
+SimpleTest.waitForExplicitFinish();
+
+const signalLength = 128;
+const bufferSize = 4096;
+const sourceCount = bufferSize / signalLength;
+// Delay should be long enough to allow CC to run
+var delayBufferCount = 20;
+const delayLength = delayBufferCount * bufferSize;
+
+var sourceOutput = new Float32Array(bufferSize);
+var delayOutputCount = 0;
+var sources = [];
+
+function onDelayOutput(e) {
+ if (delayOutputCount < delayBufferCount) {
+ delayOutputCount++;
+ return;
+ }
+
+ compareChannels(e.inputBuffer.getChannelData(0), sourceOutput);
+ e.target.onaudioprocess = null;
+ SimpleTest.finish();
+}
+
+function onSourceOutput(e) {
+ // Record the first buffer
+ e.inputBuffer.copyFromChannel(sourceOutput, 0);
+ e.target.onaudioprocess = null;
+}
+
+function disconnectSources() {
+ for (var i = 0; i < sourceCount; ++i) {
+ sources[i].disconnect();
+ }
+
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+}
+
+function startTest() {
+ var ctx = new AudioContext();
+
+ var sourceProcessor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ sourceProcessor.onaudioprocess = onSourceOutput;
+ // Keep audioprocess events going after source disconnect.
+ sourceProcessor.connect(ctx.destination);
+
+ var delayProcessor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ delayProcessor.onaudioprocess = onDelayOutput;
+
+ var delayDuration = delayLength / ctx.sampleRate;
+ for (var i = 0; i < sourceCount; ++i) {
+ var delay = ctx.createDelay(delayDuration);
+ delay.delayTime.value = delayDuration;
+ delay.connect(delayProcessor);
+
+ var source = ctx.createOscillator();
+ source.frequency.value = 440 + 10 * i;
+ source.start(i * signalLength / ctx.sampleRate);
+ source.stop((i + 1) * signalLength / ctx.sampleRate);
+ source.connect(delay);
+ source.connect(sourceProcessor);
+
+ sources[i] = source;
+ }
+
+ // Assuming the above Web Audio operations have already scheduled an event
+ // to run in stable state and start the graph thread, schedule a subsequent
+ // event to disconnect the sources, which will remove main thread connection
+ // references before it knows the graph thread has started using the source
+ // streams.
+ SimpleTest.executeSoon(disconnectSources);
+}
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeTailWithGain.html b/dom/media/webaudio/test/test_delayNodeTailWithGain.html
new file mode 100644
index 0000000000..60cca276c0
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeTailWithGain.html
@@ -0,0 +1,72 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test tail time lifetime of DelayNode indirectly connected to source</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const signalLength = 130;
+const bufferSize = 1024;
+// Delay should be long enough to allow CC to run
+const delayBufferCount = 50;
+const delayLength = delayBufferCount * bufferSize + 700;
+
+var count = 0;
+
+function applySignal(buffer, offset) {
+ for (var i = 0; i < signalLength; ++i) {
+ buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+ }
+}
+
+function onAudioProcess(e) {
+ switch(count) {
+ case 5:
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ break;
+ case delayBufferCount:
+ var offset = delayLength - count * bufferSize;
+ var ctx = e.target.context;
+ var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
+ applySignal(expected, offset);
+ compareBuffers(e.inputBuffer, expected);
+ SimpleTest.finish();
+ }
+ count++;
+}
+
+function startTest() {
+ var ctx = new AudioContext();
+ var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ processor.onaudioprocess = onAudioProcess;
+
+ var delayDuration = delayLength / ctx.sampleRate;
+ var delay = ctx.createDelay(delayDuration);
+ delay.delayTime.value = delayDuration;
+ delay.connect(processor);
+
+ var gain = ctx.createGain();
+ gain.connect(delay);
+
+ // Short signal that finishes before garbage collection
+ var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
+ applySignal(buffer, 0);
+ var source = ctx.createBufferSource();
+ source.buffer = buffer;
+ source.start();
+ source.connect(gain);
+}
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html b/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html
new file mode 100644
index 0000000000..da5f02b052
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html
@@ -0,0 +1,136 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test tail time lifetime of DelayNode after input finishes and new input added</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+// The buffer source will start on a block boundary, so keeping the signal
+// within one block ensures that it will not cross AudioProcessingEvent buffer
+// boundaries.
+const signalLength = 128;
+const bufferSize = 1024;
+// Delay should be long enough to allow CC to run
+var delayBufferCount = 50;
+var delayBufferOffset;
+const delayLength = delayBufferCount * bufferSize;
+
+var phase = "initial";
+var sourceCount = 0;
+var delayCount = 0;
+var oscillator;
+var delay;
+var source;
+
+function applySignal(buffer, offset) {
+ for (var i = 0; i < signalLength; ++i) {
+ buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength);
+ }
+}
+
+function bufferIsSilent(buffer, out) {
+ for (var i = 0; i < buffer.length; ++i) {
+ if (buffer.getChannelData(0)[i] != 0) {
+ if (out) {
+ out.soundOffset = i;
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+function onDelayOutput(e) {
+ switch(phase) {
+
+ case "initial":
+ // Wait for oscillator sound to exit delay
+ if (bufferIsSilent(e.inputBuffer))
+ break;
+
+ phase = "played oscillator";
+ break;
+
+ case "played oscillator":
+ // The first tail time has expired. Start the second source and drop the
+ // references to the delay and to the connected second source.
+ oscillator.disconnect();
+ source.connect(delay);
+ source.start();
+ source = null;
+ delay = null;
+ phase = "started second source";
+ break;
+
+ case "second tail time":
+ if (delayCount == delayBufferCount) {
+ var ctx = e.target.context;
+ var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate);
+ applySignal(expected, delayBufferOffset);
+ compareBuffers(e.inputBuffer, expected);
+ e.target.onaudioprocess = null;
+ SimpleTest.finish();
+ }
+ }
+
+ delayCount++;
+}
+
+function onSourceOutput(e) {
+ switch(phase) {
+ case "started second source":
+ var out = {};
+ if (!bufferIsSilent(e.inputBuffer, out)) {
+ delayBufferCount += sourceCount;
+ delayBufferOffset = out.soundOffset;
+ phase = "played second source";
+ }
+ break;
+ case "played second source":
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ phase = "second tail time";
+ e.target.onaudioprocess = null;
+ }
+
+ sourceCount++;
+}
+
+function startTest() {
+ var ctx = new AudioContext();
+ var delayDuration = delayLength / ctx.sampleRate;
+ delay = ctx.createDelay(delayDuration);
+ delay.delayTime.value = delayDuration;
+ var processor1 = ctx.createScriptProcessor(bufferSize, 1, 0);
+ delay.connect(processor1);
+ processor1.onaudioprocess = onDelayOutput;
+
+ // Signal to trigger initial tail time reference
+ oscillator = ctx.createOscillator();
+ oscillator.start(0);
+ oscillator.stop(100/ctx.sampleRate);
+ oscillator.connect(delay);
+
+ // Short signal, not started yet, with a ScriptProcessor to detect when it
+ // starts. It should finish before garbage collection.
+ var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate);
+ applySignal(buffer, 0);
+ source = ctx.createBufferSource();
+ source.buffer = buffer;
+ var processor2 = ctx.createScriptProcessor(bufferSize, 1, 0);
+ source.connect(processor2);
+ processor2.onaudioprocess = onSourceOutput;
+}
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delayNodeWithGain.html b/dom/media/webaudio/test/test_delayNodeWithGain.html
new file mode 100644
index 0000000000..af075c7439
--- /dev/null
+++ b/dom/media/webaudio/test/test_delayNodeWithGain.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DelayNode with a GainNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+
+ var delay = context.createDelay();
+
+ source.buffer = buffer;
+
+ var gain = context.createGain();
+ gain.gain.value = 0.5;
+
+ source.connect(gain);
+ gain.connect(delay);
+
+ // Delay the source stream by 2048 frames
+ delay.delayTime.value = 2048 / context.sampleRate;
+
+ source.start(0);
+ return delay;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate);
+ for (var i = 2048; i < 2048 * 2; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate) / 2;
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_delaynode-channel-count-1.html b/dom/media/webaudio/test/test_delaynode-channel-count-1.html
new file mode 100644
index 0000000000..dd964ef9e3
--- /dev/null
+++ b/dom/media/webaudio/test/test_delaynode-channel-count-1.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html>
+<title>Test that DelayNode output channelCount matches that of the delayed input</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+// See https://github.com/WebAudio/web-audio-api/issues/25
+
+// sampleRate is a power of two so that delay times are exact in base-2
+// floating point arithmetic.
+const SAMPLE_RATE = 32768;
+// Arbitrary delay time in frames (but assumed below to be a multiple of the
+// block size):
+const DELAY_FRAMES = 3 * 128;
+// Implementations may apply interpolation to input samples, which can spread
+// the effect of input with larger channel counts over neighbouring blocks.
+// This test skips enough neighbouring blocks to tolerate an interpolation
+// filter radius of up to this number of frames:
+const INTERPOLATION_GRACE = 128;
+// Number of frames of DelayNode output that are known to be stereo:
+const STEREO_FRAMES = 128;
+// The delay will be increased at this frame to switch DelayNode output back
+// to mono.
+const MONO_OUTPUT_START_FRAME =
+ DELAY_FRAMES + INTERPOLATION_GRACE + STEREO_FRAMES;
+// Number of frames of output that are known to be mono after the known stereo
+// and interpolation grace.
+const MONO_FRAMES = 128;
+// Total length allows for interpolation after effects of stereo input are
+// finished and one block to test return to mono output:
+const TOTAL_LENGTH =
+ MONO_OUTPUT_START_FRAME + INTERPOLATION_GRACE + MONO_FRAMES;
+// maxDelayTime is a multiple of the block size, because the Gecko implementation
+// once had a bug with delayTime = maxDelayTime in this situation:
+const MAX_DELAY_FRAMES = TOTAL_LENGTH + INTERPOLATION_GRACE;
+
+promise_test(() => {
+ let context = new OfflineAudioContext({numberOfChannels: 1,
+ length: TOTAL_LENGTH,
+ sampleRate: SAMPLE_RATE});
+
+ // Only channel 1 of the splitter is connected to the destination.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ splitter.connect(context.destination, 1);
+
+ // A gain node has channelCountMode "max" and channelInterpretation
+ // "speakers", and so will up-mix a mono input when there is stereo input.
+ let gain = new GainNode(context);
+ gain.connect(splitter);
+
+ // The delay node initially outputs a single channel of silence, when it
+ // does not have enough signal in its history to output what it has
+ // previously received. After the delay period, it will then output the
+ // stereo signal it received.
+ let delay =
+ new DelayNode(context,
+ {maxDelayTime: MAX_DELAY_FRAMES / context.sampleRate,
+ delayTime: DELAY_FRAMES / context.sampleRate});
+ // Schedule an increase in the delay to return to mono silent output from
+ // the unfilled portion of the DelayNode's buffer.
+ delay.delayTime.setValueAtTime(MAX_DELAY_FRAMES / context.sampleRate,
+ MONO_OUTPUT_START_FRAME / context.sampleRate);
+ delay.connect(gain);
+
+ let stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ stereoMerger.connect(delay);
+
+ let leftOffset = 0.125;
+ let rightOffset = 0.5;
+ let leftSource = new ConstantSourceNode(context, {offset: leftOffset});
+ let rightSource = new ConstantSourceNode(context, {offset: rightOffset});
+ leftSource.start();
+ rightSource.start();
+ leftSource.connect(stereoMerger, 0, 0);
+ rightSource.connect(stereoMerger, 0, 1);
+ // Connect a mono source directly to the gain, so that even stereo silence
+ // will be detected in channel 1 of the gain output because it will cause
+ // the mono source to be up-mixed.
+ let monoOffset = 0.25;
+ let monoSource = new ConstantSourceNode(context, {offset: monoOffset});
+ monoSource.start();
+ monoSource.connect(gain);
+
+ return context.startRendering().
+ then((buffer) => {
+ let output = buffer.getChannelData(0);
+
+ function assert_samples_equal(startIndex, length, expected, description)
+ {
+ for (let i = startIndex; i < startIndex + length; ++i) {
+ assert_equals(output[i], expected, description + ` at ${i}`);
+ }
+ }
+
+ assert_samples_equal(0, DELAY_FRAMES - INTERPOLATION_GRACE,
+ 0, "Initial mono");
+ assert_samples_equal(DELAY_FRAMES + INTERPOLATION_GRACE, STEREO_FRAMES,
+ monoOffset + rightOffset, "Stereo");
+ assert_samples_equal(MONO_OUTPUT_START_FRAME + INTERPOLATION_GRACE,
+ MONO_FRAMES,
+ 0, "Final mono");
+ });
+});
+
+</script>
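For concreteness, the constants above work out to the following frame timeline (straightforward arithmetic from the definitions):

    DELAY_FRAMES            = 384        (3 * 128)
    initial mono checked    = [0, 256)   (up to DELAY_FRAMES - grace)
    stereo checked          = [512, 640) (DELAY_FRAMES + grace, 128 frames)
    MONO_OUTPUT_START_FRAME = 640        (384 + 128 + 128)
    final mono checked      = [768, 896) (640 + grace, 128 frames)
    TOTAL_LENGTH            = 896
    MAX_DELAY_FRAMES        = 1024       (896 + 128)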
diff --git a/dom/media/webaudio/test/test_disconnectAll.html b/dom/media/webaudio/test/test_disconnectAll.html
new file mode 100644
index 0000000000..f67b969949
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectAll.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 256, context.sampleRate);
+ var data = sourceBuffer.getChannelData(0);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain1 = context.createGain();
+ var gain2 = context.createGain();
+ var gain3 = context.createGain();
+ var merger = context.createChannelMerger(3);
+
+ source.connect(gain1);
+ source.connect(gain2);
+ source.connect(gain3);
+ gain1.connect(merger);
+ gain2.connect(merger);
+ gain3.connect(merger);
+ source.start();
+
+ source.disconnect();
+
+ return merger;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html> \ No newline at end of file
diff --git a/dom/media/webaudio/test/test_disconnectAudioParam.html b/dom/media/webaudio/test/test_disconnectAudioParam.html
new file mode 100644
index 0000000000..bfa4f92312
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectAudioParam.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioParam</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 256, context.sampleRate);
+ var data = sourceBuffer.getChannelData(0);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var half = context.createGain();
+ var gain1 = context.createGain();
+ var gain2 = context.createGain();
+
+ half.gain.value = 0.5;
+
+ source.connect(gain1);
+ gain1.connect(gain2);
+ source.connect(half);
+
+ half.connect(gain1.gain);
+ half.connect(gain2.gain);
+
+ half.disconnect(gain2.gain);
+
+ source.start();
+
+ return gain2;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 256, context.sampleRate);
+ for (var i = 0; i < 256; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 1.5;
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html>
diff --git a/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html b/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html
new file mode 100644
index 0000000000..533091f920
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html
@@ -0,0 +1,64 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioParam</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 2,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(2, 256, context.sampleRate);
+ for (var i = 1; i <= 2; i++) {
+ var data = sourceBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = i;
+ }
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var half = context.createGain();
+ var gain1 = context.createGain();
+ var gain2 = context.createGain();
+ var splitter = context.createChannelSplitter(2);
+
+ half.gain.value = 0.5;
+
+ source.connect(gain1);
+ gain1.connect(gain2);
+ source.connect(half);
+ half.connect(splitter);
+ splitter.connect(gain1.gain, 0);
+ splitter.connect(gain2.gain, 1);
+
+ splitter.disconnect(gain2.gain, 1);
+
+ source.start();
+
+ return gain2;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(2, 256, context.sampleRate);
+ for (var i = 1; i <= 2; i++) {
+ var data = expectedBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = (i == 1) ? 1.5 : 3.0;
+ }
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html>
diff --git a/dom/media/webaudio/test/test_disconnectExceptions.html b/dom/media/webaudio/test/test_disconnectExceptions.html
new file mode 100644
index 0000000000..54fde4df8d
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectExceptions.html
@@ -0,0 +1,75 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var ctx = new AudioContext();
+ var sourceBuffer = ctx.createBuffer(2, 256, ctx.sampleRate);
+ for (var i = 1; i <= 2; i++) {
+ var data = sourceBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = i;
+ }
+ }
+
+ var source = ctx.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain1 = ctx.createGain();
+ var splitter = ctx.createChannelSplitter(2);
+ var merger = ctx.createChannelMerger(2);
+ var gain2 = ctx.createGain();
+ var gain3 = ctx.createGain();
+
+ gain1.connect(splitter);
+ splitter.connect(gain2, 0);
+ splitter.connect(gain3, 1);
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+ gain2.connect(gain3);
+ gain3.connect(ctx.destination);
+ merger.connect(ctx.destination);
+
+ expectException(function() {
+ splitter.disconnect(2);
+ }, DOMException.INDEX_SIZE_ERR);
+
+ expectNoException(function() {
+ splitter.disconnect(1);
+ splitter.disconnect(1);
+ });
+
+ expectException(function() {
+ gain1.disconnect(gain2);
+ }, DOMException.INVALID_ACCESS_ERR);
+
+ expectException(function() {
+ gain1.disconnect(gain3);
+ ok(false, 'Should get InvalidAccessError exception');
+ }, DOMException.INVALID_ACCESS_ERR);
+
+ expectException(function() {
+ splitter.disconnect(gain2, 2);
+ }, DOMException.INDEX_SIZE_ERR);
+
+ expectException(function() {
+ splitter.disconnect(gain1, 0);
+ }, DOMException.INVALID_ACCESS_ERR);
+
+ expectException(function() {
+ splitter.disconnect(gain3, 0, 0);
+ }, DOMException.INVALID_ACCESS_ERR);
+
+ expectException(function() {
+ splitter.disconnect(merger, 3, 0);
+ }, DOMException.INDEX_SIZE_ERR);
+ </script>
+ </pre>
+ </body>
+</html>
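For reference, the overloads exercised above map onto exceptions as follows (each line restates one of the assertions in this test):

    splitter.disconnect(2);            // IndexSizeError: output index out of range
    gain1.disconnect(gain2);           // InvalidAccessError: no such connection
    splitter.disconnect(gain2, 2);     // IndexSizeError: output index out of range
    splitter.disconnect(gain1, 0);     // InvalidAccessError: not connected to gain1
    splitter.disconnect(merger, 3, 0); // IndexSizeError: output index out of range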
diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNode.html b/dom/media/webaudio/test/test_disconnectFromAudioNode.html
new file mode 100644
index 0000000000..e6ec9d941c
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectFromAudioNode.html
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 256, context.sampleRate);
+ var data = sourceBuffer.getChannelData(0);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain1 = context.createGain();
+ var gain2 = context.createGain();
+ var gain3 = context.createGain();
+
+ source.connect(gain1);
+ source.connect(gain2);
+
+ gain1.connect(gain3);
+ gain2.connect(gain3);
+
+ source.start();
+
+ source.disconnect(gain2);
+
+ return gain3;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 256, context.sampleRate);
+ for (var i = 0; i < 256; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 1.0;
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html>
diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html
new file mode 100644
index 0000000000..566a84edbd
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html
@@ -0,0 +1,59 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 2,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(2, 256, context.sampleRate);
+ for (var i = 1; i <= 2; i++) {
+ var data = sourceBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = i;
+ }
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var splitter = context.createChannelSplitter(2);
+ var gain1 = context.createGain();
+ var gain2 = context.createGain();
+ var merger = context.createChannelMerger(2);
+
+ source.connect(splitter);
+ splitter.connect(gain1, 0);
+ splitter.connect(gain2, 0);
+ splitter.connect(gain2, 1);
+ gain1.connect(merger, 0, 1);
+ gain2.connect(merger, 0, 1);
+ source.start();
+
+ splitter.disconnect(gain2, 0);
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(2, 256, context.sampleRate);
+ for (var i = 0; i < 256; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 0;
+ expectedBuffer.getChannelData(1)[i] = 3;
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html>
diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html
new file mode 100644
index 0000000000..478768c62d
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html
@@ -0,0 +1,57 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 3,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(3, 256, context.sampleRate);
+ for (var i = 1; i <= 3; i++) {
+ var data = sourceBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = i;
+ }
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var splitter = context.createChannelSplitter(3);
+ var merger = context.createChannelMerger(3);
+
+ source.connect(splitter);
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+ splitter.connect(merger, 2, 2);
+ source.start();
+
+ splitter.disconnect(merger, 2, 2);
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(3, 256, context.sampleRate);
+ for (var i = 1; i <= 3; i++) {
+ var data = expectedBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = (i == 3) ? 0 : i;
+ }
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html> \ No newline at end of file
diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html b/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html
new file mode 100644
index 0000000000..dff1562d7a
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html
@@ -0,0 +1,56 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>
+ Test whether we can disconnect all outbound connections of an AudioNode
+ </title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 2,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 256, context.sampleRate);
+ var data = sourceBuffer.getChannelData(0);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var merger = context.createChannelMerger(2);
+ var gain = context.createGain();
+
+ source.connect(merger, 0, 0);
+ source.connect(gain);
+ source.connect(merger, 0, 1);
+
+ source.disconnect(merger);
+
+ source.start();
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(2, 256, context.sampleRate);
+ for (var channel = 0; channel < 2; channel++) {
+ for (var i = 0; i < 256; ++i) {
+ expectedBuffer.getChannelData(channel)[i] = 0;
+ }
+ }
+
+ return expectedBuffer;
+ }
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html>
diff --git a/dom/media/webaudio/test/test_disconnectFromOutput.html b/dom/media/webaudio/test/test_disconnectFromOutput.html
new file mode 100644
index 0000000000..9a7fe354a9
--- /dev/null
+++ b/dom/media/webaudio/test/test_disconnectFromOutput.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+ <head>
+ <title>Test whether we can disconnect an AudioNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+ </head>
+ <body>
+ <pre id="test">
+ <script class="testbody" type="text/javascript">
+ var gTest = {
+ length: 256,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(3, 256, context.sampleRate);
+ for (var i = 1; i <= 3; i++) {
+ var data = sourceBuffer.getChannelData(i-1);
+ for (var j = 0; j < data.length; j++) {
+ data[j] = i;
+ }
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var splitter = context.createChannelSplitter(3);
+ var sum = context.createGain();
+
+ source.connect(splitter);
+ splitter.connect(sum, 0);
+ splitter.connect(sum, 1);
+ splitter.connect(sum, 2);
+ source.start();
+
+ splitter.disconnect(1);
+
+ return sum;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 256, context.sampleRate);
+ for (var i = 0; i < 256; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 4;
+ }
+
+ return expectedBuffer;
+ },
+ };
+
+ runTest();
+ </script>
+ </pre>
+ </body>
+</html> \ No newline at end of file
diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNode.html b/dom/media/webaudio/test/test_dynamicsCompressorNode.html
new file mode 100644
index 0000000000..05b6887a53
--- /dev/null
+++ b/dom/media/webaudio/test/test_dynamicsCompressorNode.html
@@ -0,0 +1,68 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DynamicsCompressorNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function near(a, b, msg) {
+ ok(Math.abs(a - b) < 1e-4, msg);
+}
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+
+ var osc = context.createOscillator();
+ var sp = context.createScriptProcessor();
+
+ var compressor = new DynamicsCompressorNode(context);
+
+ osc.connect(compressor);
+ osc.connect(sp);
+ compressor.connect(context.destination);
+
+ is(compressor.channelCount, 2, "compressor node has 2 input channels by default");
+ is(compressor.channelCountMode, "clamped-max", "Correct channelCountMode for the compressor node");
+ is(compressor.channelInterpretation, "speakers", "Correct channelInterpretation for the compressor node");
+
+ // Verify default values
+ ok(compressor.threshold instanceof AudioParam, "threshold is an AudioParam");
+ near(compressor.threshold.defaultValue, -24, "Correct default value for threshold");
+ ok(compressor.knee instanceof AudioParam, "knee is an AudioParam");
+ near(compressor.knee.defaultValue, 30, "Correct default value for knee");
+ ok(compressor.ratio instanceof AudioParam, "ratio is an AudioParam");
+ near(compressor.ratio.defaultValue, 12, "Correct default value for ratio");
+ is(typeof compressor.reduction, "number", "reduction is a number");
+ near(compressor.reduction, 0, "Correct default value for reduction");
+ ok(compressor.attack instanceof AudioParam, "attack is an AudioParam");
+ near(compressor.attack.defaultValue, 0.003, "Correct default value for attack");
+ ok(compressor.release instanceof AudioParam, "release is an AudioParam");
+ near(compressor.release.defaultValue, 0.25, "Correct default value for release");
+
+ compressor.threshold.value = -80;
+
+ osc.start();
+ var iteration = 0;
+ sp.onaudioprocess = function(e) {
+ if (iteration > 10) {
+ ok(compressor.reduction < 0,
+ "Feeding a full-scale sine to a compressor should result in an db" +
+ "reduction.");
+ sp.onaudioprocess = null;
+ osc.stop(0);
+
+ SimpleTest.finish();
+ }
+ iteration++;
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
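The defaults verified above match the members of the spec's `DynamicsCompressorOptions` dictionary, so an equivalent node can also be configured at construction time (a sketch):

    // Construction-time equivalent of the parameter checks above; all values
    // are the spec defaults except the lowered threshold used by this test.
    var compressor2 = new DynamicsCompressorNode(context, {
      threshold: -80,
      knee: 30,
      ratio: 12,
      attack: 0.003,
      release: 0.25,
    });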
diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html b/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html
new file mode 100644
index 0000000000..9e8d794547
--- /dev/null
+++ b/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test DynamicsCompressorNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var compressor = context.createDynamicsCompressor();
+
+ source.buffer = this.buffer;
+
+ source.connect(compressor);
+
+ var compressorWrapped = SpecialPowers.wrap(compressor);
+ ok("passThrough" in compressorWrapped, "DynamicsCompressorNode should support the passThrough API");
+ compressorWrapped.passThrough = true;
+
+ source.start(0);
+ return compressor;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html b/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html
new file mode 100644
index 0000000000..7e6487e3f8
--- /dev/null
+++ b/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html
@@ -0,0 +1,51 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset="utf-8">
+ <title>Test DynamicsCompressor with Gain</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+addLoadEvent(function() {
+ var samplerate = 44100;
+ var context = new OfflineAudioContext(1, samplerate/100, samplerate);
+
+ var osc = context.createOscillator();
+ osc.frequency.value = 2400;
+
+ var gain = context.createGain();
+ gain.gain.value = 1.5;
+
+ // These numbers are borrowed from the example code on MDN
+ // https://developer.mozilla.org/en-US/docs/Web/API/DynamicsCompressorNode
+ var compressor = context.createDynamicsCompressor();
+ compressor.threshold.value = -50;
+ compressor.knee.value = 40;
+ compressor.ratio.value = 12;
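+  // Note: reduction is a read-only attribute (a plain number), so the next
+  // assignment, kept from the MDN example, is effectively a no-op.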
+ compressor.reduction.value = -20;
+ compressor.attack.value = 0;
+ compressor.release.value = 0.25;
+
+ osc.connect(gain);
+ gain.connect(compressor);
+ compressor.connect(context.destination);
+ osc.start();
+
+ context.startRendering().then(buffer => {
+ var peak = Math.max(...buffer.getChannelData(0));
+    info("peak: " + peak);
+    // These bounds are experimentally determined. Without dynamics
+    // compression the peak would be just under 1.5. We also check for a
+    // minimum value to make sure we are not getting all zeros.
+    ok(peak >= 0.2 && peak < 1.0, "Peak value should be at least 0.2 and less than 1.0");
+ SimpleTest.finish();
+ });
+});
+</script>
+<pre>
+</pre>
+</body>
diff --git a/dom/media/webaudio/test/test_event_listener_leaks.html b/dom/media/webaudio/test/test_event_listener_leaks.html
new file mode 100644
index 0000000000..a3bcc9259e
--- /dev/null
+++ b/dom/media/webaudio/test/test_event_listener_leaks.html
@@ -0,0 +1,47 @@
+<!--
+ Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+-->
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Bug 1450358 - Test AudioContext event listener leak conditions</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="/tests/dom/events/test/event_leak_utils.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<script class="testbody" type="text/javascript">
+// Manipulate AudioContext objects in the frame's context.
+// It's important here that we create a listener callback from
+// the DOM objects back to the frame's global in order to
+// exercise the leak condition.
+async function useAudioContext(contentWindow) {
+ let ctx = new contentWindow.AudioContext();
+ ctx.onstatechange = e => {
+ contentWindow.stateChangeCount += 1;
+ };
+
+ let osc = ctx.createOscillator();
+ osc.type = "sine";
+ osc.frequency.value = 440;
+ osc.start();
+}
+
+async function runTest() {
+ try {
+ await checkForEventListenerLeaks("AudioContext", useAudioContext);
+ } catch (e) {
+ ok(false, e);
+ } finally {
+ SimpleTest.finish();
+ }
+}
+
+SimpleTest.waitForExplicitFinish();
+addEventListener("load", runTest, { once: true });
+</script>
+</body>
+</html>
+
diff --git a/dom/media/webaudio/test/test_gainNode.html b/dom/media/webaudio/test/test_gainNode.html
new file mode 100644
index 0000000000..77b0ae88b0
--- /dev/null
+++ b/dom/media/webaudio/test/test_gainNode.html
@@ -0,0 +1,72 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test GainNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+
+ var gain = new GainNode(context);
+ ok(gain.gain, "The audioparam member must exist");
+ is(gain.gain.value, 1.0, "Correct initial value");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+ gain.gain.value = 0.5;
+    is(gain.gain.value, 0.5, "Correct value after setting");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+
+ gain = new GainNode(context, { gain: 0.5 });
+ ok(gain.gain, "The audioparam member must exist");
+ is(gain.gain.value, 0.5, "Correct initial value");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+ gain.gain.value = 0.5;
+    is(gain.gain.value, 0.5, "Correct value after setting");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ gain = context.createGain();
+ source.connect(gain);
+
+ ok(gain.gain, "The audioparam member must exist");
+ is(gain.gain.value, 1.0, "Correct initial value");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+ gain.gain.value = 0.5;
+    is(gain.gain.value, 0.5, "Correct value after setting");
+ is(gain.gain.defaultValue, 1.0, "Correct default value");
+ is(gain.channelCount, 2, "gain node has 2 input channels by default");
+ is(gain.channelCountMode, "max", "Correct channelCountMode for the gain node");
+    is(gain.channelInterpretation, "speakers", "Correct channelInterpretation for the gain node");
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate) / 2;
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_gainNodeInLoop.html b/dom/media/webaudio/test/test_gainNodeInLoop.html
new file mode 100644
index 0000000000..90dedc0ef4
--- /dev/null
+++ b/dom/media/webaudio/test/test_gainNodeInLoop.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test GainNode in presence of loops</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+ source.loop = true;
+ source.start(0);
+ source.stop(sourceBuffer.duration * 2);
+
+ var gain = context.createGain();
+ // Adjust the gain in a way that we don't just end up modifying AudioChunk::mVolume
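+    // (setValueAtTime creates a timeline event, so the gain is applied
+    // per-sample in the engine instead of being folded into a chunk-level
+    // volume.)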
+ gain.gain.setValueAtTime(0.5, 0);
+ source.connect(gain);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate);
+ for (var i = 0; i < 4096; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 0.5;
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_gainNodePassThrough.html b/dom/media/webaudio/test/test_gainNodePassThrough.html
new file mode 100644
index 0000000000..5a973ccaa2
--- /dev/null
+++ b/dom/media/webaudio/test/test_gainNodePassThrough.html
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test GainNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var gain = context.createGain();
+
+ source.buffer = this.buffer;
+
+ source.connect(gain);
+
+ gain.gain.value = 0.5;
+
+ var gainWrapped = SpecialPowers.wrap(gain);
+ ok("passThrough" in gainWrapped, "GainNode should support the passThrough API");
+ gainWrapped.passThrough = true;
+
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_iirFilterNodePassThrough.html b/dom/media/webaudio/test/test_iirFilterNodePassThrough.html
new file mode 100644
index 0000000000..ab54499e04
--- /dev/null
+++ b/dom/media/webaudio/test/test_iirFilterNodePassThrough.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test IIRFilterNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var filter = context.createIIRFilter([0.5, 0.5], [1.0]);
+
+ source.buffer = this.buffer;
+
+ source.connect(filter);
+
+ var filterWrapped = SpecialPowers.wrap(filter);
+ ok("passThrough" in filterWrapped, "BiquadFilterNode should support the passThrough API");
+ filterWrapped.passThrough = true;
+
+ source.start(0);
+ return filter;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_maxChannelCount.html b/dom/media/webaudio/test/test_maxChannelCount.html
new file mode 100644
index 0000000000..1a4e5c856e
--- /dev/null
+++ b/dom/media/webaudio/test/test_maxChannelCount.html
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the AudioContext.destination interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// Work around bug 911777
+SpecialPowers.forceGC();
+SpecialPowers.forceCC();
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ac = new AudioContext();
+ ok(ac.destination.maxChannelCount > 0, "We can query the maximum number of channels");
+
+ var oac = new OfflineAudioContext(2, 1024, 48000);
+ is(oac.destination.maxChannelCount, 2, "This OfflineAudioContext should have 2 max channels.");
+
+ oac = new OfflineAudioContext(6, 1024, 48000);
+ is(oac.destination.maxChannelCount, 6, "This OfflineAudioContext should have 6 max channels.");
+
+ expectException(function() {
+ oac.destination.channelCount = oac.destination.channelCount + 1;
+ }, DOMException.INDEX_SIZE_ERR);
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaDecoding.html b/dom/media/webaudio/test/test_mediaDecoding.html
new file mode 100644
index 0000000000..d796ef6add
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaDecoding.html
@@ -0,0 +1,388 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the decodeAudioData API and Resampling</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script type="text/javascript">
+
+// These routines have been copied nearly verbatim from WebKit, and are used
+// to convert an in-memory AudioBuffer into WAV file data.
+function writeString(s, a, offset) {
+ for (var i = 0; i < s.length; ++i) {
+ a[offset + i] = s.charCodeAt(i);
+ }
+}
+
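+// RIFF/WAVE stores multi-byte integers in little-endian order, hence the
+// low-byte-first writes below.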
+function writeInt16(n, a, offset) {
+ n = Math.floor(n);
+
+ var b1 = n & 255;
+ var b2 = (n >> 8) & 255;
+
+ a[offset + 0] = b1;
+ a[offset + 1] = b2;
+}
+
+function writeInt32(n, a, offset) {
+ n = Math.floor(n);
+ var b1 = n & 255;
+ var b2 = (n >> 8) & 255;
+ var b3 = (n >> 16) & 255;
+ var b4 = (n >> 24) & 255;
+
+ a[offset + 0] = b1;
+ a[offset + 1] = b2;
+ a[offset + 2] = b3;
+ a[offset + 3] = b4;
+}
+
+function writeAudioBuffer(audioBuffer, a, offset) {
+ var n = audioBuffer.length;
+ var channels = audioBuffer.numberOfChannels;
+
+ for (var i = 0; i < n; ++i) {
+ for (var k = 0; k < channels; ++k) {
+ var buffer = audioBuffer.getChannelData(k);
+ var sample = buffer[i] * 32768.0;
+
+ // Clip samples to the limitations of 16-bit.
+ // If we don't do this then we'll get nasty wrap-around distortion.
+ if (sample < -32768)
+ sample = -32768;
+ if (sample > 32767)
+ sample = 32767;
+
+ writeInt16(sample, a, offset);
+ offset += 2;
+ }
+ }
+}
+
+function createWaveFileData(audioBuffer) {
+ var frameLength = audioBuffer.length;
+ var numberOfChannels = audioBuffer.numberOfChannels;
+ var sampleRate = audioBuffer.sampleRate;
+ var bitsPerSample = 16;
+ var byteRate = sampleRate * numberOfChannels * bitsPerSample/8;
+ var blockAlign = numberOfChannels * bitsPerSample/8;
+ var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
+ var headerByteLength = 44;
+ var totalLength = headerByteLength + wavDataByteLength;
+
+ var waveFileData = new Uint8Array(totalLength);
+
+ var subChunk1Size = 16; // for linear PCM
+ var subChunk2Size = wavDataByteLength;
+ var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size);
+
+ writeString("RIFF", waveFileData, 0);
+ writeInt32(chunkSize, waveFileData, 4);
+ writeString("WAVE", waveFileData, 8);
+ writeString("fmt ", waveFileData, 12);
+
+ writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4)
+ writeInt16(1, waveFileData, 20); // AudioFormat (2)
+ writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2)
+ writeInt32(sampleRate, waveFileData, 24); // SampleRate (4)
+ writeInt32(byteRate, waveFileData, 28); // ByteRate (4)
+ writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2)
+  writeInt16(bitsPerSample, waveFileData, 34);     // BitsPerSample (2)
+
+ writeString("data", waveFileData, 36);
+ writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4)
+
+ // Write actual audio data starting at offset 44.
+ writeAudioBuffer(audioBuffer, waveFileData, 44);
+
+ return waveFileData;
+}
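+
+// Usage sketch (not exercised by this test): createWaveFileData() can
+// snapshot any AudioBuffer, e.g. the result of an offline render:
+//   new OfflineAudioContext(1, 44100, 44100).startRendering()
+//     .then(render => { var bytes = createWaveFileData(render); });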
+
+</script>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+// fuzzTolerance and fuzzToleranceMobile determine the fuzziness thresholds.
+// They're needed so that we can tolerate negligible differences in the
+// binary buffer caused by resampling the audio. fuzzToleranceMobile is
+// typically larger since on mobile platforms we do fixed-point resampling
+// as opposed to floating-point resampling.
+var files = [
+ // An ogg file, 44.1khz, mono
+ {
+ url: "ting-44.1k-1ch.ogg",
+ valid: true,
+ expectedUrl: "ting-44.1k-1ch.wav",
+ numberOfChannels: 1,
+ frames: 30592,
+ sampleRate: 44100,
+ duration: 0.693,
+ fuzzTolerance: 5,
+ fuzzToleranceMobile: 1284
+ },
+ // An ogg file, 44.1khz, stereo
+ {
+ url: "ting-44.1k-2ch.ogg",
+ valid: true,
+ expectedUrl: "ting-44.1k-2ch.wav",
+ numberOfChannels: 2,
+ frames: 30592,
+ sampleRate: 44100,
+ duration: 0.693,
+ fuzzTolerance: 6,
+ fuzzToleranceMobile: 2544
+ },
+ // An ogg file, 48khz, mono
+ {
+ url: "ting-48k-1ch.ogg",
+ valid: true,
+ expectedUrl: "ting-48k-1ch.wav",
+ numberOfChannels: 1,
+ frames: 33297,
+ sampleRate: 48000,
+ duration: 0.693,
+ fuzzTolerance: 5,
+ fuzzToleranceMobile: 1388
+ },
+ // An ogg file, 48khz, stereo
+ {
+ url: "ting-48k-2ch.ogg",
+ valid: true,
+ expectedUrl: "ting-48k-2ch.wav",
+ numberOfChannels: 2,
+ frames: 33297,
+ sampleRate: 48000,
+ duration: 0.693,
+ fuzzTolerance: 14,
+ fuzzToleranceMobile: 2752
+ },
+ // Make sure decoding a wave file results in the same buffer (for both the
+ // resampling and non-resampling cases)
+ {
+ url: "ting-44.1k-1ch.wav",
+ valid: true,
+ expectedUrl: "ting-44.1k-1ch.wav",
+ numberOfChannels: 1,
+ frames: 30592,
+ sampleRate: 44100,
+ duration: 0.693,
+ fuzzTolerance: 0,
+ fuzzToleranceMobile: 0
+ },
+ {
+ url: "ting-48k-1ch.wav",
+ valid: true,
+ expectedUrl: "ting-48k-1ch.wav",
+ numberOfChannels: 1,
+ frames: 33297,
+ sampleRate: 48000,
+ duration: 0.693,
+ fuzzTolerance: 0,
+ fuzzToleranceMobile: 0
+ },
+ // // A wave file
+ // //{ url: "24bit-44khz.wav", valid: true, expectedUrl: "24bit-44khz-expected.wav" },
+ // A non-audio file
+ { url: "invalid.txt", valid: false, sampleRate: 44100 },
+ // A webm file with no audio
+ { url: "noaudio.webm", valid: false, sampleRate: 48000 },
+ // A video ogg file with audio
+ {
+ url: "audio.ogv",
+ valid: true,
+ expectedUrl: "audio-expected.wav",
+ numberOfChannels: 2,
+ sampleRate: 44100,
+ frames: 47680,
+ duration: 1.0807,
+ fuzzTolerance: 106,
+ fuzzToleranceMobile: 3482
+ },
+ {
+ url: "nil-packet.ogg",
+ expectedUrl: null,
+ valid: true,
+ numberOfChannels: 2,
+ sampleRate: 48000,
+ frames: 18600,
+ duration: 0.3874,
+ }
+];
+
+// Returns true if the memory buffers differ in at most |fuzz| bytes
+function fuzzyMemcmp(buf1, buf2, fuzz) {
+  var difference = 0;
+  is(buf1.length, buf2.length, "same length");
+  for (var i = 0; i < buf1.length; ++i) {
+    if (buf1[i] != buf2[i]) {
+      ++difference;
+    }
+  }
+  if (difference > fuzz) {
+    ok(false, "Expected at most " + fuzz + " differing bytes, found " + difference);
+ }
+ return difference <= fuzz;
+}
+
+function getFuzzTolerance(test) {
+ var kIsMobile =
+ navigator.userAgent.includes("Mobile") || // b2g
+ navigator.userAgent.includes("Android"); // android
+ return kIsMobile ? test.fuzzToleranceMobile : test.fuzzTolerance;
+}
+
+function bufferIsSilent(buffer) {
+ for (var i = 0; i < buffer.length; ++i) {
+ if (buffer.getChannelData(0)[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+function checkAudioBuffer(buffer, test) {
+ if (buffer.numberOfChannels != test.numberOfChannels) {
+ is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
+ return;
+ }
+  ok(Math.abs(buffer.duration - test.duration) < 1e-3,
+     "Correct duration - got " + buffer.duration + ", expected " + test.duration);
+ is(buffer.sampleRate, test.sampleRate, "Correct sample rate");
+ is(buffer.length, test.frames, "Correct length");
+
+ var wave = createWaveFileData(buffer);
+ if (test.expectedWaveData) {
+ ok(fuzzyMemcmp(wave, test.expectedWaveData, getFuzzTolerance(test)), "Received expected decoded data");
+ }
+}
+
+function checkResampledBuffer(buffer, test, callback) {
+ if (buffer.numberOfChannels != test.numberOfChannels) {
+ is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
+ return;
+ }
+  ok(Math.abs(buffer.duration - test.duration) < 1e-3,
+     "Correct duration - got " + buffer.duration + ", expected " + test.duration);
+ // Take into account the resampling when checking the size
+ var expectedLength = test.frames * buffer.sampleRate / test.sampleRate;
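+  // e.g. 30592 frames decoded at 44100Hz into a 48000Hz context gives
+  // 30592 * 48000 / 44100 ~= 33297 frames.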
+ SimpleTest.ok(
+ Math.abs(buffer.length - expectedLength) < 1.0,
+ "Correct length - got " + buffer.length +
+ ", expected about " + expectedLength
+ );
+
+  // Play the buffer back in the original context, to resample back to the
+  // original rate, and compare with the buffer decoded without resampling.
+  var cx = test.nativeContext;
+ var expected = cx.createBufferSource();
+ expected.buffer = test.expectedBuffer;
+ expected.start();
+ var inverse = cx.createGain();
+ inverse.gain.value = -1;
+ expected.connect(inverse);
+ inverse.connect(cx.destination);
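+  // The destination sums expected * (-1) with the resampled signal, so if
+  // resampling round-trips cleanly the rendered buffer is near-silence
+  // (a phase-inversion null test).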
+ var resampled = cx.createBufferSource();
+ resampled.buffer = buffer;
+ resampled.start();
+ // This stop should do nothing, but it tests for bug 937475
+ resampled.stop(test.frames / cx.sampleRate);
+ resampled.connect(cx.destination);
+ cx.oncomplete = function(e) {
+ ok(!bufferIsSilent(e.renderedBuffer), "Expect buffer not silent");
+ // Resampling will lose the highest frequency components, so we should
+ // pass the difference through a low pass filter. However, either the
+ // input files don't have significant high frequency components or the
+ // tolerance in compareBuffers() is too high to detect them.
+ compareBuffers(e.renderedBuffer,
+ cx.createBuffer(test.numberOfChannels,
+ test.frames, test.sampleRate));
+ callback();
+  };
+ cx.startRendering();
+}
+
+function runResampling(test, response, callback) {
+ var sampleRate = test.sampleRate == 44100 ? 48000 : 44100;
+ var cx = new OfflineAudioContext(1, 1, sampleRate);
+ cx.decodeAudioData(response, function onSuccess(asyncResult) {
+ is(asyncResult.sampleRate, sampleRate, "Correct sample rate");
+
+ checkResampledBuffer(asyncResult, test, callback);
+ }, function onFailure() {
+ ok(false, "Expected successful decode with resample");
+ callback();
+ });
+}
+
+function runTest(test, response, callback) {
+ // We need to copy the array here, because decodeAudioData will detach the
+ // array's buffer.
+ var compressedAudio = response.slice(0);
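+  // (Once detached, response.byteLength reports 0, so this copy must happen
+  // before the decodeAudioData() call below.)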
+ var expectCallback = false;
+ var cx = new OfflineAudioContext(test.numberOfChannels || 1,
+ test.frames || 1, test.sampleRate);
+ cx.decodeAudioData(response, function onSuccess(asyncResult) {
+ ok(expectCallback, "Success callback should fire asynchronously");
+ ok(test.valid, "Did expect success for test " + test.url);
+
+ checkAudioBuffer(asyncResult, test);
+
+ test.expectedBuffer = asyncResult;
+ test.nativeContext = cx;
+ runResampling(test, compressedAudio, callback);
+ }, function onFailure(e) {
+ ok(e instanceof DOMException, "We want to see an exception here");
+ is(e.name, "EncodingError", "Exception name matches");
+ ok(expectCallback, "Failure callback should fire asynchronously");
+ ok(!test.valid, "Did expect failure for test " + test.url);
+ callback();
+ });
+ expectCallback = true;
+}
+
+function loadTest(test, callback) {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", test.url, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function() {
+ if (!test.expectedUrl) {
+ runTest(test, xhr.response, callback);
+ return;
+ }
+ var getExpected = new XMLHttpRequest();
+ getExpected.open("GET", test.expectedUrl, true);
+ getExpected.responseType = "arraybuffer";
+ getExpected.onload = function() {
+ test.expectedWaveData = new Uint8Array(getExpected.response);
+ runTest(test, xhr.response, callback);
+ };
+ getExpected.send();
+ };
+ xhr.send();
+}
+
+function loadNextTest() {
+ if (files.length) {
+ loadTest(files.shift(), loadNextTest);
+ } else {
+ SimpleTest.finish();
+ }
+}
+
+loadNextTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html
new file mode 100644
index 0000000000..36e1e9f2cc
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html
@@ -0,0 +1,74 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaElementAudioSourceNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+function test() {
+ var audio = new Audio("small-shot.ogg");
+ var context = new AudioContext();
+ var expectedMinNonzeroSampleCount;
+ var expectedMaxNonzeroSampleCount;
+ var nonzeroSampleCount = 0;
+ var complete = false;
+ var iterationCount = 0;
+
+  // This test ensures we receive at least expectedMinNonzeroSampleCount nonzero samples
+ function processSamples(e) {
+ if (complete) {
+ return;
+ }
+
+ if (iterationCount == 0) {
+ // Don't start playing the audio until the AudioContext stuff is connected
+ // and running.
+ audio.play();
+ }
+ ++iterationCount;
+
+ var buf = e.inputBuffer.getChannelData(0);
+ var nonzeroSamplesThisBuffer = 0;
+ for (var i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ ++nonzeroSamplesThisBuffer;
+ }
+ }
+ nonzeroSampleCount += nonzeroSamplesThisBuffer;
+ is(e.inputBuffer.numberOfChannels, 1,
+ "Checking data channel count (nonzeroSamplesThisBuffer=" +
+ nonzeroSamplesThisBuffer + ")");
+ ok(nonzeroSampleCount <= expectedMaxNonzeroSampleCount,
+ "Too many nonzero samples (got " + nonzeroSampleCount + ", expected max " + expectedMaxNonzeroSampleCount + ")");
+ if (nonzeroSampleCount >= expectedMinNonzeroSampleCount &&
+ nonzeroSamplesThisBuffer == 0) {
+ ok(true,
+ "Check received enough nonzero samples (got " + nonzeroSampleCount + ", expected min " + expectedMinNonzeroSampleCount + ")");
+ SimpleTest.finish();
+ complete = true;
+ }
+ }
+
+ audio.onloadedmetadata = function() {
+ var node = new MediaElementAudioSourceNode(context, { mediaElement: audio });
+ var sp = context.createScriptProcessor(2048, 1);
+ node.connect(sp);
+ // Use a fuzz factor of 100 to account for samples that just happen to be zero
+ expectedMinNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) - 100;
+ expectedMaxNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) + 500;
+ sp.onaudioprocess = processSamples;
+ };
+}
+
+SpecialPowers.pushPrefEnv({"set": [["media.preload.default", 2], ["media.preload.auto", 3]]}, test);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html
new file mode 100644
index 0000000000..778658b332
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html
@@ -0,0 +1,94 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+  <title>Test MediaElementAudioSourceNode doesn't get data from cross-origin media resources</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+// Turn off the authentication dialog blocking for this test.
+SpecialPowers.setIntPref("network.auth.subresource-http-auth-allow", 2);
+
+var tests = [
+  // Not the same origin, no CORS requested: should have silence
+ { url: "http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg",
+ cors: null,
+ expectSilence: true },
+ // Same origin, should have sound
+ { url: "small-shot.ogg",
+ cors: null,
+ expectSilence: false },
+  // Cross-origin, but we asked for CORS and the server answered with the
+  // right header: should have sound
+ { url: "http://example.org:80/tests/dom/media/webaudio/test/corsServer.sjs",
+ cors: "anonymous",
+ expectSilence: false }
+];
+
+var testsRemaining = tests.length;
+
+tests.forEach(function(e) {
+ e.ac = new AudioContext();
+ var a = new Audio();
+ if (e.cors) {
+ a.crossOrigin = e.cors;
+ }
+ a.src = e.url;
+ document.body.appendChild(a);
+
+ a.onloadedmetadata = () => {
+ // Wait for "loadedmetadata" before capturing since tracks are then known
+ // directly. If we set up the capture before "loadedmetadata" we
+ // (internally) have to wait an extra async jump for tracks to become known
+ // to main thread, before setting up audio data forwarding to the node.
+ // As that happens, the audio resource may have already ended on slow test
+ // machines, causing failures.
+ a.onloadedmetadata = null;
+ var measn = e.ac.createMediaElementSource(a);
+ var sp = e.ac.createScriptProcessor(2048, 1);
+ sp.seenSound = false;
+ sp.onaudioprocess = checkBufferSilent;
+
+ measn.connect(sp);
+ a.play();
+ };
+
+ function checkFinished(sp) {
+ if (a.ended) {
+ sp.onaudioprocess = null;
+      var not = e.expectSilence ? "" : "not ";
+      is(e.expectSilence, !sp.seenSound,
+         "Buffer is " + not + "silent as expected, for " +
+ e.url + " (cors: " + e.cors + ")");
+ if (--testsRemaining == 0) {
+ SimpleTest.finish();
+ }
+ }
+ }
+
+ function checkBufferSilent(e) {
+ var inputArrayBuffer = e.inputBuffer.getChannelData(0);
+ var silent = true;
+ for (var i = 0; i < inputArrayBuffer.length; i++) {
+ if (inputArrayBuffer[i] != 0.0) {
+ silent = false;
+ break;
+ }
+ }
+ // It is acceptable to find a full buffer of silence here, even if we expect
+ // sound, because Gecko's looping on media elements is not seamless and we
+ // can underrun. We are looking for at least one buffer of non-silent data.
+ e.target.seenSound = !silent || e.target.seenSound;
+ checkFinished(e.target);
+ return silent;
+ }
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html
new file mode 100644
index 0000000000..42d6d6a045
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html
@@ -0,0 +1,137 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+  <title>Test MediaElementAudioSourceNode audio fidelity</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+function binIndexForFrequency(frequency, analyser) {
+ return 1 + Math.round(frequency *
+ analyser.fftSize /
+ analyser.context.sampleRate);
+}
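+// Worked example: with the default fftSize of 2048 and a 44100Hz context,
+// 440Hz maps to bin 1 + Math.round(440 * 2048 / 44100) = 21.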
+
+function debugCanvas(analyser) {
+ var cvs = document.createElement("canvas");
+ document.body.appendChild(cvs);
+
+ // Easy: 1px per bin
+ cvs.width = analyser.frequencyBinCount;
+ cvs.height = 256;
+ cvs.style.border = "1px solid red";
+
+ var c = cvs.getContext('2d');
+ var buf = new Uint8Array(analyser.frequencyBinCount);
+
+ function render() {
+ c.clearRect(0, 0, cvs.width, cvs.height);
+ analyser.getByteFrequencyData(buf);
+ for (var i = 0; i < buf.length; i++) {
+ c.fillRect(i, (256 - (buf[i])), 1, 256);
+ }
+ requestAnimationFrame(render);
+ }
+ requestAnimationFrame(render);
+}
+
+
+function checkFrequency(an) {
+ an.getFloatFrequencyData(frequencyArray);
+  // We should have no energy in the bins largely outside the one for 440Hz
+  // (the frequency of the sine wave). Start checking an octave above, since
+  // the Opus compression can add some harmonics to the pure sine wave.
+ var maxNoiseIndex = binIndexForFrequency(880, an);
+ for (var i = maxNoiseIndex + 1; i < frequencyArray.length; i++) {
+ if (frequencyArray[i] > frequencyArray[maxNoiseIndex]) {
+ maxNoiseIndex = i;
+ }
+ }
+
+  // On the other hand, we should find a peak at 440Hz. Our sine wave is not
+  // attenuated, so we expect the peak to reach 0 dBFS.
+ var index = binIndexForFrequency(440, an);
+ info("energy at 440: " + frequencyArray[index] +
+ ", threshold " + (an.maxDecibels - 10) +
+ "; max noise at index " + maxNoiseIndex +
+ ": " + frequencyArray[maxNoiseIndex] );
+ if (frequencyArray[index] < (an.maxDecibels - 10)) {
+ return false;
+ }
+  // Allow some slack: there might be some noise here because of the
+  // int -> float conversion or the Opus encoding.
+ if (frequencyArray[maxNoiseIndex] > an.minDecibels + 40) {
+ return false;
+ }
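+  // With the default AnalyserNode range (minDecibels -100, maxDecibels -30),
+  // these thresholds are -40dB for the 440Hz peak and -60dB for the noise
+  // floor.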
+
+ return true;
+}
+
+var audioElement = new Audio();
+audioElement.src = 'sine-440-10s.opus';
+audioElement.loop = true;
+var ac = new AudioContext();
+var mediaElementSource = ac.createMediaElementSource(audioElement);
+var an = ac.createAnalyser();
+// Use no smoothing as this would just average with previous
+// getFloatFrequencyData() calls. Non-seamless looping would introduce noise,
+// and smoothing would spread this into calls after the loop point.
+an.smoothingTimeConstant = 0;
+var frequencyArray = new Float32Array(an.frequencyBinCount);
+
+// Uncomment this to check what the analyser is doing.
+// debugCanvas(an);
+
+mediaElementSource.connect(an);
+
+audioElement.play();
+// We want to check that we have the expected audio for at least two loops of
+// the HTMLMediaElement, piped into an AudioContext. The file is ten seconds,
+// and we use the default FFT size.
+var lastCurrentTime = 0;
+var loopCount = 0;
+audioElement.onplaying = function() {
+ audioElement.ontimeupdate = function() {
+    // We don't run the analysis close to the loop point or at the
+    // beginning: looping is not seamless, so there could be an
+    // unpredictable amount of silence.
+    info("currentTime: " + audioElement.currentTime);
+    if (audioElement.currentTime < 4 ||
+        audioElement.currentTime > 8){
+      return;
+    }
+    var rv = checkFrequency(an);
+ if (!rv) {
+ ok(false, "Found unexpected noise during analysis.");
+ audioElement.ontimeupdate = null;
+ audioElement.onplaying = null;
+ ac.close();
+ audioElement.src = '';
+      SimpleTest.finish();
+ return;
+ }
+ ok(true, "Found correct audio signal during analysis");
+ info(lastCurrentTime + " " + audioElement.currentTime);
+ if (lastCurrentTime > audioElement.currentTime) {
+ info("loopCount: " + loopCount);
+ if (loopCount > 1) {
+ audioElement.ontimeupdate = null;
+ audioElement.onplaying = null;
+ ac.close();
+ audioElement.src = '';
+ SimpleTest.finish();
+ }
+ lastCurrentTime = audioElement.currentTime;
+ loopCount++;
+ } else {
+ lastCurrentTime = audioElement.currentTime;
+ }
+ }
+}
+
+</script>
diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html
new file mode 100644
index 0000000000..5ee12b16ad
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html
@@ -0,0 +1,66 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaElementAudioSourceNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+function test() {
+ var audio = new Audio("small-shot.ogg");
+ var context = new AudioContext();
+ var node = context.createMediaElementSource(audio);
+ var sp = context.createScriptProcessor(2048, 1);
+ node.connect(sp);
+ var nonzeroSampleCount = 0;
+ var complete = false;
+ var iterationCount = 0;
+
+ var srcWrapped = SpecialPowers.wrap(node);
+ ok("passThrough" in srcWrapped, "MediaElementAudioSourceNode should support the passThrough API");
+ srcWrapped.passThrough = true;
+
+  // With passThrough enabled the source is muted, so this test ensures we
+  // only ever receive zero samples.
+ function processSamples(e) {
+ if (complete) {
+ return;
+ }
+
+ if (iterationCount == 0) {
+ // Don't start playing the audio until the AudioContext stuff is connected
+ // and running.
+ audio.play();
+ }
+ ++iterationCount;
+
+ var buf = e.inputBuffer.getChannelData(0);
+ var nonzeroSamplesThisBuffer = 0;
+ for (var i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ ++nonzeroSamplesThisBuffer;
+ }
+ }
+ nonzeroSampleCount += nonzeroSamplesThisBuffer;
+ if (iterationCount == 10) {
+ is(nonzeroSampleCount, 0, "The input must be silence");
+ SimpleTest.finish();
+ complete = true;
+ }
+ }
+
+ audio.oncanplaythrough = function() {
+ sp.onaudioprocess = processSamples;
+ };
+}
+
+SpecialPowers.pushPrefEnv({"set": [["media.preload.default", 2], ["media.preload.auto", 3]]}, test);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html
new file mode 100644
index 0000000000..dcb85f12cb
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html
@@ -0,0 +1,70 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaElementAudioSourceNode before "loadedmetadata"</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var video = document.createElement("video");
+function test() {
+ video.src = "audiovideo.mp4";
+
+ var context = new AudioContext();
+ var complete = false;
+
+ video.onended = () => {
+ if (complete) {
+ return;
+ }
+
+ complete = true;
+ ok(false, "Video ended without any samples seen");
+ SimpleTest.finish();
+ };
+
+ video.ontimeupdate = () => {
+ info("Timeupdate: " + video.currentTime);
+ };
+
+ var node = context.createMediaElementSource(video);
+ var sp = context.createScriptProcessor(2048, 1);
+ node.connect(sp);
+
+ // This test ensures we receive some nonzero samples when we capture to
+ // WebAudio before "loadedmetadata".
+ sp.onaudioprocess = e => {
+ if (complete) {
+ return;
+ }
+
+ var buf = e.inputBuffer.getChannelData(0);
+ for (var i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ complete = true;
+ ok(true, "Got non-zero samples");
+ SimpleTest.finish();
+ return;
+ }
+ }
+ };
+
+ video.play();
+}
+
+if (video.canPlayType("video/mp4")) {
+ test();
+} else {
+ ok(true, "MP4 not supported. Skipping.");
+ SimpleTest.finish();
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html b/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html
new file mode 100644
index 0000000000..fd0ce8141b
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test MediaStreamAudioDestinationNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<audio id="audioelem"></audio>
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test uses a live media element so it needs to wait for the media stack to do some work.");
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+
+ var dest = new MediaStreamAudioDestinationNode(context);
+ source.connect(dest);
+
+ var elem = document.getElementById('audioelem');
+ elem.srcObject = dest.stream;
+ elem.onloadedmetadata = function() {
+ ok(true, "got metadata event");
+ setTimeout(function() {
+ is(elem.played.length, 1, "should have a played interval");
+ is(elem.played.start(0), 0, "should have played immediately");
+ isnot(elem.played.end(0), 0, "should have played for a non-zero interval");
+
+ // This will end the media element.
+ dest.stream.getTracks()[0].stop();
+ }, 2000);
+ };
+ elem.onended = function() {
+ ok(true, "media element ended after destination track.stop()");
+ SimpleTest.finish();
+ };
+
+ source.start(0);
+ elem.play();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html
new file mode 100644
index 0000000000..eaa8a564b9
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaStreamAudioSourceNode processing is correct</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function createBuffer(context) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ buffer.getChannelData(1)[i] = -buffer.getChannelData(0)[i];
+ }
+ return buffer;
+}
+
+var gTest = {
+ length: 2048,
+ skipOfflineContextTests: true,
+ createGraph(context) {
+ var sourceGraph = new AudioContext();
+ var source = sourceGraph.createBufferSource();
+ source.buffer = createBuffer(context);
+ var dest = sourceGraph.createMediaStreamDestination();
+ source.connect(dest);
+ source.start(0);
+
+ var mediaStreamSource = new MediaStreamAudioSourceNode(context, { mediaStream: dest.stream });
+ // channelCount and channelCountMode should have no effect
+ mediaStreamSource.channelCount = 1;
+ mediaStreamSource.channelCountMode = "explicit";
+ return mediaStreamSource;
+ },
+ createExpectedBuffers(context) {
+ return createBuffer(context);
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
new file mode 100644
index 0000000000..d79ce50ab8
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaStreamAudioSourceNode doesn't get data from cross-origin media resources</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+var audio = new Audio("http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg");
+audio.load();
+var context = new AudioContext();
+audio.onloadedmetadata = function() {
+ var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
+ var sp = context.createScriptProcessor(2048, 1);
+ node.connect(sp);
+ var nonzeroSampleCount = 0;
+ var complete = false;
+ var iterationCount = 0;
+
+  // The media resource is cross-origin, so this test ensures the captured
+  // stream yields only zero (silent) samples.
+ function processSamples(e) {
+ if (complete) {
+ return;
+ }
+
+ if (iterationCount == 0) {
+ // Don't start playing the audio until the AudioContext stuff is connected
+ // and running.
+ audio.play();
+ }
+ ++iterationCount;
+
+ var buf = e.inputBuffer.getChannelData(0);
+ var nonzeroSamplesThisBuffer = 0;
+ for (var i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ ++nonzeroSamplesThisBuffer;
+ }
+ }
+ is(nonzeroSamplesThisBuffer, 0,
+ "Checking all samples are zero");
+ if (iterationCount >= 20) {
+ SimpleTest.finish();
+ complete = true;
+ }
+ }
+
+ audio.oncanplaythrough = function() {
+ sp.onaudioprocess = processSamples;
+ };
+}
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html
new file mode 100644
index 0000000000..7920af9f7b
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html
@@ -0,0 +1,116 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test that MediaStreamAudioSourceNode and its input MediaStream stays alive while there are active tracks</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("gUM and WebAudio data is async to main thread. " +
+ "We need a timeout to see that something does " +
+ "NOT happen to data.");
+
+let context = new AudioContext();
+let analyser = context.createAnalyser();
+
+function wait(millis, resolveWithThis) {
+ return new Promise(resolve => setTimeout(() => resolve(resolveWithThis), millis));
+}
+
+function binIndexForFrequency(frequency) {
+ return 1 + Math.round(frequency * analyser.fftSize / context.sampleRate);
+}
+
+function waitForAudio(analysisFunction, cancelPromise) {
+ let data = new Uint8Array(analyser.frequencyBinCount);
+ let cancelled = false;
+ let cancelledMsg = "";
+ cancelPromise.then(msg => {
+ cancelled = true;
+ cancelledMsg = msg;
+ });
+ return new Promise((resolve, reject) => {
+ let loop = () => {
+ analyser.getByteFrequencyData(data);
+ if (cancelled) {
+ reject(new Error("waitForAudio cancelled: " + cancelledMsg));
+ return;
+ }
+ if (analysisFunction(data)) {
+ resolve();
+ return;
+ }
+ requestAnimationFrame(loop);
+ };
+ loop();
+ });
+}
+
+async function test(sourceNode) {
+ try {
+ await analyser.connect(context.destination);
+
+ ok(true, "Waiting for audio to pass through the analyser")
+ await waitForAudio(arr => arr[binIndexForFrequency(1000)] > 200,
+ wait(60000, "Timeout waiting for audio"));
+
+ ok(true, "Audio was detected by the analyser. Forcing CC.");
+ SpecialPowers.forceCC();
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ SpecialPowers.forceGC();
+
+ info("Checking that GC didn't destroy the stream or source node");
+ await waitForAudio(arr => arr[binIndexForFrequency(1000)] < 50,
+ wait(5000, "Timeout waiting for GC (timeout OK)"))
+ .then(() => Promise.reject("Audio stopped unexpectedly"),
+ () => Promise.resolve());
+
+ ok(true, "Audio is still flowing");
+ } catch(e) {
+ ok(false, "Error executing test: " + e + (e.stack ? "\n" + e.stack : ""));
+ SimpleTest.finish();
+ }
+}
+
+(async function() {
+ try {
+ await SpecialPowers.pushPrefEnv({
+ set: [
+ // This test expects the fake audio device, specifically for the tones
+ // it outputs. Explicitly disable the audio loopback device and enable
+ // fake streams.
+ ['media.audio_loopback_dev', ''],
+ ['media.navigator.streams.fake', true],
+ ['media.navigator.permission.disabled', true]
+ ]
+ });
+
+ // Test stream source GC
+ let stream = await navigator.mediaDevices.getUserMedia({audio: true});
+ let source = context.createMediaStreamSource(stream);
+ stream = null;
+ source.connect(analyser);
+ await test(source);
+
+ // Test track source GC
+ stream = await navigator.mediaDevices.getUserMedia({audio: true});
+ source = context.createMediaStreamTrackSource(stream.getAudioTracks()[0]);
+ stream = null;
+ source.connect(analyser);
+ await test(source);
+ } catch(e) {
+ ok(false, `Error executing test: ${e}${e.stack ? "\n" + e.stack : ""}`);
+ } finally {
+ context.close();
+ SimpleTest.finish();
+ }
+})();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html
new file mode 100644
index 0000000000..379bfdbc6a
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaStreamAudioSourceNode passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function createBuffer(context, delay) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048 - delay; ++i) {
+ buffer.getChannelData(0)[i + delay] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ buffer.getChannelData(1)[i + delay] = -buffer.getChannelData(0)[i + delay];
+ }
+ return buffer;
+}
+
+var gTest = {
+ length: 2048,
+ skipOfflineContextTests: true,
+ createGraph(context) {
+ var sourceGraph = new AudioContext();
+ var source = sourceGraph.createBufferSource();
+ source.buffer = createBuffer(context, 0);
+ var dest = sourceGraph.createMediaStreamDestination();
+ source.connect(dest);
+ source.start(0);
+
+ var mediaStreamSource = context.createMediaStreamSource(dest.stream);
+ // channelCount and channelCountMode should have no effect
+ mediaStreamSource.channelCount = 1;
+ mediaStreamSource.channelCountMode = "explicit";
+
+ var srcWrapped = SpecialPowers.wrap(mediaStreamSource);
+ ok("passThrough" in srcWrapped, "MediaStreamAudioSourceNode should support the passThrough API");
+ srcWrapped.passThrough = true;
+
+ return mediaStreamSource;
+ },
+ createExpectedBuffers(context) {
+ return context.createBuffer(2, 2048, context.sampleRate);
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html
new file mode 100644
index 0000000000..efacf1ecc5
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html
@@ -0,0 +1,74 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+  <title>Test MediaStreamAudioSourceNode resampling is correct</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+function test() {
+ var audio = new Audio("small-shot.ogg");
+ var context = new AudioContext();
+ var expectedMinNonzeroSampleCount;
+ var expectedMaxNonzeroSampleCount;
+ var nonzeroSampleCount = 0;
+ var complete = false;
+ var iterationCount = 0;
+
+  // This test ensures we receive at least expectedMinNonzeroSampleCount nonzero samples
+ function processSamples(e) {
+ if (complete) {
+ return;
+ }
+
+ if (iterationCount == 0) {
+ // Don't start playing the audio until the AudioContext stuff is connected
+ // and running.
+ audio.play();
+ }
+ ++iterationCount;
+
+ var buf = e.inputBuffer.getChannelData(0);
+ var nonzeroSamplesThisBuffer = 0;
+ for (var i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ ++nonzeroSamplesThisBuffer;
+ }
+ }
+ nonzeroSampleCount += nonzeroSamplesThisBuffer;
+ is(e.inputBuffer.numberOfChannels, 1,
+ "Checking data channel count (nonzeroSamplesThisBuffer=" +
+ nonzeroSamplesThisBuffer + ")");
+ ok(nonzeroSampleCount <= expectedMaxNonzeroSampleCount,
+ "Too many nonzero samples (got " + nonzeroSampleCount + ", expected max " + expectedMaxNonzeroSampleCount + ")");
+ if (nonzeroSampleCount >= expectedMinNonzeroSampleCount &&
+ nonzeroSamplesThisBuffer == 0) {
+ ok(true,
+ "Check received enough nonzero samples (got " + nonzeroSampleCount + ", expected min " + expectedMinNonzeroSampleCount + ")");
+ SimpleTest.finish();
+ complete = true;
+ }
+ }
+
+ audio.onloadedmetadata = function() {
+ var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
+ var sp = context.createScriptProcessor(2048, 1, 0);
+ node.connect(sp);
+ // Use a fuzz factor of 100 to account for samples that just happen to be zero
+ expectedMinNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) - 100;
+ expectedMaxNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) + 500;
+ sp.onaudioprocess = processSamples;
+ };
+}
+
+SpecialPowers.pushPrefEnv({"set": [["media.preload.default", 2], ["media.preload.auto", 3]]}, test);
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNode.html b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNode.html
new file mode 100644
index 0000000000..350e9e0fab
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNode.html
@@ -0,0 +1,54 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+ <title>Test MediaStreamTrackAudioSourceNode processing is correct</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function createBuffer(context) {
+ let buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (let i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ buffer.getChannelData(1)[i] = -buffer.getChannelData(0)[i];
+ }
+ return buffer;
+}
+
+let gTest = {
+ length: 2048,
+ skipOfflineContextTests: true,
+ createGraph(context) {
+ let sourceGraph = new AudioContext();
+ let source = sourceGraph.createBufferSource();
+ source.buffer = createBuffer(context);
+ let dest = sourceGraph.createMediaStreamDestination();
+ source.connect(dest);
+
+ // Extract first audio track from dest.stream
+ let track = dest.stream.getAudioTracks()[0];
+
+ source.start(0);
+
+ let mediaStreamTrackSource = new MediaStreamTrackAudioSourceNode(context, { mediaStreamTrack: track });
+ // channelCount and channelCountMode should have no effect
+ mediaStreamTrackSource.channelCount = 1;
+ mediaStreamTrackSource.channelCountMode = "explicit";
+ return mediaStreamTrackSource;
+ },
+ createExpectedBuffers(context) {
+ return createBuffer(context);
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeCrossOrigin.html b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeCrossOrigin.html
new file mode 100644
index 0000000000..313cd424c0
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeCrossOrigin.html
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+<title>Test MediaStreamTrackAudioSourceNode doesn't get data from cross-origin media resources</title>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+const CROSS_ORIGIN_URL = "http://example.org:80/tests/dom/media/webaudio/test/sine-440-10s.opus";
+let iterationCount = 0;
+let context = null;
+let sp = null;
+
+function processSamples(e) {
+ ++iterationCount;
+
+ let buf = e.inputBuffer.getChannelData(0);
+ let nonzeroSamplesThisBuffer = 0;
+ for (let i = 0; i < buf.length; ++i) {
+ if (buf[i] != 0) {
+ ++nonzeroSamplesThisBuffer;
+ }
+ }
+ is(nonzeroSamplesThisBuffer, 0,
+ "a source that is cross origin cannot be inspected by Web Audio");
+
+ if (iterationCount == 40) {
+ sp.onaudioprocess = null;
+ context.close();
+ SimpleTest.finish();
+ }
+}
+
+let audio = new Audio();
+audio.src = CROSS_ORIGIN_URL;
+audio.onloadedmetadata = function () {
+ context = new AudioContext();
+ let stream = audio.mozCaptureStream();
+ let track = stream.getAudioTracks()[0];
+ let node = context.createMediaStreamTrackSource(track);
+ node.connect(context.destination);
+ sp = context.createScriptProcessor(2048, 1);
+ sp.onaudioprocess = processSamples;
+ node.connect(sp);
+}
+
+</script>
+</pre>
+</body>
diff --git a/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeVideo.html b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeVideo.html
new file mode 100644
index 0000000000..b98cfc6a4f
--- /dev/null
+++ b/dom/media/webaudio/test/test_mediaStreamTrackAudioSourceNodeVideo.html
@@ -0,0 +1,27 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset="utf-8">
+<head>
+  <title>Test MediaStreamTrackAudioSourceNode throws with a video track</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="/tests/dom/media/webaudio/test/webaudio.js"></script>
+ <script type="text/javascript" src="/tests/dom/media/webrtc/tests/mochitests/head.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+ let context = new AudioContext();
+ let canvas = document.createElement("canvas");
+ canvas.getContext("2d");
+ let track = canvas.captureStream().getTracks()[0];
+
+ expectException(() => {
+ let mediaStreamTrackSource = new MediaStreamTrackAudioSourceNode(
+ context,
+ { mediaStreamTrack: track });
+ }, DOMException.INVALID_STATE_ERR);
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_mixingRules.html b/dom/media/webaudio/test/test_mixingRules.html
new file mode 100644
index 0000000000..719175fbfb
--- /dev/null
+++ b/dom/media/webaudio/test/test_mixingRules.html
@@ -0,0 +1,402 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Testcase for AudioNode channel up-mix/down-mix rules</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+
+<body>
+
+<script>
+
+// This test is based on http://src.chromium.org/viewvc/blink/trunk/LayoutTests/webaudio/audionode-channel-rules.html
+
+var context = null;
+var sp = null;
+var renderNumberOfChannels = 8;
+var singleTestFrameLength = 8;
+var testBuffers;
+
+// A list of connections to an AudioNode input, each of which is to be used in one or more specific test cases.
+// Each element in the list is a string, with the number of connections corresponding to the length of the string,
+// and each character in the string is from '1' to '8' representing a 1 to 8 channel connection (from an AudioNode output).
+// For example, the string "128" means 3 connections, having 1, 2, and 8 channels respectively.
+var connectionsList = [];
+for (var i = 1; i <= 8; ++i) {
+ connectionsList.push(i.toString());
+ for (var j = 1; j <= 8; ++j) {
+ connectionsList.push(i.toString() + j.toString());
+ }
+}
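+
+// With the loop above, connectionsList interleaves the single- and
+// two-connection cases: ["1", "11", "12", ..., "18", "2", "21", ...],
+// 8 + 64 = 72 entries in total.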
+
+// A list of mixing rules, each of which will be tested against all of the connections in connectionsList.
+var mixingRulesList = [
+ {channelCount: 1, channelCountMode: "max", channelInterpretation: "speakers"},
+ {channelCount: 2, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 3, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 4, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 5, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 6, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 7, channelCountMode: "clamped-max", channelInterpretation: "speakers"},
+ {channelCount: 2, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 3, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 4, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 5, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 6, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 7, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 8, channelCountMode: "explicit", channelInterpretation: "speakers"},
+ {channelCount: 1, channelCountMode: "max", channelInterpretation: "discrete"},
+ {channelCount: 2, channelCountMode: "clamped-max", channelInterpretation: "discrete"},
+ {channelCount: 3, channelCountMode: "clamped-max", channelInterpretation: "discrete"},
+ {channelCount: 4, channelCountMode: "clamped-max", channelInterpretation: "discrete"},
+ {channelCount: 5, channelCountMode: "clamped-max", channelInterpretation: "discrete"},
+ {channelCount: 6, channelCountMode: "clamped-max", channelInterpretation: "discrete"},
+ {channelCount: 3, channelCountMode: "explicit", channelInterpretation: "discrete"},
+ {channelCount: 4, channelCountMode: "explicit", channelInterpretation: "discrete"},
+ {channelCount: 5, channelCountMode: "explicit", channelInterpretation: "discrete"},
+ {channelCount: 6, channelCountMode: "explicit", channelInterpretation: "discrete"},
+ {channelCount: 7, channelCountMode: "explicit", channelInterpretation: "discrete"},
+ {channelCount: 8, channelCountMode: "explicit", channelInterpretation: "discrete"},
+];
+
+var numberOfTests = mixingRulesList.length * connectionsList.length;
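+// 26 mixing rules x 72 connection patterns = 1872 test cases, each rendered
+// in its own 8-frame window.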
+
+// Create an n-channel buffer, with all sample data zero except for a shifted impulse.
+// The impulse position depends on the channel index.
+// For example, for a 4-channel buffer:
+// channel0: 1 0 0 0 0 0 0 0
+// channel1: 0 1 0 0 0 0 0 0
+// channel2: 0 0 1 0 0 0 0 0
+// channel3: 0 0 0 1 0 0 0 0
+function createTestBuffer(numberOfChannels) {
+ var buffer = context.createBuffer(numberOfChannels, singleTestFrameLength, context.sampleRate);
+ for (var i = 0; i < numberOfChannels; ++i) {
+ var data = buffer.getChannelData(i);
+ data[i] = 1;
+ }
+ return buffer;
+}
+
+// Discrete channel interpretation mixing:
+// https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#UpMix
+// Up-mix by filling channels until they run out, then ignore the remaining destination channels.
+// Down-mix by filling as many channels as possible, then drop the remaining source channels.
+function discreteSum(sourceBuffer, destBuffer) {
+ if (sourceBuffer.length != destBuffer.length) {
+ is(sourceBuffer.length, destBuffer.length, "source and destination buffers should have the same length");
+ }
+
+ var numberOfChannels = Math.min(sourceBuffer.numberOfChannels, destBuffer.numberOfChannels);
+ var length = sourceBuffer.length;
+
+ for (var c = 0; c < numberOfChannels; ++c) {
+ var source = sourceBuffer.getChannelData(c);
+ var dest = destBuffer.getChannelData(c);
+ for (var i = 0; i < length; ++i) {
+ dest[i] += source[i];
+ }
+ }
+}
+
+// Speaker channel interpretation mixing:
+// https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#UpMix
+// eslint-disable-next-line complexity
+function speakersSum(sourceBuffer, destBuffer)
+{
+ var numberOfSourceChannels = sourceBuffer.numberOfChannels;
+ var numberOfDestinationChannels = destBuffer.numberOfChannels;
+ var length = destBuffer.length;
+
+ if ((numberOfDestinationChannels == 2 && numberOfSourceChannels == 1) ||
+ (numberOfDestinationChannels == 4 && numberOfSourceChannels == 1)) {
+ // Handle mono -> stereo/Quad case (summing mono channel into both left and right).
+ var source = sourceBuffer.getChannelData(0);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += source[i];
+ destR[i] += source[i];
+ }
+ } else if ((numberOfDestinationChannels == 4 && numberOfSourceChannels == 2) ||
+ (numberOfDestinationChannels == 6 && numberOfSourceChannels == 2)) {
+ // Handle stereo -> Quad/5.1 case (summing left and right channels into the output's left and right).
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += sourceL[i];
+ destR[i] += sourceR[i];
+ }
+ } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 2) {
+ // Handle stereo -> mono case. output += 0.5 * (input.L + input.R).
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var dest = destBuffer.getChannelData(0);
+
+ for (var i = 0; i < length; ++i) {
+ dest[i] += 0.5 * (sourceL[i] + sourceR[i]);
+ }
+ } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 4) {
+ // Handle Quad -> mono case. output += 0.25 * (input.L + input.R + input.SL + input.SR).
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceSL = sourceBuffer.getChannelData(2);
+ var sourceSR = sourceBuffer.getChannelData(3);
+ var dest = destBuffer.getChannelData(0);
+
+ for (var i = 0; i < length; ++i) {
+ dest[i] += 0.25 * (sourceL[i] + sourceR[i] + sourceSL[i] + sourceSR[i]);
+ }
+ } else if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 4) {
+ // Handle Quad -> stereo case. outputLeft += 0.5 * (input.L + input.SL),
+ // outputRight += 0.5 * (input.R + input.SR).
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceSL = sourceBuffer.getChannelData(2);
+ var sourceSR = sourceBuffer.getChannelData(3);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += 0.5 * (sourceL[i] + sourceSL[i]);
+ destR[i] += 0.5 * (sourceR[i] + sourceSR[i]);
+ }
+ } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 4) {
+    // Handle Quad -> 5.1 case. output (L, R, C, LFE, SL, SR) += (input.L, input.R, 0, 0, input.SL, input.SR).
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceSL = sourceBuffer.getChannelData(2);
+ var sourceSR = sourceBuffer.getChannelData(3);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+ var destSL = destBuffer.getChannelData(4);
+ var destSR = destBuffer.getChannelData(5);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += sourceL[i];
+ destR[i] += sourceR[i];
+ destSL[i] += sourceSL[i];
+ destSR[i] += sourceSR[i];
+ }
+ } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 1) {
+ // Handle mono -> 5.1 case, sum mono channel into center.
+ var source = sourceBuffer.getChannelData(0);
+ var dest = destBuffer.getChannelData(2);
+
+ for (var i = 0; i < length; ++i) {
+ dest[i] += source[i];
+ }
+ } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 6) {
+ // Handle 5.1 -> mono.
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceC = sourceBuffer.getChannelData(2);
+ // skip LFE for now, according to current spec.
+ var sourceSL = sourceBuffer.getChannelData(4);
+ var sourceSR = sourceBuffer.getChannelData(5);
+ var dest = destBuffer.getChannelData(0);
+
+ for (var i = 0; i < length; ++i) {
+ dest[i] += 0.7071 * (sourceL[i] + sourceR[i]) + sourceC[i] + 0.5 * (sourceSL[i] + sourceSR[i]);
+ }
+ } else if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 6) {
+ // Handle 5.1 -> stereo.
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceC = sourceBuffer.getChannelData(2);
+ // skip LFE for now, according to current spec.
+ var sourceSL = sourceBuffer.getChannelData(4);
+ var sourceSR = sourceBuffer.getChannelData(5);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += sourceL[i] + 0.7071 * (sourceC[i] + sourceSL[i]);
+ destR[i] += sourceR[i] + 0.7071 * (sourceC[i] + sourceSR[i]);
+ }
+ } else if (numberOfDestinationChannels == 4 && numberOfSourceChannels == 6) {
+ // Handle 5.1 -> Quad.
+ var sourceL = sourceBuffer.getChannelData(0);
+ var sourceR = sourceBuffer.getChannelData(1);
+ var sourceC = sourceBuffer.getChannelData(2);
+ // skip LFE for now, according to current spec.
+ var sourceSL = sourceBuffer.getChannelData(4);
+ var sourceSR = sourceBuffer.getChannelData(5);
+ var destL = destBuffer.getChannelData(0);
+ var destR = destBuffer.getChannelData(1);
+ var destSL = destBuffer.getChannelData(2);
+ var destSR = destBuffer.getChannelData(3);
+
+ for (var i = 0; i < length; ++i) {
+ destL[i] += sourceL[i] + 0.7071 * sourceC[i];
+ destR[i] += sourceR[i] + 0.7071 * sourceC[i];
+ destSL[i] += sourceSL[i];
+ destSR[i] += sourceSR[i];
+ }
+ } else {
+ // Fallback for unknown combinations.
+ discreteSum(sourceBuffer, destBuffer);
+ }
+}
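+
+// The 0.7071 factors above are 1/sqrt(2), the power-preserving coefficient
+// the spec prescribes for folding center and surround channels in.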
+
+function scheduleTest(testNumber, connections, channelCount, channelCountMode, channelInterpretation) {
+ var mixNode = context.createGain();
+ mixNode.channelCount = channelCount;
+ mixNode.channelCountMode = channelCountMode;
+ mixNode.channelInterpretation = channelInterpretation;
+ mixNode.connect(sp);
+
+ for (var i = 0; i < connections.length; ++i) {
+ var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0);
+
+ var source = context.createBufferSource();
+ // Get a buffer with the right number of channels, converting from 1-based to 0-based index.
+ var buffer = testBuffers[connectionNumberOfChannels - 1];
+ source.buffer = buffer;
+ source.connect(mixNode);
+
+ // Start at the right offset.
+ var sampleFrameOffset = testNumber * singleTestFrameLength;
+ var time = sampleFrameOffset / context.sampleRate;
+ source.start(time);
+ }
+}
+
+function computeNumberOfChannels(connections, channelCount, channelCountMode) {
+ if (channelCountMode == "explicit")
+ return channelCount;
+
+ var computedNumberOfChannels = 1; // Must have at least one channel.
+
+ // Compute "computedNumberOfChannels" based on all the connections.
+ for (var i = 0; i < connections.length; ++i) {
+ var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0);
+ computedNumberOfChannels = Math.max(computedNumberOfChannels, connectionNumberOfChannels);
+ }
+
+ if (channelCountMode == "clamped-max")
+ computedNumberOfChannels = Math.min(computedNumberOfChannels, channelCount);
+
+ return computedNumberOfChannels;
+}
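+
+// Worked example: for connections "18" (a 1-channel and an 8-channel input)
+// the computed maximum is 8, so "max" yields 8, "clamped-max" with
+// channelCount 4 yields min(8, 4) = 4, and "explicit" always yields
+// channelCount.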
+
+function checkTestResult(renderedBuffer, testNumber, connections, channelCount, channelCountMode, channelInterpretation) {
+ var computedNumberOfChannels = computeNumberOfChannels(connections, channelCount, channelCountMode);
+
+ // Create a zero-initialized silent AudioBuffer with computedNumberOfChannels.
+ var destBuffer = context.createBuffer(computedNumberOfChannels, singleTestFrameLength, context.sampleRate);
+
+ // Mix all of the connections into the destination buffer.
+ for (var i = 0; i < connections.length; ++i) {
+ var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0);
+ var sourceBuffer = testBuffers[connectionNumberOfChannels - 1]; // convert from 1-based to 0-based index
+
+ if (channelInterpretation == "speakers") {
+ speakersSum(sourceBuffer, destBuffer);
+ } else if (channelInterpretation == "discrete") {
+ discreteSum(sourceBuffer, destBuffer);
+ } else {
+ ok(false, "Invalid channel interpretation!");
+ }
+ }
+
+ // Validate that destBuffer matches the rendered output.
+ // We need to check the rendered output at a specific sample-frame-offset corresponding
+ // to the specific test case we're checking for based on testNumber.
+
+ var sampleFrameOffset = testNumber * singleTestFrameLength;
+ for (var c = 0; c < renderNumberOfChannels; ++c) {
+ var renderedData = renderedBuffer.getChannelData(c);
+ for (var frame = 0; frame < singleTestFrameLength; ++frame) {
+ var renderedValue = renderedData[frame + sampleFrameOffset];
+
+ var expectedValue = 0;
+ if (c < destBuffer.numberOfChannels) {
+ var expectedData = destBuffer.getChannelData(c);
+ expectedValue = expectedData[frame];
+ }
+
+ if (Math.abs(renderedValue - expectedValue) > 1e-4) {
+ var s = "connections: " + connections + ", " + channelCountMode;
+
+ // channelCount is ignored in "max" mode.
+ if (channelCountMode == "clamped-max" || channelCountMode == "explicit") {
+ s += "(" + channelCount + ")";
+ }
+
+ s += ", " + channelInterpretation + ". ";
+
+ var message = s + "rendered: " + renderedValue + " expected: " + expectedValue + " channel: " + c + " frame: " + frame;
+ is(renderedValue, expectedValue, message);
+ }
+ }
+ }
+}
+
+function checkResult(event) {
+ var buffer = event.inputBuffer;
+
+  // Sanity check: the captured buffer must be large enough to hold every
+  // test's frames and have the full complement of render channels.
+  ok(buffer.length >= numberOfTests * singleTestFrameLength &&
+     buffer.numberOfChannels == renderNumberOfChannels, "Sanity check");
+
+ // Check all the tests.
+ var testNumber = 0;
+ for (var m = 0; m < mixingRulesList.length; ++m) {
+ var mixingRules = mixingRulesList[m];
+ for (var i = 0; i < connectionsList.length; ++i, ++testNumber) {
+ checkTestResult(buffer, testNumber, connectionsList[i], mixingRules.channelCount, mixingRules.channelCountMode, mixingRules.channelInterpretation);
+ }
+ }
+
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+}
+
+SimpleTest.waitForExplicitFinish();
+function runTest() {
+  // Create an AudioContext and capture its output through an 8-channel
+  // ScriptProcessor. Each test renders 8 sample-frames starting at
+  // sample-frame position testNumber * 8.
+ var totalFrameLength = numberOfTests * singleTestFrameLength;
+ context = new AudioContext();
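+  // ScriptProcessor buffer lengths must be a power of two between 256 and
+  // 16384, so round the total test length up to the next power of two.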
+ var nextPowerOfTwo = 256;
+ while (nextPowerOfTwo < totalFrameLength) {
+ nextPowerOfTwo *= 2;
+ }
+ sp = context.createScriptProcessor(nextPowerOfTwo, renderNumberOfChannels);
+
+ // Set destination to discrete mixing.
+ sp.channelCount = renderNumberOfChannels;
+ sp.channelCountMode = "explicit";
+ sp.channelInterpretation = "discrete";
+
+ // Create test buffers from 1 to 8 channels.
+  testBuffers = [];
+ for (var i = 0; i < renderNumberOfChannels; ++i) {
+ testBuffers[i] = createTestBuffer(i + 1);
+ }
+
+ // Schedule all the tests.
+ var testNumber = 0;
+ for (var m = 0; m < mixingRulesList.length; ++m) {
+ var mixingRules = mixingRulesList[m];
+ for (var i = 0; i < connectionsList.length; ++i, ++testNumber) {
+ scheduleTest(testNumber, connectionsList[i], mixingRules.channelCount, mixingRules.channelCountMode, mixingRules.channelInterpretation);
+ }
+ }
+
+ // Render then check results.
+ sp.onaudioprocess = checkResult;
+}
+
+runTest();
+
+</script>
+
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_nodeCreationDocumentGone.html b/dom/media/webaudio/test/test_nodeCreationDocumentGone.html
new file mode 100644
index 0000000000..07a4f7a97d
--- /dev/null
+++ b/dom/media/webaudio/test/test_nodeCreationDocumentGone.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test AudioNode creation after the owning document is gone</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.requestCompleteLog();
+SimpleTest.waitForExplicitFinish();
+
+var a = window.open("file_nodeCreationDocumentGone.html");
+a.onbeforeunload = function() {
+  setTimeout(function() {
+    try {
+      a.context.createScriptProcessor(512, 1, 1);
+    } catch(e) {
+      ok(true, "got exception");
+    }
+    setTimeout(function() {
+      ok(true, "no crash");
+      SimpleTest.finish();
+    }, 0);
+  }, 0);
+};
+
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_nodeToParamConnection.html b/dom/media/webaudio/test/test_nodeToParamConnection.html
new file mode 100644
index 0000000000..8a77e7d0a2
--- /dev/null
+++ b/dom/media/webaudio/test/test_nodeToParamConnection.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test connecting an AudioNode to an AudioParam</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ createGraph(context) {
+ var sourceBuffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ sourceBuffer.getChannelData(0)[i] = 1;
+ sourceBuffer.getChannelData(1)[i] = -1;
+ }
+
+ var destination = context.destination;
+
+ var paramSource = context.createBufferSource();
+ paramSource.buffer = this.buffer;
+
+ var source = context.createBufferSource();
+ source.buffer = sourceBuffer;
+
+ var gain = context.createGain();
+
+ paramSource.connect(gain.gain);
+ source.connect(gain);
+
+ paramSource.start(0);
+ source.start(0);
+ return gain;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ for (var j = 0; j < 2; ++j) {
+ this.buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate);
+ }
+ }
+ var expectedBuffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ expectedBuffer.getChannelData(0)[i] = 1 + (this.buffer.getChannelData(0)[i] + this.buffer.getChannelData(1)[i]) / 2;
+ expectedBuffer.getChannelData(1)[i] = -(1 + (this.buffer.getChannelData(0)[i] + this.buffer.getChannelData(1)[i]) / 2);
+ }
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html b/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
new file mode 100644
index 0000000000..b8715c1644
--- /dev/null
+++ b/dom/media/webaudio/test/test_notAllowedToStartAudioContextGC.html
@@ -0,0 +1,57 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test GC for not-allow-to-start audio context</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.requestFlakyTimeout(`Timeout is needed to check that the context's destination node does not get collected`);
+
+SimpleTest.waitForExplicitFinish();
+
+var destId;
+
+function observer(subject, topic, data) {
+ let id = parseInt(data);
+ ok(id != destId, "dropping another node, not the context's destination");
+}
+
+SpecialPowers.addAsyncObserver(observer, "webaudio-node-demise", false);
+SimpleTest.registerCleanupFunction(function() {
+ SpecialPowers.removeAsyncObserver(observer, "webaudio-node-demise");
+});
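+
+// Gecko dispatches a "webaudio-node-demise" observer notification carrying
+// the id of each AudioNode as it is destroyed; the observer above fails the
+// test if the blocked context's destination node is ever among them.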
+
+SpecialPowers.pushPrefEnv({"set": [["media.autoplay.default", SpecialPowers.Ci.nsIAutoplay.BLOCKED],
+ ["media.autoplay.blocking_policy", 0]]},
+ startTest);
+
+function startTest() {
+ info("- create audio context -");
+ let ac = new AudioContext();
+
+ info("- get node Id -");
+ destId = SpecialPowers.getPrivilegedProps(ac.destination, "id");
+
+ info("- trigger GCs -");
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ SpecialPowers.forceGC();
+
+ info("- after three GCs -");
+
+ // We're doing this async so that we can receive observerservice messages.
+ setTimeout(function() {
+    ok(true, "AudioContext that has been prevented from starting has correctly survived GC");
+ SimpleTest.finish();
+ }, 1);
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html b/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html
new file mode 100644
index 0000000000..8ff1deac4b
--- /dev/null
+++ b/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html
@@ -0,0 +1,42 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test OfflineAudioContext with a channel count less than the specified number</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ctx = new OfflineAudioContext(2, 100, 22050);
+
+ var buf = ctx.createBuffer(6, 100, ctx.sampleRate);
+ for (var i = 0; i < 6; ++i) {
+ for (var j = 0; j < 100; ++j) {
+ buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate);
+ }
+ }
+
+ var src = ctx.createBufferSource();
+ src.buffer = buf;
+ src.start(0);
+ src.connect(ctx.destination);
+ ctx.destination.channelCountMode = "max";
+ ctx.startRendering();
+ ctx.oncomplete = function(e) {
+    is(e.renderedBuffer.numberOfChannels, 2, "Correct number of channels in the rendered buffer");
+ compareChannels(e.renderedBuffer.getChannelData(0), buf.getChannelData(0));
+ compareChannels(e.renderedBuffer.getChannelData(1), buf.getChannelData(1));
+
+ SimpleTest.finish();
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html b/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html
new file mode 100644
index 0000000000..fa38114e2b
--- /dev/null
+++ b/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test OfflineAudioContext with a channel count more than the specified number</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ctx = new OfflineAudioContext(6, 100, 22050);
+
+ var buf = ctx.createBuffer(2, 100, ctx.sampleRate);
+ for (var i = 0; i < 2; ++i) {
+ for (var j = 0; j < 100; ++j) {
+ buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate);
+ }
+ }
+ var emptyBuffer = ctx.createBuffer(1, 100, ctx.sampleRate);
+
+ var src = ctx.createBufferSource();
+ src.buffer = buf;
+ src.start(0);
+ src.connect(ctx.destination);
+ ctx.destination.channelCountMode = "max";
+ ctx.startRendering();
+ ctx.oncomplete = function(e) {
+    is(e.renderedBuffer.numberOfChannels, 6, "Correct number of channels in the rendered buffer");
+ compareChannels(e.renderedBuffer.getChannelData(0), buf.getChannelData(0));
+ compareChannels(e.renderedBuffer.getChannelData(1), buf.getChannelData(1));
+ for (var i = 2; i < 6; ++i) {
+ compareChannels(e.renderedBuffer.getChannelData(i), emptyBuffer.getChannelData(0));
+ }
+
+ SimpleTest.finish();
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNode.html b/dom/media/webaudio/test/test_oscillatorNode.html
new file mode 100644
index 0000000000..e2a47a4e1e
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNode.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the OscillatorNode interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var context = new AudioContext();
+ var osc = new OscillatorNode(context);
+
+ is(osc.channelCount, 2, "Oscillator node has 2 input channels by default");
+ is(osc.channelCountMode, "max", "Correct channelCountMode for the Oscillator node");
+  is(osc.channelInterpretation, "speakers", "Correct channelInterpretation for the Oscillator node");
+ is(osc.type, "sine", "Correct default type");
+ expectException(function() {
+ osc.type = "custom";
+ }, DOMException.INVALID_STATE_ERR);
+ is(osc.type, "sine", "Cannot set the type to custom");
+ is(osc.frequency.value, 440, "Correct default frequency value");
+  is(osc.detune.value, 0, "Correct default detune value");
+
+ // Make sure that we can set all of the valid type values
+ var types = [
+ "sine",
+ "square",
+ "sawtooth",
+ "triangle",
+ ];
+ for (var i = 0; i < types.length; ++i) {
+ osc.type = types[i];
+ }
+
+ // Verify setPeriodicWave()
+ var real = new Float32Array([1.0, 0.5, 0.25, 0.125]);
+ var imag = new Float32Array([1.0, 0.7, -1.0, 0.5]);
+ osc.setPeriodicWave(context.createPeriodicWave(real, imag));
+ is(osc.type, "custom", "Failed to set custom waveform");
+
+ expectNoException(function() {
+ osc.start();
+ });
+ expectNoException(function() {
+ osc.stop();
+ });
+
+ SimpleTest.finish();
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNode2.html b/dom/media/webaudio/test/test_oscillatorNode2.html
new file mode 100644
index 0000000000..69a6655ff1
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNode2.html
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test OscillatorNode lifetime and sine phase</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const signalLength = 2048;
+
+function createOscillator(context) {
+ var osc = context.createOscillator();
+ osc.start(0);
+ osc.stop(signalLength/context.sampleRate);
+ return osc;
+}
+
+function connectUnreferencedOscillator(context, destination) {
+ var osc = createOscillator(context);
+ osc.connect(destination);
+}
+
+var gTest = {
+ length: signalLength,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var blend = context.createGain();
+
+ connectUnreferencedOscillator(context, blend);
+ // Test that the unreferenced oscillator remains alive until it has finished.
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+
+    // Create another sine wave oscillator with a negative frequency, which
+    // should cancel when mixed with the unreferenced oscillator.
+ var oscillator = createOscillator(context);
+ oscillator.frequency.value = -440;
+ oscillator.connect(blend);
+
+ return blend;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html b/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html
new file mode 100644
index 0000000000..c46c0fea13
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the OscillatorNode when the frequency is negative</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var types = ["sine",
+ "square",
+ "sawtooth",
+ "triangle"];
+
+ var finished = 0;
+ function finish() {
+ if (++finished == types.length) {
+ SimpleTest.finish();
+ }
+ }
+
+ types.forEach(function(t) {
+ var context = new OfflineAudioContext(1, 256, 44100);
+ var osc = context.createOscillator();
+
+ osc.frequency.value = -440;
+ osc.type = t;
+
+ osc.connect(context.destination);
+ osc.start();
+ context.startRendering().then(function(buffer) {
+ var samples = buffer.getChannelData(0);
+      // This samples the waveform a quarter period into the first cycle;
+      // with a negative frequency the phase is inverted, so the value
+      // should be negative.
+ ok(samples[Math.floor(44100 / 440 / 4)] < 0., "Phase should be inverted when using a " + t + " waveform");
+ finish();
+ });
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNodePassThrough.html b/dom/media/webaudio/test/test_oscillatorNodePassThrough.html
new file mode 100644
index 0000000000..63c0848d06
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNodePassThrough.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test Oscillator with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var source = context.createOscillator();
+
+ var srcWrapped = SpecialPowers.wrap(source);
+ ok("passThrough" in srcWrapped, "OscillatorNode should support the passThrough API");
+ srcWrapped.passThrough = true;
+
+ source.start(0);
+ return source;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ return [expectedBuffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNodeStart.html b/dom/media/webaudio/test/test_oscillatorNodeStart.html
new file mode 100644
index 0000000000..4df129170f
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNodeStart.html
@@ -0,0 +1,38 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that the OscillatorNode is silent before start()</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var context = new AudioContext();
+ var osc = context.createOscillator();
+ var sp = context.createScriptProcessor(0, 1, 0);
+
+ osc.connect(sp);
+
+ sp.onaudioprocess = function (e) {
+ var input = e.inputBuffer.getChannelData(0);
+ var isSilent = true;
+ for (var i = 0; i < input.length; i++) {
+ if (input[i] != 0.0) {
+ isSilent = false;
+ }
+ }
+ sp.onaudioprocess = null;
+ ok(isSilent, "OscillatorNode should be silent before calling start.");
+ SimpleTest.finish();
+ }
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorTypeChange.html b/dom/media/webaudio/test/test_oscillatorTypeChange.html
new file mode 100644
index 0000000000..e4b4944703
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorTypeChange.html
@@ -0,0 +1,58 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test OscillatorNode type change after it has started and triangle phase</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const bufferSize = 1024;
+
+function startTest() {
+ var ctx = new AudioContext();
+
+ var oscillator1 = ctx.createOscillator();
+ oscillator1.connect(ctx.destination);
+ oscillator1.start(0);
+
+ // Assuming the above Web Audio operations have already scheduled an event
+ // to run in stable state and start the graph thread, schedule a subsequent
+ // event to change the type of oscillator1.
+ SimpleTest.executeSoon(function() {
+ oscillator1.type = "triangle";
+
+    // Another triangle wave with -1 gain should cancel the first. It starts
+    // at the same time as the type change, assuming that the phase is reset
+    // on type change. A negative frequency would achieve the same as the
+    // -1 gain, were it not for bug 916285.
+ var oscillator2 = ctx.createOscillator();
+ oscillator2.type = "triangle";
+ oscillator2.start(0);
+
+ var processor = ctx.createScriptProcessor(bufferSize, 1, 0);
+ oscillator1.connect(processor);
+ var gain = ctx.createGain();
+ gain.gain.value = -1;
+ gain.connect(processor);
+ oscillator2.connect(gain);
+
+ processor.onaudioprocess = function(e) {
+ compareChannels(e.inputBuffer.getChannelData(0),
+ new Float32Array(bufferSize));
+ e.target.onaudioprocess = null;
+ SimpleTest.finish();
+ }
+ });
+};
+
+startTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNode.html b/dom/media/webaudio/test/test_pannerNode.html
new file mode 100644
index 0000000000..7f4d3ea915
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNode.html
@@ -0,0 +1,71 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+function near(a, b, msg) {
+ ok(Math.abs(a - b) < 1e-4, msg);
+}
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var panner = new PannerNode(context);
+
+ source.buffer = buffer;
+
+ source.connect(panner);
+ panner.connect(destination);
+
+ // Verify default values
+ is(panner.panningModel, "equalpower", "Correct default value for panning model");
+ is(panner.distanceModel, "inverse", "Correct default value for distance model");
+ near(panner.refDistance, 1, "Correct default value for ref distance");
+ near(panner.maxDistance, 10000, "Correct default value for max distance");
+ near(panner.rolloffFactor, 1, "Correct default value for rolloff factor");
+ near(panner.coneInnerAngle, 360, "Correct default value for cone inner angle");
+ near(panner.coneOuterAngle, 360, "Correct default value for cone outer angle");
+ near(panner.coneOuterGain, 0, "Correct default value for cone outer gain");
+ is(panner.channelCount, 2, "panner node has 2 input channels by default");
+ is(panner.channelCountMode, "clamped-max", "Correct channelCountMode for the panner node");
+  is(panner.channelInterpretation, "speakers", "Correct channelInterpretation for the panner node");
+
+ panner.setPosition(1, 1, 1);
+ near(panner.positionX.value, 1, "setPosition sets AudioParam properly");
+ near(panner.positionY.value, 1, "setPosition sets AudioParam properly");
+ near(panner.positionZ.value, 1, "setPosition sets AudioParam properly");
+
+ panner.setOrientation(0, 1, 0);
+ near(panner.orientationX.value, 0, "setOrientation sets AudioParam properly");
+ near(panner.orientationY.value, 1, "setOrientation sets AudioParam properly");
+ near(panner.orientationZ.value, 0, "setOrientation sets AudioParam properly");
+
+ source.start(0);
+ SimpleTest.executeSoon(function() {
+ source.stop(0);
+ source.disconnect();
+ panner.disconnect();
+
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeAbove.html b/dom/media/webaudio/test/test_pannerNodeAbove.html
new file mode 100644
index 0000000000..5931fa04de
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeAbove.html
@@ -0,0 +1,50 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode directly above</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ numberOfChannels: 2,
+ createGraph(context) {
+ // An up vector will be made perpendicular to the front vector, in the
+ // front-up plane.
+ context.listener.setOrientation(0, 6.311749985202524e+307, 0, 0.1, 1000, 0);
+ // Linearly dependent vectors are ignored.
+ context.listener.setOrientation(0, 0, -6.311749985202524e+307, 0, 0, 6.311749985202524e+307);
+ var panner = context.createPanner();
+ panner.positionX.value = 2; // directly above
+ panner.rolloffFactor = 0; // no distance gain
+ panner.panningModel = "equalpower"; // no effect when directly above
+
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+ source.connect(panner);
+ source.start(0);
+
+ return panner;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ // Different signals in left and right buffers
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ expectedBuffer.getChannelData(1)[i] = Math.sin(220 * 2 * Math.PI * i / context.sampleRate);
+ }
+ this.buffer = expectedBuffer;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html b/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html
new file mode 100644
index 0000000000..a0c20f01fe
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html
@@ -0,0 +1,149 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test that PannerNode produces output even when the distance from the listener is zero</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var BUF_SIZE = 128;
+
+var types = [
+  "equalpower",
+  "HRTF"
+];
+
+var finished = 2 * types.length;
+
+function finish() {
+ if (!--finished) {
+ SimpleTest.finish();
+ }
+}
+
+function testMono(type) {
+ var ac = new OfflineAudioContext(1, BUF_SIZE, 44100);
+
+ // A sine to be used to fill the buffers
+ function sine(t) {
+ return Math.sin(440 * 2 * Math.PI * t / ac.sampleRate);
+ }
+
+ var monoBuffer = ac.createBuffer(1, BUF_SIZE, ac.sampleRate);
+ for (var i = 0; i < BUF_SIZE; ++i) {
+ monoBuffer.getChannelData(0)[i] = sine(i);
+ }
+
+ var monoSource = ac.createBufferSource();
+ monoSource.buffer = monoBuffer;
+ monoSource.start(0);
+
+ var panner = ac.createPanner();
+ panner.distanceModel = "linear";
+ panner.refDistance = 1;
+ panner.positionX.value = 0;
+ panner.positionY.value = 0;
+ panner.positionZ.value = 0;
+ monoSource.connect(panner);
+
+ var panner2 = ac.createPanner();
+ panner2.distanceModel = "inverse";
+ panner2.refDistance = 1;
+ panner2.positionX.value = 0;
+ panner2.positionY.value = 0;
+ panner2.positionZ.value = 0;
+ panner.connect(panner2);
+
+ var panner3 = ac.createPanner();
+ panner3.distanceModel = "exponential";
+ panner3.refDistance = 1;
+ panner3.positionX.value = 0;
+ panner3.positionY.value = 0;
+ panner3.positionZ.value = 0;
+ panner2.connect(panner3);
+
+ panner3.connect(ac.destination);
+
+ // Use the input buffer to compare the output. According to the spec,
+ // mono input at zero distance will apply gain = cos(0.5 * Math.PI / 2)
+ // https://webaudio.github.io/web-audio-api/#Spatialzation-equal-power-panning
+ const gain = Math.cos(0.5 * Math.PI / 2);
+ for (var i = 0; i < BUF_SIZE; ++i) {
+ monoBuffer.getChannelData(0)[i] = gain * monoBuffer.getChannelData(0)[i];
+ }
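+
+  // Per the spec reference above, cos(0.5 * PI / 2) = cos(PI / 4), roughly
+  // 0.7071: the equal-power gain applied to a mono source dead ahead.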
+
+ ac.startRendering().then(function(buffer) {
+ compareBuffers(buffer, monoBuffer);
+ finish();
+ });
+}
+
+function testStereo(type) {
+ var ac = new OfflineAudioContext(2, BUF_SIZE, 44100);
+
+ // A sine to be used to fill the buffers
+ function sine(t) {
+ return Math.sin(440 * 2 * Math.PI * t / ac.sampleRate);
+ }
+
+ var stereoBuffer = ac.createBuffer(2, BUF_SIZE, ac.sampleRate);
+ for (var i = 0; i < BUF_SIZE; ++i) {
+ stereoBuffer.getChannelData(0)[i] = sine(i);
+ stereoBuffer.getChannelData(1)[i] = sine(i);
+ }
+
+ var stereoSource = ac.createBufferSource();
+ stereoSource.buffer = stereoBuffer;
+ stereoSource.start(0);
+
+ var panner = ac.createPanner();
+ panner.distanceModel = "linear";
+ panner.refDistance = 1;
+ panner.positionX.value = 0;
+ panner.positionY.value = 0;
+ panner.positionZ.value = 0;
+ stereoSource.connect(panner);
+
+ var panner2 = ac.createPanner();
+ panner2.distanceModel = "inverse";
+ panner2.refDistance = 1;
+ panner2.positionX.value = 0;
+ panner2.positionY.value = 0;
+ panner2.positionZ.value = 0;
+ panner.connect(panner2);
+
+ var panner3 = ac.createPanner();
+ panner3.distanceModel = "exponential";
+ panner3.refDistance = 1;
+ panner3.positionX.value = 0;
+ panner3.positionY.value = 0;
+ panner3.positionZ.value = 0;
+ panner2.connect(panner3);
+
+ panner3.connect(ac.destination);
+
+ ac.startRendering().then(function(buffer) {
+ compareBuffers(buffer, stereoBuffer);
+ finish();
+ });
+}
+
+function test(type) {
+ testMono(type);
+ testStereo(type);
+}
+
+addLoadEvent(function() {
+ types.forEach(test);
+});
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeChannelCount.html b/dom/media/webaudio/test/test_pannerNodeChannelCount.html
new file mode 100644
index 0000000000..9cb90f32da
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeChannelCount.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test PannerNode channelCount and channelCountMode constraints</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 2,
+ createGraph(context) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ var sample = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ // When mixed into a single channel, this produces silence
+ buffer.getChannelData(0)[i] = sample;
+ buffer.getChannelData(1)[i] = -sample;
+ }
+
+ var panner = context.createPanner();
+ panner.positionX.value = 1;
+ panner.positionY.value = 2;
+ panner.positionZ.value = 3;
+ panner.channelCount = 1;
+ expectException(function() { panner.channelCount = 3; },
+ DOMException.NOT_SUPPORTED_ERR);
+ panner.channelCountMode = "explicit";
+ expectException(function() { panner.channelCountMode = "max"; },
+ DOMException.NOT_SUPPORTED_ERR);
+ panner.channelInterpretation = "discrete";
+ panner.channelInterpretation = "speakers";
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(panner);
+ source.start(0);
+
+ return panner;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html b/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html
new file mode 100644
index 0000000000..abd03b3898
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html
@@ -0,0 +1,107 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test left/right symmetry and block-offset invariance of HRTF panner</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+const blockSize = 128;
+const bufferSize = 4096; // > HRTF panner latency
+
+var ctx = new AudioContext();
+
+function isChannelSilent(channel) {
+ for (var i = 0; i < channel.length; ++i) {
+ if (channel[i] != 0.0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+function startTest() {
+ var leftPanner = ctx.createPanner();
+ var rightPanner = ctx.createPanner();
+ leftPanner.panningModel = "HRTF";
+ rightPanner.panningModel = "HRTF";
+ leftPanner.positionX.value = -1;
+ rightPanner.positionX.value = 1;
+
+ // Test that PannerNode processes the signal consistently irrespective of
+ // the offset in the processing block. This is done by inserting a delay of
+ // less than a block size before one panner.
+ const delayTime = 0.7 * blockSize / ctx.sampleRate;
+ var leftDelay = ctx.createDelay(delayTime);
+ leftDelay.delayTime.value = delayTime;
+ leftDelay.connect(leftPanner);
+ // and compensating for the delay after the other.
+ var rightDelay = ctx.createDelay(delayTime);
+ rightDelay.delayTime.value = delayTime;
+ rightPanner.connect(rightDelay);
+
+ // Feed the panners with a signal having some harmonics to fill the spectrum.
+ var oscillator = ctx.createOscillator();
+ oscillator.frequency.value = 110;
+ oscillator.type = "sawtooth";
+ oscillator.connect(leftDelay);
+ oscillator.connect(rightPanner);
+ oscillator.start(0);
+
+ // Switch the channels on one panner output, and it should match the other.
+ var splitter = ctx.createChannelSplitter();
+ leftPanner.connect(splitter);
+ var merger = ctx.createChannelMerger();
+ splitter.connect(merger, 0, 1);
+ splitter.connect(merger, 1, 0);
+
+ // Invert one signal so that mixing with the other will find the difference.
+ var gain = ctx.createGain();
+ gain.gain.value = -1.0;
+ merger.connect(gain);
+
+ var processor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ gain.connect(processor);
+ rightDelay.connect(processor);
+ processor.onaudioprocess =
+ function(e) {
+ compareBuffers(e.inputBuffer,
+ ctx.createBuffer(2, bufferSize, ctx.sampleRate));
+ e.target.onaudioprocess = null;
+ SimpleTest.finish();
+ }
+}
+
+function prepareTest() {
+ // A PannerNode will produce no output until it has loaded its HRIR
+ // database. Wait for this to load before starting the test.
+ var processor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ var panner = ctx.createPanner();
+ panner.panningModel = "HRTF";
+ panner.connect(processor);
+ var oscillator = ctx.createOscillator();
+ oscillator.connect(panner);
+ oscillator.start(0);
+
+ processor.onaudioprocess =
+ function(e) {
+ if (isChannelSilent(e.inputBuffer.getChannelData(0)))
+ return;
+
+ oscillator.stop(0);
+ panner.disconnect();
+ e.target.onaudioprocess = null;
+ startTest();
+ };
+}
+prepareTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodePassThrough.html b/dom/media/webaudio/test/test_pannerNodePassThrough.html
new file mode 100644
index 0000000000..d8c809a2e2
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodePassThrough.html
@@ -0,0 +1,53 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var panner = context.createPanner();
+
+ source.buffer = this.buffer;
+
+ source.connect(panner);
+
+ context.listener.setOrientation(0, 6.311749985202524e+307, 0, 0.1, 1000, 0);
+ context.listener.setOrientation(0, 0, -6.311749985202524e+307, 0, 0, 6.311749985202524e+307);
+    panner.positionX.value = 2;
+ panner.rolloffFactor = 0;
+ panner.panningModel = "equalpower";
+
+ var pannerWrapped = SpecialPowers.wrap(panner);
+ ok("passThrough" in pannerWrapped, "PannerNode should support the passThrough API");
+ pannerWrapped.passThrough = true;
+
+ source.start(0);
+ return panner;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeTail.html b/dom/media/webaudio/test/test_pannerNodeTail.html
new file mode 100644
index 0000000000..1f6483b581
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeTail.html
@@ -0,0 +1,232 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test tail time lifetime of PannerNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// This tests that a PannerNode does not release its reference before
+// it finishes emitting sound.
+//
+// The PannerNode tail time is short, so, when a PannerNode is destroyed on
+// the main thread, it is unlikely to notify the graph thread before the tail
+// time expires. However, by adding DelayNodes downstream from the
+// PannerNodes, the graph thread can have enough time to notice that a
+// DelayNode has been destroyed.
+//
+// In the current implementation, DelayNodes will take a tail-time reference
+// immediately when they receive the first block of sound from an upstream
+// node, so this test connects the downstream DelayNodes while the upstream
+// nodes are finishing, and then runs GC (on the main thread) before the
+// DelayNodes receive any input (on the graph thread).
+//
+// Web Audio doesn't provide a means to precisely time connect()s but we can
+// test that the output of delay nodes matches the output from a reference
+// PannerNode that we know will not be GCed.
+//
+// Another set of delay nodes is added upstream to ensure that the source node
+// has removed its self-reference after dispatching its "ended" event.
+
+SimpleTest.waitForExplicitFinish();
+
+const blockSize = 128;
+// bufferSize should be long enough to allow an audioprocess event to be
+// sent to the main thread and a connect message to return to the graph
+// thread.
+const bufferSize = 4096;
+const pannerCount = bufferSize / blockSize;
+// sourceDelayBufferCount should be long enough to allow the source node
+// onended to finish and remove the source self-reference.
+const sourceDelayBufferCount = 3;
+var gotEnded = false;
+// ccDelayLength should be long enough to allow CC to run
+var ccDelayBufferCount = 20;
+const ccDelayLength = ccDelayBufferCount * bufferSize;
+
+var ctx;
+var testPanners = [];
+var referencePanner;
+var referenceProcessCount = 0;
+var referenceOutput = [new Float32Array(bufferSize),
+ new Float32Array(bufferSize)];
+var testProcessor;
+var testProcessCount = 0;
+
+function isChannelSilent(channel) {
+ for (var i = 0; i < channel.length; ++i) {
+ if (channel[i] != 0.0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+function onReferenceOutput(e) {
+ switch(referenceProcessCount) {
+
+ case sourceDelayBufferCount - 1:
+ // The panners are about to finish.
+ if (!gotEnded) {
+ todo(false, "Source hasn't ended. Increase sourceDelayBufferCount?");
+ }
+
+ // Connect each PannerNode output to a downstream DelayNode,
+ // and connect ScriptProcessors to compare test and reference panners.
+ var delayDuration = ccDelayLength / ctx.sampleRate;
+ for (var i = 0; i < pannerCount; ++i) {
+ var delay = ctx.createDelay(delayDuration);
+ delay.delayTime.value = delayDuration;
+ delay.connect(testProcessor);
+ testPanners[i].connect(delay);
+ }
+ testProcessor = null;
+ testPanners = null;
+
+ // The panning effect is linear so only one reference panner is required.
+ // This also checks that the individual panners don't chop their output
+ // too soon.
+ referencePanner.connect(e.target);
+
+ // Assuming the above operations have already scheduled an event to run in
+ // stable state and ask the graph thread to make connections, schedule a
+ // subsequent event to run cycle collection, which should not collect
+ // panners that are still producing sound.
+ SimpleTest.executeSoon(function() {
+ SpecialPowers.forceGC();
+ SpecialPowers.forceCC();
+ });
+
+ break;
+
+ case sourceDelayBufferCount:
+ // Record this buffer during which PannerNode outputs were connected.
+ for (var i = 0; i < 2; ++i) {
+ e.inputBuffer.copyFromChannel(referenceOutput[i], i);
+ }
+ e.target.onaudioprocess = null;
+ e.target.disconnect();
+
+ // If the buffer is silent, there is probably not much point just
+ // increasing the buffer size, because, with the buffer size already
+ // significantly larger than panner tail time, it demonstrates that the
+ // lag between threads is much greater than the tail time.
+ if (isChannelSilent(referenceOutput[0])) {
+ todo(false, "Connections not detected.");
+ }
+ }
+
+ referenceProcessCount++;
+}
+
+function onTestOutput(e) {
+ if (testProcessCount < sourceDelayBufferCount + ccDelayBufferCount) {
+ testProcessCount++;
+ return;
+ }
+
+ for (var i = 0; i < 2; ++i) {
+ compareChannels(e.inputBuffer.getChannelData(i), referenceOutput[i]);
+ }
+ e.target.onaudioprocess = null;
+ e.target.disconnect();
+ SimpleTest.finish();
+}
+
+function startTest() {
+ // 0.002 is MaxDelayTimeSeconds in HRTFpanner.cpp
+ // and 512 is fftSize() at 48 kHz.
+ const expectedPannerTailTime = 0.002 * ctx.sampleRate + 512;
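+  // At 48 kHz that is 0.002 * 48000 + 512 = 608 frames, so the tail fits
+  // in under five 128-frame blocks.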
+
+ // Create some PannerNodes downstream from DelayNodes with delays long
+ // enough for their source to finish, dispatch its "ended" event
+ // and release its playing reference. The DelayNodes should expire their
+ // tail-time references before the PannerNodes and so only the PannerNode
+// lifetimes depend on their tail-time references. Many DelayNodes are
+ // created and timed to finish at different times so that one PannerNode
+ // will be finishing the block processed immediately after the connect is
+ // received.
+ var source = ctx.createBufferSource();
+ // Just short of blockSize here to avoid rounding into the next block
+ var buffer = ctx.createBuffer(1, blockSize - 1, ctx.sampleRate);
+ for (var i = 0; i < buffer.length; ++i) {
+ buffer.getChannelData(0)[i] = Math.cos(Math.PI * i / buffer.length);
+ }
+ source.buffer = buffer;
+ source.start(0);
+ source.onended = function(e) {
+ gotEnded = true;
+ };
+
+ // Time the first test panner to finish just before downstream DelayNodes
+ // are about to be connected. Note that DelayNode lifetime depends on
+ // maxDelayTime so set that equal to the delay.
+ var delayDuration =
+ (sourceDelayBufferCount * bufferSize
+ - expectedPannerTailTime - 2 * blockSize) / ctx.sampleRate;
+
+ for (var i = 0; i < pannerCount; ++i) {
+ var delay = ctx.createDelay(delayDuration);
+ delay.delayTime.value = delayDuration;
+ source.connect(delay);
+ delay.connect(referencePanner);
+
+ var panner = ctx.createPanner();
+ panner.panningModel = "HRTF";
+ delay.connect(panner);
+ testPanners[i] = panner;
+
+ delayDuration += blockSize / ctx.sampleRate;
+ }
+
+ // Create a ScriptProcessor now to use as a timer to trigger connection of
+ // downstream nodes. It will also be used to record reference output.
+ var referenceProcessor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ referenceProcessor.onaudioprocess = onReferenceOutput;
+ // Start audioprocess events before source delays are connected.
+ referenceProcessor.connect(ctx.destination);
+
+ // The test ScriptProcessor will record output of testPanners.
+ // Create it now so that it is synchronized with the referenceProcessor.
+ testProcessor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ testProcessor.onaudioprocess = onTestOutput;
+ // Start audioprocess events before source delays are connected.
+ testProcessor.connect(ctx.destination);
+}
+
+function prepareTest() {
+ ctx = new AudioContext();
+ // Place the listener to the side of the origin, where the panners are
+ // positioned, to maximize delay in one ear.
+ ctx.listener.setPosition(1,0,0);
+
+ // A PannerNode will produce no output until it has loaded its HRIR
+ // database. Wait for this to load before starting the test.
+ var processor = ctx.createScriptProcessor(bufferSize, 2, 0);
+ referencePanner = ctx.createPanner();
+ referencePanner.panningModel = "HRTF";
+ referencePanner.connect(processor);
+ var oscillator = ctx.createOscillator();
+ oscillator.connect(referencePanner);
+ oscillator.start(0);
+
+ processor.onaudioprocess = function(e) {
+ if (isChannelSilent(e.inputBuffer.getChannelData(0)))
+ return;
+
+ oscillator.stop(0);
+ oscillator.disconnect();
+ referencePanner.disconnect();
+ e.target.onaudioprocess = null;
+ SimpleTest.executeSoon(startTest);
+ };
+}
+prepareTest();
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNode_audioparam_distance.html b/dom/media/webaudio/test/test_pannerNode_audioparam_distance.html
new file mode 100644
index 0000000000..2d955de19d
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNode_audioparam_distance.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Distance effect of a PannerNode with the position set via AudioParams (Bug 1472550)</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+ SimpleTest.waitForExplicitFinish();
+ var o = new OfflineAudioContext(2, 256, 44100);
+
+ // We want a stereo constant source.
+ var b = o.createBuffer(2, 1, 44100);
+ b.getChannelData(0)[0] = 1;
+ b.getChannelData(1)[0] = 1;
+ var c = o.createBufferSource();
+ c.buffer = b;
+ c.loop = true;
+
+ var p = o.createPanner();
+ p.positionY.setValueAtTime(1, 0);
+ p.positionX.setValueAtTime(1, 0);
+ p.positionZ.setValueAtTime(1, 0);
+
+ // Set the listener somewhere far
+ o.listener.setPosition(20, 2, 20);
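+ // Source-listener distance is sqrt(19^2 + 1^2 + 19^2) = sqrt(723) ~ 26.9;
+ // assuming the default inverse distance model (refDistance 1, rolloff 1),
+ // gain ~ 1 / 26.9 ~ 0.04, safely below the 0.1 threshold checked below.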
+
+ c.start();
+ c.connect(p).connect(o.destination);
+
+ o.startRendering().then((ab) => {
+ // Check that the distance attenuates the sound.
+ ok(ab.getChannelData(0)[0] < 0.1, "left channel must be very quiet");
+ ok(ab.getChannelData(1)[0] < 0.1, "right channel must be very quiet");
+ SimpleTest.finish();
+ });
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNode_equalPower.html b/dom/media/webaudio/test/test_pannerNode_equalPower.html
new file mode 100644
index 0000000000..127a87b254
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNode_equalPower.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<title>Test PannerNode</title>
+<script src="/tests/SimpleTest/SimpleTest.js"></script>
+<script type="text/javascript" src="webaudio.js"></script>
+<script type="text/javascript" src="layouttest-glue.js"></script>
+<script type="text/javascript" src="blink/audio-testing.js"></script>
+<script type="text/javascript" src="blink/panner-model-testing.js"></script>
+<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ function checkFinished() {
+ SimpleTest.finish();
+ }
+ var ctx = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate);
+ createTestAndRun(ctx, nodesToCreate, 2, checkFinished);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNode_maxDistance.html b/dom/media/webaudio/test/test_pannerNode_maxDistance.html
new file mode 100644
index 0000000000..b5286e56e1
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNode_maxDistance.html
@@ -0,0 +1,64 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode outputs silence when the distance is greater than maxDist</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var types = [
+ "equalpower",
+ "HRTF"
+];
+
+var finished = types.length;
+
+function finish() {
+ if (!--finished) {
+ SimpleTest.finish();
+ }
+}
+
+function test(type) {
+ var ac = new OfflineAudioContext(1, 128, 44100);
+ var osc = ac.createOscillator();
+ var panner = ac.createPanner();
+
+ panner.distanceModel = "linear";
+ panner.maxDistance = 100;
+ panner.positionY.value = 200;
+ ac.listener.setPosition(0, 0, 0);
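+ // Distance 200 clamps to maxDistance 100; assuming the default
+ // refDistance 1 and rolloffFactor 1, the linear model gives
+ // gain = 1 - (100 - 1) / (100 - 1) = 0, i.e. silence.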
+
+ osc.connect(panner);
+ panner.connect(ac.destination);
+
+ osc.start();
+
+ ac.startRendering().then(function(buffer) {
+ var silence = true;
+ var array = buffer.getChannelData(0);
+ for (var i = 0; i < buffer.length; i++) {
+ if (array[i] != 0) {
+ ok(false, "Found noise in the buffer.");
+ silence = false;
+ }
+ }
+ ok(silence, "The buffer is silent.");
+ finish();
+ });
+}
+
+
+addLoadEvent(function() {
+ types.forEach(test);
+});
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_periodicWave.html b/dom/media/webaudio/test/test_periodicWave.html
new file mode 100644
index 0000000000..7b8a6ab12c
--- /dev/null
+++ b/dom/media/webaudio/test/test_periodicWave.html
@@ -0,0 +1,130 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the PeriodicWave interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+// real and imag are used in separate PeriodicWaves to make their peak values
+// easy to determine.
+const realMax = 99;
+var real = new Float32Array(realMax + 1);
+real[1] = 2.0; // fundamental
+real[realMax] = 3.0;
+const realPeak = real[1] + real[realMax];
+const realFundamental = 19.0;
+var imag = new Float32Array(4);
+imag[0] = 6.0; // should be ignored.
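+// (imag[0] would scale sin(0) = 0, so it could not contribute even if used.)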
+imag[3] = 0.5;
+const imagPeak = imag[3];
+const imagFundamental = 551.0;
+
+const testLength = 4096;
+
+addLoadEvent(function() {
+ var ac = new AudioContext();
+ ac.createPeriodicWave(new Float32Array(4096), new Float32Array(4096));
+ expectException(function() {
+ ac.createPeriodicWave(new Float32Array(512), imag);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ ac.createPeriodicWave(new Float32Array(0), new Float32Array(0));
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ ac.createPeriodicWave(new Float32Array(1), new Float32Array(1));
+ }, DOMException.INDEX_SIZE_ERR);
+ expectNoException(function() {
+ ac.createPeriodicWave(new Float32Array(4097), new Float32Array(4097));
+ });
+
+ expectNoException(function() {
+ new PeriodicWave(ac, {});
+ });
+
+ // real.size == imag.size
+ expectException(function() {
+ new PeriodicWave(ac, {real: new Float32Array(10), imag: new Float32Array(9)});
+ }, DOMException.INDEX_SIZE_ERR);
+
+ // size lower than 2 is not allowed
+ expectException(function() {
+ new PeriodicWave(ac, {real: new Float32Array(0)});
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ new PeriodicWave(ac, {imag: new Float32Array(0)});
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ new PeriodicWave(ac, {real: new Float32Array(1)});
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ new PeriodicWave(ac, {imag: new Float32Array(1)});
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ new PeriodicWave(ac, {real: new Float32Array(0), imag: new Float32Array(0)});
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ new PeriodicWave(ac, {real: new Float32Array(1), imag: new Float32Array(1)});
+ }, DOMException.INDEX_SIZE_ERR);
+
+ new PeriodicWave(ac, {real: new Float32Array(4096), imag: new Float32Array(4096)});
+ new PeriodicWave(ac, {real: new Float32Array(4096) });
+ new PeriodicWave(ac, {imag: new Float32Array(4096) });
+
+ runTest();
+});
+
+var gTest = {
+ createGraph(context) {
+ var merger = context.createChannelMerger();
+
+ var osc0 = context.createOscillator();
+ var osc1 = context.createOscillator();
+
+ osc0.setPeriodicWave(context.
+ createPeriodicWave(real,
+ new Float32Array(real.length)));
+ osc1.setPeriodicWave(context.
+ createPeriodicWave(new Float32Array(imag.length),
+ imag));
+
+ osc0.frequency.value = realFundamental;
+ osc1.frequency.value = imagFundamental;
+
+ osc0.start();
+ osc1.start();
+
+ osc0.connect(merger, 0, 0);
+ osc1.connect(merger, 0, 1);
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var buffer = context.createBuffer(2, testLength, context.sampleRate);
+
+ for (var i = 0; i < buffer.length; ++i) {
+
+ buffer.getChannelData(0)[i] = 1.0 / realPeak *
+ (real[1] * Math.cos(2 * Math.PI * realFundamental * i /
+ context.sampleRate) +
+ real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i /
+ context.sampleRate));
+
+ buffer.getChannelData(1)[i] = 1.0 / imagPeak *
+ imag[3] * Math.sin(2 * Math.PI * 3 * imagFundamental * i /
+ context.sampleRate);
+ }
+ return buffer;
+ },
+};
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_periodicWaveBandLimiting.html b/dom/media/webaudio/test/test_periodicWaveBandLimiting.html
new file mode 100644
index 0000000000..70fbb09e2a
--- /dev/null
+++ b/dom/media/webaudio/test/test_periodicWaveBandLimiting.html
@@ -0,0 +1,86 @@
+<!DOCTYPE html>
+<title>Test effect of band limiting on PeriodicWave signals</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const sampleRate = 48000;
+const bufferSize = 12800;
+const epsilon = 0.01;
+
+// "All implementations must support arrays up to at least 8192", but the
+// linear interpolation of the current implementation distorts the higher
+// frequency components too much to pass this test.
+const frequencyIndexMax = 200;
+
+// A set of oscillators are created near the Nyquist frequency.
+// These are factors giving each oscillator frequency relative to the Nyquist.
+// The first is an octave below Nyquist and the last is just above.
+const OCTAVE_BELOW = 0;
+const HALF_BELOW = 1;
+const NEAR_BELOW = 2;
+const ABOVE = 3;
+const oscillatorFactors = [0.5, Math.sqrt(0.5), 0.99, 1.01];
+const oscillatorCount = oscillatorFactors.length;
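+// Each PeriodicWave below has energy only at harmonic frequencyIndex, so an
+// oscillator at factor * sampleRate / (2 * frequencyIndex) places its single
+// component at factor * Nyquist.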
+
+// Return magnitude relative to unit sine wave
+function magnitude(array) {
+ var mag = 0;
+ for (var i = 0; i < array.length; ++i) {
+ var sample = array[i];
+ mag += sample * sample;
+ }
+ return Math.sqrt(2 * mag / array.length);
+}
+
+function test_frequency_index(frequencyIndex) {
+
+ var context =
+ new OfflineAudioContext(oscillatorCount, bufferSize, sampleRate);
+
+ var merger = context.createChannelMerger(oscillatorCount);
+ merger.connect(context.destination);
+
+ var real = new Float32Array(frequencyIndex + 1);
+ real[frequencyIndex] = 1;
+ var image = new Float32Array(real.length);
+ var wave = context.createPeriodicWave(real, image);
+
+ for (var i = 0; i < oscillatorCount; ++i) {
+ var oscillator = context.createOscillator();
+ oscillator.frequency.value =
+ oscillatorFactors[i] * sampleRate / (2 * frequencyIndex);
+ oscillator.connect(merger, 0, i);
+ oscillator.setPeriodicWave(wave);
+ oscillator.start(0);
+ }
+
+ return context.startRendering().
+ then((buffer) => {
+ assert_equals(buffer.numberOfChannels, oscillatorCount);
+ var magnitudes = [];
+ for (var i = 0; i < oscillatorCount; ++i) {
+ magnitudes[i] = magnitude(buffer.getChannelData(i));
+ }
+ // Unaffected by band-limiting one octave below Nyquist.
+ assert_approx_equals(magnitudes[OCTAVE_BELOW], 1, epsilon,
+ "magnitude with frequency octave below Nyquist");
+ // Still at least half the amplitude at half octave below Nyquist.
+ assert_greater_than(magnitudes[HALF_BELOW], 0.5 * (1 - epsilon),
+ "magnitude with frequency half octave below Nyquist");
+ // Approaching zero or zero near Nyquist.
+ assert_less_than(magnitudes[NEAR_BELOW], 0.1,
+ "magnitude with frequency near Nyquist");
+ assert_equals(magnitudes[ABOVE], 0,
+ "magnitude with frequency above Nyquist");
+ });
+}
+
+// The 5/4 ratio with rounding up provides sampling across a range of
+// octaves and offsets within octaves.
+for (var frequencyIndex = 1;
+ frequencyIndex < frequencyIndexMax;
+ frequencyIndex = Math.floor((5 * frequencyIndex + 3) / 4)) {
+ promise_test(test_frequency_index.bind(null, frequencyIndex),
+ "Frequency " + frequencyIndex);
+}
+</script>
diff --git a/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html b/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html
new file mode 100644
index 0000000000..229d48282e
--- /dev/null
+++ b/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html
@@ -0,0 +1,98 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PeriodicWave disableNormalization Parameter</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+// We create PeriodicWave instances containing two tones and compare them to
+// buffers created directly in JavaScript by adding the two waves together.
+// Two of the PeriodicWaves are normalized, the other is not. This test is
+// a modification of test_periodicWave.html.
+//
+// These constants are borrowed from test_periodicWave.html and modified
+// so that the realPeak (which is the normalization factor) will be small
+// enough that the errors are within the bounds for the test.
+const realMax = 99;
+var real = new Float32Array(realMax + 1);
+real[1] = 2.0; // fundamental
+real[realMax] = 0.25;
+
+const realPeak = real[1] + real[realMax];
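+// i.e. realPeak = 2.0 + 0.25 = 2.25, so the unnormalized channel should peak
+// near 2.25 while the normalized channels peak near 1.0.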
+const realFundamental = 19.0;
+
+const testLength = 4096;
+
+addLoadEvent(function() {
+ runTest();
+});
+
+var gTest = {
+ createGraph(context) {
+ var merger = context.createChannelMerger();
+
+ var osc0 = context.createOscillator();
+ var osc1 = context.createOscillator();
+ var osc2 = context.createOscillator();
+
+ osc0.setPeriodicWave(context.
+ createPeriodicWave(real,
+ new Float32Array(real.length),
+ {disableNormalization: false}));
+ osc1.setPeriodicWave(context.
+ createPeriodicWave(real,
+ new Float32Array(real.length)));
+ osc2.setPeriodicWave(context.
+ createPeriodicWave(real,
+ new Float32Array(real.length),
+ {disableNormalization: true}));
+
+ osc0.frequency.value = realFundamental;
+ osc1.frequency.value = realFundamental;
+ osc2.frequency.value = realFundamental;
+
+ osc0.start();
+ osc1.start();
+ osc2.start();
+
+ osc0.connect(merger, 0, 0);
+ osc1.connect(merger, 0, 1);
+ osc2.connect(merger, 0, 2);
+
+ return merger;
+ },
+ createExpectedBuffers(context) {
+ var buffer = context.createBuffer(3, testLength, context.sampleRate);
+
+ for (var i = 0; i < buffer.length; ++i) {
+
+ buffer.getChannelData(0)[i] = 1.0 / realPeak *
+ (real[1] * Math.cos(2 * Math.PI * realFundamental * i /
+ context.sampleRate) +
+ real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i /
+ context.sampleRate));
+
+ buffer.getChannelData(1)[i] = buffer.getChannelData(0)[i];
+
+ buffer.getChannelData(2)[i] =
+ (real[1] * Math.cos(2 * Math.PI * realFundamental * i /
+ context.sampleRate) +
+ real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i /
+ context.sampleRate));
+ }
+ return buffer;
+ },
+ 'numberOfChannels': 3,
+};
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_retrospective-exponentialRampToValueAtTime.html b/dom/media/webaudio/test/test_retrospective-exponentialRampToValueAtTime.html
new file mode 100644
index 0000000000..20d3d59faf
--- /dev/null
+++ b/dom/media/webaudio/test/test_retrospective-exponentialRampToValueAtTime.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test exponentialRampToValue with end time in the past</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+function do_test(t, context) {
+ var source = context.createConstantSource();
+ source.start();
+
+ var test = context.createGain();
+ test.gain.exponentialRampToValueAtTime(0.1, 0.5*context.currentTime);
+ test.gain.exponentialRampToValueAtTime(0.9, 2.0);
+
+ var reference = context.createGain();
+ reference.gain.exponentialRampToValueAtTime(0.1, context.currentTime);
+ reference.gain.exponentialRampToValueAtTime(0.9, 2.0);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "value matches expected");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_retrospective-linearRampToValueAtTime.html b/dom/media/webaudio/test/test_retrospective-linearRampToValueAtTime.html
new file mode 100644
index 0000000000..1594a30bd1
--- /dev/null
+++ b/dom/media/webaudio/test/test_retrospective-linearRampToValueAtTime.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test linearRampToValue with end time in the past</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+function do_test(t, context) {
+ var source = context.createConstantSource();
+ source.start();
+
+ var test = context.createGain();
+ test.gain.linearRampToValueAtTime(0.1, 0.5*context.currentTime);
+ test.gain.linearRampToValueAtTime(0.9, 2.0);
+
+ var reference = context.createGain();
+ reference.gain.linearRampToValueAtTime(0.1, context.currentTime);
+ reference.gain.linearRampToValueAtTime(0.9, 2.0);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "value matches expected");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_retrospective-setTargetAtTime.html b/dom/media/webaudio/test/test_retrospective-setTargetAtTime.html
new file mode 100644
index 0000000000..9b04fe22bb
--- /dev/null
+++ b/dom/media/webaudio/test/test_retrospective-setTargetAtTime.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test setTargetAtTime with start time in the past</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+function do_test(t, context) {
+ var source = context.createConstantSource();
+ source.start();
+
+ var test = context.createGain();
+ test.gain.setTargetAtTime(0.1, 0.5*context.currentTime, 0.1);
+ test.gain.linearRampToValueAtTime(0.9, 2.0);
+
+ var reference = context.createGain();
+ reference.gain.setTargetAtTime(0.1, context.currentTime, 0.1);
+ reference.gain.linearRampToValueAtTime(0.9, 2.0);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "value matches expected");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_retrospective-setValueAtTime.html b/dom/media/webaudio/test/test_retrospective-setValueAtTime.html
new file mode 100644
index 0000000000..b9657ef211
--- /dev/null
+++ b/dom/media/webaudio/test/test_retrospective-setValueAtTime.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<title>Test setValueAtTime with startTime in the past</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+function do_test(t, context) {
+ var source = context.createConstantSource();
+ source.start();
+
+ // Use a ramp of slope 1/sample to measure time.
+ // The end value is the extent of exact precision in single precision float.
+ const rampEnd = Math.pow(2, 24);
+ const rampEndSeconds = rampEnd / context.sampleRate;
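+ // 2^24 = 16777216; every integer up to this value is exactly representable
+ // in single-precision float, so the per-sample ramp values stay exact.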
+ var test = context.createGain();
+ test.gain.setValueAtTime(0.0, 0.5*context.currentTime);
+ test.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ var reference = context.createGain();
+ reference.gain.setValueAtTime(0.0, context.currentTime);
+ reference.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "ramp value matches expected");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_retrospective-setValueCurveAtTime.html b/dom/media/webaudio/test/test_retrospective-setValueCurveAtTime.html
new file mode 100644
index 0000000000..008b240129
--- /dev/null
+++ b/dom/media/webaudio/test/test_retrospective-setValueCurveAtTime.html
@@ -0,0 +1,49 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test SetValueCurve with start time in the past</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+function do_test(t, context) {
+ var source = context.createConstantSource();
+ source.start();
+
+ var test = context.createGain();
+ test.gain.setValueCurveAtTime(new Float32Array([1.0, 0.1]), 0.0, 1.0);
+
+ var reference = context.createGain();
+ reference.gain.setValueCurveAtTime(new Float32Array([1.0, 0.1]), 0.5*context.currentTime, 1.0);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "value matches expected");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNode.html b/dom/media/webaudio/test/test_scriptProcessorNode.html
new file mode 100644
index 0000000000..ec263755cb
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNode.html
@@ -0,0 +1,132 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the testing logic here is sort of complicated, and would
+// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
+// can experience delays.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = null;
+
+ var sourceSP = context.createScriptProcessor(2048);
+ sourceSP.addEventListener("audioprocess", function(e) {
+ // generate the audio
+ for (var i = 0; i < 2048; ++i) {
+ // Make sure our first sample won't be zero
+ e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate);
+ e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate);
+ }
+ // Remember our generated audio
+ buffer = e.outputBuffer;
+
+ sourceSP.removeEventListener("audioprocess", arguments.callee);
+ });
+
+ expectException(function() {
+ context.createScriptProcessor(1);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ context.createScriptProcessor(2);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ context.createScriptProcessor(128);
+ }, DOMException.INDEX_SIZE_ERR);
+ expectException(function() {
+ context.createScriptProcessor(255);
+ }, DOMException.INDEX_SIZE_ERR);
+
+ is(sourceSP.channelCount, 2, "script processor node has 2 input channels by default");
+ is(sourceSP.channelCountMode, "explicit", "Correct channelCountMode for the script processor node");
+ is(sourceSP.channelInterpretation, "speakers", "Correct channelInterpretation for the script processor node");
+
+ function findFirstNonZeroSample(buffer) {
+ for (var i = 0; i < buffer.length; ++i) {
+ if (buffer.getChannelData(0)[i] != 0) {
+ return i;
+ }
+ }
+ return buffer.length;
+ }
+
+ var sp = context.createScriptProcessor(2048);
+ sourceSP.connect(sp);
+ sp.connect(context.destination);
+ var lastPlaybackTime = 0;
+
+ var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ function checkAudioProcessingEvent(e) {
+ is(e.target, sp, "Correct event target");
+ ok(e.playbackTime > lastPlaybackTime, "playbackTime correctly set");
+ lastPlaybackTime = e.playbackTime;
+ is(e.inputBuffer.numberOfChannels, 2, "Correct number of channels for the input buffer");
+ is(e.inputBuffer.length, 2048, "Correct length for the input buffer");
+ is(e.inputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the input buffer");
+ is(e.outputBuffer.numberOfChannels, 2, "Correct number of channels for the output buffer");
+ is(e.outputBuffer.length, 2048, "Correct length for the output buffer");
+ is(e.outputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the output buffer");
+
+ compareChannels(e.outputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
+ compareChannels(e.outputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
+ }
+
+ sp.onaudioprocess = function(e) {
+ isnot(buffer, null, "The audioprocess handler for sourceSP must be run at this point");
+ checkAudioProcessingEvent(e);
+
+ // Because of the initial latency added by the second script processor node,
+ // we will never see any generated audio frames in the first callback.
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
+
+ sp.onaudioprocess = function(e) {
+ checkAudioProcessingEvent(e);
+
+ var firstNonZero = findFirstNonZeroSample(e.inputBuffer);
+ ok(firstNonZero <= 2048, "First non-zero sample within range");
+
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0);
+ compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0);
+
+ if (firstNonZero == 0) {
+ // If we did not experience any delays, the test is done!
+ sp.onaudioprocess = null;
+
+ SimpleTest.finish();
+ } else if (firstNonZero != 2048) {
+ // In case we just saw a zero buffer this time, wait one more round
+ sp.onaudioprocess = function(e) {
+ checkAudioProcessingEvent(e);
+
+ compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero);
+
+ sp.onaudioprocess = null;
+
+ SimpleTest.finish();
+ };
+ }
+ };
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html
new file mode 100644
index 0000000000..5e9a9960b7
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html
@@ -0,0 +1,80 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode channelCount handling</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the testing logic here is sort of complicated, and would
+// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
+// can experience delays.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(6, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ for (var j = 0; j < 6; ++j) {
+ buffer.getChannelData(j)[i] = Math.sin(440 * j * Math.PI * i / context.sampleRate);
+ }
+ }
+
+ var monoBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ monoBuffer.getChannelData(0)[i] = 1;
+ }
+
+ var source = context.createBufferSource();
+
+ var sp = context.createScriptProcessor(2048, 3);
+ expectException(function() { sp.channelCount = 2; },
+ DOMException.NOT_SUPPORTED_ERR);
+ sp.channelCountMode = "explicit";
+ expectException(function() { sp.channelCountMode = "max"; },
+ DOMException.NOT_SUPPORTED_ERR);
+ expectException(function() { sp.channelCountMode = "clamped-max"; },
+ DOMException.NOT_SUPPORTED_ERR);
+ sp.channelInterpretation = "discrete";
+ source.start(0);
+ source.buffer = buffer;
+ source.connect(sp);
+ sp.connect(context.destination);
+
+ var monoSource = context.createBufferSource();
+ monoSource.buffer = monoBuffer;
+ monoSource.connect(sp);
+ monoSource.start(2048 / context.sampleRate);
+
+ sp.onaudioprocess = function(e) {
+ is(e.inputBuffer.numberOfChannels, 3, "Should be correctly down-mixed to three channels");
+ for (var i = 0; i < 3; ++i) {
+ compareChannels(e.inputBuffer.getChannelData(i), buffer.getChannelData(i));
+ }
+
+ // On the next iteration, we'll get the mono buffer up-mixed to three channels
+ sp.onaudioprocess = function(e) {
+ var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ is(e.inputBuffer.numberOfChannels, 3, "Should be correctly up-mixed to three channels");
+ compareChannels(e.inputBuffer.getChannelData(0), monoBuffer.getChannelData(0));
+ for (var i = 1; i < 3; ++i) {
+ compareChannels(e.inputBuffer.getChannelData(i), emptyBuffer.getChannelData(0));
+ }
+
+ sp.onaudioprocess = null;
+ sp.disconnect(context.destination);
+
+ SimpleTest.finish();
+ };
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
new file mode 100644
index 0000000000..fb45895380
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html
@@ -0,0 +1,34 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode: should not fire audioprocess if not connected.</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test needs to wait a while to ensure that a given event does not happen.");
+addLoadEvent(function() {
+ var context = new AudioContext();
+
+ var sp = context.createScriptProcessor(2048, 2, 2);
+ sp.onaudioprocess = function(e) {
+ ok(false, "Should not call onaudioprocess if the node is not connected.");
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+ };
+ setTimeout(function() {
+ console.log(sp.onaudioprocess);
+ if (sp.onaudioprocess) {
+ ok(true, "onaudioprocess not fired.");
+ SimpleTest.finish();
+ }
+ }, 4000);
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html b/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html
new file mode 100644
index 0000000000..5d2d8170e2
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html
@@ -0,0 +1,103 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the testing logic here is sort of complicated, and would
+// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
+// can experience delays.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = null;
+
+ var sourceSP = context.createScriptProcessor(2048);
+ sourceSP.addEventListener("audioprocess", function(e) {
+ // generate the audio
+ for (var i = 0; i < 2048; ++i) {
+ // Make sure our first sample won't be zero
+ e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate);
+ e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate);
+ }
+ // Remember our generated audio
+ buffer = e.outputBuffer;
+
+ sourceSP.removeEventListener("audioprocess", arguments.callee);
+ });
+
+ function findFirstNonZeroSample(buffer) {
+ for (var i = 0; i < buffer.length; ++i) {
+ if (buffer.getChannelData(0)[i] != 0) {
+ return i;
+ }
+ }
+ return buffer.length;
+ }
+
+ var sp = context.createScriptProcessor(2048);
+ sourceSP.connect(sp);
+
+ var spWrapped = SpecialPowers.wrap(sp);
+ ok("passThrough" in spWrapped, "ScriptProcessorNode should support the passThrough API");
+ spWrapped.passThrough = true;
+
+ sp.onaudioprocess = function() {
+ ok(false, "The audioprocess event must never be dispatched on the passthrough ScriptProcessorNode");
+ };
+
+ var sp2 = context.createScriptProcessor(2048);
+ sp.connect(sp2);
+ sp2.connect(context.destination);
+
+ var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+
+ sp2.onaudioprocess = function(e) {
+ // Because of the initial latency added by the second script processor node,
+ // we will never see any generated audio frames in the first callback.
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
+
+ sp2.onaudioprocess = function(e) {
+ var firstNonZero = findFirstNonZeroSample(e.inputBuffer);
+ ok(firstNonZero <= 2048, "First non-zero sample within range");
+
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0);
+ compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0);
+
+ if (firstNonZero == 0) {
+ // If we did not experience any delays, the test is done!
+ sp2.onaudioprocess = null;
+
+ SimpleTest.finish();
+ } else if (firstNonZero != 2048) {
+ // In case we just saw a zero buffer this time, wait one more round
+ sp2.onaudioprocess = function(e) {
+ compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero);
+ compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero);
+
+ sp2.onaudioprocess = null;
+
+ SimpleTest.finish();
+ };
+ }
+ };
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html b/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html
new file mode 100644
index 0000000000..f4b25d49dd
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html
@@ -0,0 +1,39 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode with zero input or output channels</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+
+ var sp = context.createScriptProcessor(2048, 0, 2);
+ sp.onaudioprocess = function(e) {
+ is(e.inputBuffer.numberOfChannels, 0, "Should have 0 input channels");
+ is(e.outputBuffer.numberOfChannels, 2, "Should have 2 output channels");
+ sp.onaudioprocess = null;
+
+ sp = context.createScriptProcessor(2048, 2, 0);
+ sp.onaudioprocess = function(e) {
+ is(e.inputBuffer.numberOfChannels, 2, "Should have 2 input channels");
+ is(e.outputBuffer.numberOfChannels, 0, "Should have 0 output channels");
+ sp.onaudioprocess = null;
+
+ SimpleTest.finish();
+ };
+ sp.connect(context.destination);
+ };
+ sp.connect(context.destination);
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html b/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html
new file mode 100644
index 0000000000..ec695f952b
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test ScriptProcessorNode playbackTime for bug 970773</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+
+var context = new AudioContext();
+const delay = 0.1;
+
+function doTest() {
+ const processorBufferLength = 256;
+ // |currentTime| may include double precision floating point
+ // rounding errors, so round to nearest integer sample to ignore these.
+ var minimumPlaybackSample =
+ Math.round(context.currentTime * context.sampleRate) +
+ processorBufferLength;
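+ // The first callback's playbackTime should be at least one processor
+ // buffer (256 frames) later than the currentTime sampled above.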
+ var sp = context.createScriptProcessor(processorBufferLength);
+ sp.connect(context.destination);
+ sp.onaudioprocess =
+ function(e) {
+ is(e.inputBuffer.length, processorBufferLength,
+ "expected buffer length");
+ var playbackSample = Math.round(e.playbackTime * context.sampleRate);
+ ok(playbackSample >= minimumPlaybackSample,
+ "playbackSample " + playbackSample +
+ " beyond expected minimum " + minimumPlaybackSample);
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+ };
+}
+
+// Wait until AudioDestinationNode has accumulated enough 'extra' time so that
+// a failure would be easily detected.
+(function waitForExtraTime() {
+ if (context.currentTime < delay) {
+ SimpleTest.executeSoon(waitForExtraTime);
+ } else {
+ doTest();
+ }
+})();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html b/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html
new file mode 100644
index 0000000000..5c03a8a911
--- /dev/null
+++ b/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<title>Test seamless playback of a series of resampled buffers</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+// Permitting some accumulation of rounding to int16_t.
+// 64/2^15 would be only just small enough to detect off-by-one-subsample
+// scheduling errors with the frequencies here.
+const EPSILON = 4.0 / Math.pow(2, 15);
+// Offsets test for rounding to nearest rather than up or down.
+const OFFSETS = [EPSILON, 1.0 - EPSILON];
+// The ratio of resampling is 147:160, so 256 start points is enough to cover
+// every fractional offset.
+const LENGTH = 256;
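+// (Sketch of the arithmetic: 44100/48000 reduces to 147/160, so the
+// fractional resampling phase repeats after at most 160 start points,
+// and LENGTH = 256 > 160 exercises the whole cycle.)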
+
+function do_test(context_rate, buffer_rate, start_offset) {
+
+ var context =
+ new OfflineAudioContext(2, LENGTH, context_rate);
+
+ var merger = context.createChannelMerger(context.destination.channelCount);
+ merger.connect(context.destination);
+
+ // Create an audio signal that will be repeated
+ var repeating_signal = context.createBuffer(1, 1, buffer_rate);
+ repeating_signal.getChannelData(0)[0] = 0.5;
+
+ // Schedule a series of nodes to repeat the signal.
+ for (var i = 0; i < LENGTH; ++i) {
+ var source = context.createBufferSource();
+ source.buffer = repeating_signal;
+ source.connect(merger, 0, 0);
+ source.start((i + start_offset) / buffer_rate);
+ }
+
+ // A single long signal should produce the same result.
+ var long_signal = context.createBuffer(1, LENGTH, buffer_rate);
+ var c = long_signal.getChannelData(0);
+ for (var i = 0; i < c.length; ++i) {
+ c[i] = 0.5;
+ }
+
+ var source = context.createBufferSource();
+ source.buffer = long_signal;
+ source.connect(merger, 0, 1);
+ source.start(start_offset / buffer_rate);
+
+ return context.startRendering().
+ then((buffer) => {
+ var series_output = buffer.getChannelData(0);
+ var expected = buffer.getChannelData(1);
+
+ for (var i = 0; i < buffer.length; ++i) {
+ assert_approx_equals(series_output[i], expected[i], EPSILON,
+ "series output at " + i);
+ }
+ });
+}
+
+function start_tests(context_rate, buffer_rate) {
+ OFFSETS.forEach((start_offset) => {
+ promise_test(() => do_test(context_rate, buffer_rate, start_offset),
+ "" + context_rate + " context, "
+ + buffer_rate + " buffer, "
+ + start_offset + " start");
+ });
+}
+
+start_tests(48000, 44100);
+start_tests(44100, 48000);
+
+</script>
diff --git a/dom/media/webaudio/test/test_setValueCurveWithNonFiniteElements.html b/dom/media/webaudio/test/test_setValueCurveWithNonFiniteElements.html
new file mode 100644
index 0000000000..28829e1ec2
--- /dev/null
+++ b/dom/media/webaudio/test/test_setValueCurveWithNonFiniteElements.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset=utf-8>
+<head>
+ <title>Bug 1308437 - setValueCurve should throw on non-finite elements</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+
+function testInfiniteElement(audioContext, audioParam) {
+ // create value curve with infinite element
+ var arr = new Float32Array(5);
+ arr[0] = 0.5;
+ arr[1] = 1;
+ arr[2] = Infinity;
+ arr[3] = 1;
+ arr[4] = 0.5;
+
+ try {
+ audioParam.setValueCurveAtTime(arr, audioContext.currentTime, 2);
+ ok(false, "We shouldn't be able to call setValueCurve with Infinity but we can");
+ } catch(e) {
+ ok(e instanceof TypeError, "TypeError is thrown");
+ }
+};
+
+function testNanElement(audioContext, audioParam) {
+ // create value curve with NaN element
+ var arr = new Float32Array(5);
+ arr[0] = 0.5;
+ arr[1] = 1;
+ arr[2] = NaN;
+ arr[3] = 1;
+ arr[4] = 0.5;
+
+ try {
+ audioParam.setValueCurveAtTime(arr, audioContext.currentTime, 2);
+ ok(false, "We shouldn't be able to call setValueCurve with NaN but we can");
+ } catch(e) {
+ ok(e instanceof TypeError, "TypeError is thrown");
+ }
+};
+
+addLoadEvent(function() {
+ var audioContext = new AudioContext();
+ var gainNode = audioContext.createGain();
+
+ testInfiniteElement(audioContext, gainNode.gain);
+ testNanElement(audioContext, gainNode.gain);
+
+ SimpleTest.finish();
+});
+</script>
+</pre>
+</body>
+</html>
\ No newline at end of file
diff --git a/dom/media/webaudio/test/test_singleSourceDest.html b/dom/media/webaudio/test/test_singleSourceDest.html
new file mode 100644
index 0000000000..fd4de50f5d
--- /dev/null
+++ b/dom/media/webaudio/test/test_singleSourceDest.html
@@ -0,0 +1,70 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test whether we can create an AudioContext interface</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ var destination = context.destination;
+ is(destination.context, context, "Destination node has proper context");
+ is(destination.numberOfInputs, 1, "Destination node has 1 input");
+ is(destination.numberOfOutputs, 0, "Destination node has 0 outputs");
+ is(destination.channelCount, 2, "Destination node has 2 input channels by default");
+ is(destination.channelCountMode, "explicit", "Correct channelCountMode for the destination node");
+ is(destination.channelInterpretation, "speakers", "Correct channelInterpretation for the destination node");
+ ok(destination instanceof EventTarget, "AudioNodes must be EventTargets");
+
+ var source = context.createBufferSource();
+ is(source.context, context, "Source node has proper context");
+ is(source.numberOfInputs, 0, "Source node has 0 inputs");
+ is(source.numberOfOutputs, 1, "Source node has 1 output");
+ is(source.loop, false, "Source node is not looping");
+ is(source.loopStart, 0, "Correct default value for loopStart");
+ is(source.loopEnd, 0, "Correct default value for loopEnd");
+ ok(!source.buffer, "Source node should not have a buffer when it's created");
+ is(source.channelCount, 2, "source node has 2 input channels by default");
+ is(source.channelCountMode, "max", "Correct channelCountMode for the source node");
+ is(source.channelInterpretation, "speakers", "Correct channelInterpretation for the source node");
+
+ expectException(function() {
+ source.channelCount = 0;
+ }, DOMException.NOT_SUPPORTED_ERR);
+
+ source.buffer = buffer;
+ ok(source.buffer, "Source node should have a buffer now");
+
+ source.connect(destination);
+
+ is(source.numberOfInputs, 0, "Source node has 0 inputs");
+ is(source.numberOfOutputs, 1, "Source node has 1 output");
+ is(destination.numberOfInputs, 1, "Destination node has 1 input");
+ is(destination.numberOfOutputs, 0, "Destination node has 0 outputs");
+
+ source.start(0);
+ SimpleTest.executeSoon(function() {
+ source.stop(0);
+ source.disconnect();
+
+ SpecialPowers.clearUserPref("media.webaudio.enabled");
+ SimpleTest.finish();
+ });
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_slowStart.html b/dom/media/webaudio/test/test_slowStart.html
new file mode 100644
index 0000000000..17de7351c1
--- /dev/null
+++ b/dom/media/webaudio/test/test_slowStart.html
@@ -0,0 +1,48 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test AudioContext.currentTime</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+SimpleTest.requestFlakyTimeout("This test needs to periodically query the AudioContext's position.");
+const CUBEB_INIT_DELAY = 5000;
+// Delay audio stream start by a good 5 seconds
+SpecialPowers.pushPrefEnv({"set": [["media.cubeb.slow_stream_init_ms",
+ CUBEB_INIT_DELAY]]}, runTest);
+
+
+function runTest() {
+ let ac = new AudioContext();
+ let notStartedYetCount = 0;
+ let startWallClockTime = performance.now();
+ is(ac.currentTime, 0, "AudioContext.currentTime should be 0 initially");
+ is(ac.state, "suspended", "AudioContext is initially suspended");
+ let intervalHandle = setInterval(function() {
+ if (ac.state == "running" || ac.currentTime > 0) {
+ clearInterval(intervalHandle);
+ return;
+ }
+ is(ac.currentTime, 0, "AudioContext.currentTime is still 0");
+ is(ac.state, "suspended", "AudioContext is still suspended");
+ notStartedYetCount++;
+ });
+ ac.onstatechange = function() {
+ is(ac.state, "running", "The AudioContext eventually started.");
+ var startDuration = performance.now() - startWallClockTime;
+ info(`AudioContext start time with a delay of ${CUBEB_INIT_DELAY}ms: ${startDuration}ms`);
+ ok(notStartedYetCount > 0, "We should have observed the AudioContext in \"suspended\" state");
+ ok(startDuration >= CUBEB_INIT_DELAY, "The AudioContext state transition was correct.");
+ SimpleTest.finish();
+ };
+}
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_stereoPannerNode.html b/dom/media/webaudio/test/test_stereoPannerNode.html
new file mode 100644
index 0000000000..d08e1640b2
--- /dev/null
+++ b/dom/media/webaudio/test/test_stereoPannerNode.html
@@ -0,0 +1,295 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test StereoPannerNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var SR = 44100;
+var BUF_SIZE = 128;
+var PANNING = 0.1;
+var GAIN = 0.5;
+
+// Cheap reimplementation of some bits of the spec
+function gainForPanningMonoToStereo(panning) {
+ panning += 1;
+ panning /= 2;
+ return [ Math.cos(0.5 * Math.PI * panning),
+ Math.sin(0.5 * Math.PI * panning) ];
+}
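+// For example, panning 0 maps to 0.5, giving cos(pi/4) = sin(pi/4) ~ 0.7071
+// on both channels: the equal-power center position.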
+
+function gainForPanningStereoToStereo(panning) {
+ if (panning <= 0) {
+ panning += 1.;
+ }
+ return [ Math.cos(0.5 * Math.PI * panning),
+ Math.sin(0.5 * Math.PI * panning) ];
+}
+
+function applyStereoToStereoPanning(l, r, panningValues, panning) {
+ var outL, outR;
+ if (panning <= 0) {
+ outL = l + r * panningValues[0];
+ outR = r * panningValues[1];
+ } else {
+ outL = l * panningValues[0];
+ outR = r + l * panningValues[1];
+ }
+ return [outL,outR];
+}
+
+function applyMonoToStereoPanning(c, panning) {
+ return [c * panning[0], c * panning[1]];
+}
+
+// Test the DOM interface
+var context = new OfflineAudioContext(1, 1, SR);
+var stereoPanner = new StereoPannerNode(context);
+ok(stereoPanner.pan, "The AudioParam member must exist");
+is(stereoPanner.pan.value, 0.0, "Correct initial value");
+is(stereoPanner.pan.defaultValue, 0.0, "Correct default value");
+is(stereoPanner.channelCount, 2, "StereoPannerNode has 2 input channels by default");
+is(stereoPanner.channelCountMode, "clamped-max", "Correct channelCountMode for the StereoPannerNode");
+is(stereoPanner.channelInterpretation, "speakers", "Correct channelInterpretation for the StereoPannerNode");
+expectException(function() {
+ stereoPanner.channelCount = 3;
+}, DOMException.NOT_SUPPORTED_ERR);
+expectException(function() {
+ stereoPanner.channelCountMode = "max";
+}, DOMException.NOT_SUPPORTED_ERR);
+
+// A sine to be used to fill the buffers
+function sine(t) {
+ return Math.sin(440 * 2 * Math.PI * t / context.sampleRate);
+}
+
+// One mono and one stereo buffer: the StereoPannerNode equations differ
+// depending on whether the input is mono or stereo
+var stereoBuffer = new AudioBuffer({ numberOfChannels: 2,
+ length: BUF_SIZE,
+ sampleRate: context.sampleRate });
+var monoBuffer = new AudioBuffer({ numberOfChannels: 1,
+ length: BUF_SIZE,
+ sampleRate: context.sampleRate });
+for (var i = 0; i < BUF_SIZE; ++i) {
+ monoBuffer.getChannelData(0)[i] =
+ stereoBuffer.getChannelData(0)[i] =
+ stereoBuffer.getChannelData(1)[i] = sine(i);
+}
+
+// Expected test vectors
+function expectedBufferNoop(gain) {
+ gain = gain || 1.0;
+ var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+ length: BUF_SIZE,
+ sampleRate: SR });
+ for (var i = 0; i < BUF_SIZE; i++) {
+ expectedBuffer.getChannelData(0)[i] = gain * sine(i);
+ expectedBuffer.getChannelData(1)[i] = gain * sine(i);
+ }
+ return expectedBuffer;
+}
+
+function expectedBufferForStereo(panning, gain) {
+ gain = gain || 1.0;
+ var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+ length: BUF_SIZE,
+ sampleRate: SR });
+ var gainPanning = gainForPanningStereoToStereo(panning);
+ for (var i = 0; i < BUF_SIZE; i++) {
+ var values = [ gain * sine(i), gain * sine(i) ];
+ var processed = applyStereoToStereoPanning(values[0], values[1], gainPanning, PANNING);
+ expectedBuffer.getChannelData(0)[i] = processed[0];
+ expectedBuffer.getChannelData(1)[i] = processed[1];
+ }
+ return expectedBuffer;
+}
+
+function expectedBufferForMono(panning, gain) {
+ gain = gain || 1.0;
+ var expectedBuffer = new AudioBuffer({ numberOfChannels: 2,
+ length: BUF_SIZE,
+ sampleRate: SR });
+ var gainPanning = gainForPanningMonoToStereo(panning);
+ gainPanning[0] *= gain;
+ gainPanning[1] *= gain;
+ for (var i = 0; i < BUF_SIZE; i++) {
+ var value = sine(i);
+ var processed = applyMonoToStereoPanning(value, gainPanning);
+ expectedBuffer.getChannelData(0)[i] = processed[0];
+ expectedBuffer.getChannelData(1)[i] = processed[1];
+ }
+ return expectedBuffer;
+}
+
+// Actual test cases
+var tests = [
+ function monoPanningNoop(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ monoSource.connect(panner);
+ monoSource.buffer = monoBuffer;
+ monoSource.start(0);
+ return expectedBufferForMono(0);
+ },
+ function stereoPanningNoop(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ stereoSource.connect(panner);
+ stereoSource.buffer = stereoBuffer;
+ stereoSource.start(0);
+ return expectedBufferNoop();
+ },
+ function monoPanningNoopWithGain(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ monoSource.connect(gain);
+ gain.connect(panner);
+ monoSource.buffer = monoBuffer;
+ monoSource.start(0);
+ return expectedBufferForMono(0, GAIN);
+ },
+ function stereoPanningNoopWithGain(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ stereoSource.connect(gain);
+ gain.connect(panner);
+ stereoSource.buffer = stereoBuffer;
+ stereoSource.start(0);
+ return expectedBufferNoop(GAIN);
+ },
+ function stereoPanningAutomation(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ stereoSource.connect(panner);
+ stereoSource.buffer = stereoBuffer;
+    panner.pan.setValueAtTime(PANNING, 0.0);
+ stereoSource.start(0);
+ return expectedBufferForStereo(PANNING);
+ },
+ function stereoPanning(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ stereoSource.buffer = stereoBuffer;
+ stereoSource.connect(panner);
+    panner.pan.value = PANNING;
+ stereoSource.start(0);
+ return expectedBufferForStereo(PANNING);
+ },
+ function monoPanningAutomation(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ monoSource.connect(panner);
+ monoSource.buffer = monoBuffer;
+ panner.pan.setValueAtTime(PANNING, 0.0);
+ monoSource.start(0);
+ return expectedBufferForMono(PANNING);
+ },
+ function monoPanning(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ monoSource.connect(panner);
+ monoSource.buffer = monoBuffer;
+    panner.pan.value = PANNING;
+ monoSource.start(0);
+ return expectedBufferForMono(PANNING);
+ },
+ function monoPanningWithGain(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ monoSource.connect(gain);
+ gain.connect(panner);
+ monoSource.buffer = monoBuffer;
+    panner.pan.value = PANNING;
+ monoSource.start(0);
+ return expectedBufferForMono(PANNING, GAIN);
+ },
+ function stereoPanningWithGain(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ stereoSource.connect(gain);
+ gain.connect(panner);
+ stereoSource.buffer = stereoBuffer;
+    panner.pan.value = PANNING;
+ stereoSource.start(0);
+ return expectedBufferForStereo(PANNING, GAIN);
+ },
+ function monoPanningWithGainAndAutomation(ctx, panner) {
+ var monoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ monoSource.connect(gain);
+ gain.connect(panner);
+ monoSource.buffer = monoBuffer;
+ panner.pan.setValueAtTime(PANNING, 0);
+ monoSource.start(0);
+ return expectedBufferForMono(PANNING, GAIN);
+ },
+ function stereoPanningWithGainAndAutomation(ctx, panner) {
+ var stereoSource = ctx.createBufferSource();
+ var gain = ctx.createGain();
+ gain.gain.value = GAIN;
+ stereoSource.connect(gain);
+ gain.connect(panner);
+ stereoSource.buffer = stereoBuffer;
+ panner.pan.setValueAtTime(PANNING, 0);
+ stereoSource.start(0);
+ return expectedBufferForStereo(PANNING, GAIN);
+ },
+ function bug_1783181(ctx, panner) {
+ const length = 128;
+ const buffer = new AudioBuffer({ length, numberOfChannels: 2, sampleRate: ctx.sampleRate });
+
+ buffer.copyToChannel(new Float32Array([1, 0.5, 0, -0.5, -1]), 0);
+ buffer.copyToChannel(new Float32Array([-0.5, -0.25, 0, 0.25, 0.5]), 1);
+
+ const audioBufferSourceNode = new AudioBufferSourceNode(ctx, { buffer });
+
+ audioBufferSourceNode.connect(panner);
+
+ panner.pan.setValueAtTime(0.5, 0);
+ panner.pan.setValueAtTime(0, 2 / ctx.sampleRate);
+ panner.pan.linearRampToValueAtTime(1, 5 / ctx.sampleRate);
+ panner.pan.cancelScheduledValues(3 / ctx.sampleRate);
+
+ audioBufferSourceNode.start(0);
+
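+    // Frames 0 and 1 render with pan = 0.5; cancelScheduledValues(3 / rate)
+    // also removes the ramp ending at frame 5, so frames 2-4 render with
+    // pan = 0. For stereo input with pan > 0 the spec gives
+    // outL = L * cos(pan * PI / 2) and outR = R + L * sin(pan * PI / 2),
+    // hence 1 * cos(PI / 4) ~= 0.7071 and -0.5 + sin(PI / 4) ~= 0.2071 for
+    // the first frame.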
+ const expected = new AudioBuffer({ length, numberOfChannels: 2, sampleRate: ctx.sampleRate });
+ expected.copyToChannel(new Float32Array([ 0.7071067690849304, 0.3535533845424652, 0, -0.5, -1 ]), 0);
+ expected.copyToChannel(new Float32Array([ 0.20710676908493042, 0.10355338454246521, 0, 0.25, 0.5 ]), 1);
+
+ return expected;
+ }
+];
+
+var finished = 0;
+function finish() {
+ if (++finished == tests.length) {
+ SimpleTest.finish();
+ }
+}
+
+tests.forEach(function(f) {
+ var ac = new OfflineAudioContext(2, BUF_SIZE, SR);
+ var panner = ac.createStereoPanner();
+ panner.connect(ac.destination);
+ var expected = f(ac, panner);
+ ac.oncomplete = function(e) {
+ info(f.name);
+ compareBuffers(e.renderedBuffer, expected);
+ finish();
+ };
+  ac.startRendering();
+});
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</pre>
+<pre id="dump">
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html b/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html
new file mode 100644
index 0000000000..2d774d366b
--- /dev/null
+++ b/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html
@@ -0,0 +1,47 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test StereoPannerNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+
+ var stereoPanner = context.createStereoPanner();
+
+ source.buffer = this.buffer;
+
+ source.connect(stereoPanner);
+
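+    // passThrough is a Gecko test-only hook, hence the SpecialPowers
+    // wrapper; when set, the node forwards its input unmodified, which is
+    // why the expected output below is simply the input buffer.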
+ var stereoPannerWrapped = SpecialPowers.wrap(stereoPanner);
+ ok("passThrough" in stereoPannerWrapped, "StereoPannerNode should support the passThrough API");
+ stereoPannerWrapped.passThrough = true;
+
+ source.start(0);
+ return stereoPanner;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_stereoPanningWithGain.html b/dom/media/webaudio/test/test_stereoPanningWithGain.html
new file mode 100644
index 0000000000..94dfa3bb92
--- /dev/null
+++ b/dom/media/webaudio/test/test_stereoPanningWithGain.html
@@ -0,0 +1,49 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test stereo equalpower panning with a GainNode</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+const size = 256;
+
+var gTest = {
+ numberOfChannels: 2,
+ createGraph(context) {
+ var panner = context.createPanner();
+    panner.setPosition(1.0, 0.0, 0.0); // at the reference distance, to the right
+ panner.panningModel = "equalpower";
+
+ var gain = context.createGain();
+ gain.gain.value = -0.5;
+ gain.connect(panner);
+
+ var buffer = context.createBuffer(2, 2, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ buffer.getChannelData(1)[1] = 1.0;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(gain);
+ source.start(0);
+
+ return panner;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(2, size, context.sampleRate);
+ expectedBuffer.getChannelData(1)[0] = -0.5;
+ expectedBuffer.getChannelData(1)[1] = -0.5;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveDecoder.html b/dom/media/webaudio/test/test_waveDecoder.html
new file mode 100644
index 0000000000..65c429f2ba
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveDecoder.html
@@ -0,0 +1,69 @@
+<!DOCTYPE HTML>
+<html>
+<meta charset=utf-8>
+<head>
+  <title>Test that we decode uint8 and sint16 wave files with correct conversion to floating point</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+var testsDone = 0;
+var tests = ["UklGRjUrAABXQVZFZm10IBAAAAABAAEAESsAABErAAABAAgAZGF0YQMAAAD/AIA=",
+ "UklGRkZWAABXQVZFZm10IBAAAAABAAEAESsAACJWAAACABAAZGF0YQYAAAD/fwCAAAA="];
+
+SimpleTest.waitForExplicitFinish();
+
+function base64ToUint8Buffer(b64) {
+  var str = atob(b64);
+ var u8 = new Uint8Array(str.length);
+ for (var i = 0; i < str.length; ++i) {
+ u8[i] = str.charCodeAt(i);
+ }
+ return u8;
+}
+
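+// In a canonical 44-byte WAVE header the sample-rate field is the 32-bit
+// little-endian integer at byte offset 24. Patching it to the context's rate
+// means decodeAudioData does not resample, keeping sample values and counts
+// exact.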
+function fixupBufferSampleRate(u8, rate) {
+ u8[24] = (rate & 0x000000ff) >> 0;
+ u8[25] = (rate & 0x0000ff00) >> 8;
+ u8[26] = (rate & 0x00ff0000) >> 16;
+ u8[27] = (rate & 0xff000000) >> 24;
+}
+
+function finishTest() {
+ testsDone += 1;
+ if (testsDone == tests.length) {
+ SimpleTest.finish();
+ }
+}
+
+function decodeComplete(b) {
+ ok(true, "Decoding succeeded.");
+ is(b.numberOfChannels, 1, "Should have 1 channel.");
+ is(b.length, 3, "Should have three samples.");
+ var samples = b.getChannelData(0);
+ ok(samples[0] > 0.99 && samples[0] < 1.01, "Check near 1.0. Got " + samples[0]);
+ ok(samples[1] > -1.01 && samples[1] < -0.99, "Check near -1.0. Got " + samples[1]);
+ ok(samples[2] > -0.01 && samples[2] < 0.01, "Check near 0.0. Got " + samples[2]);
+ finishTest();
+}
+
+function decodeFailed() {
+ ok(false, "Decoding failed.");
+ finishTest();
+}
+
+addLoadEvent(function() {
+ var context = new AudioContext();
+
+ for (var i = 0; i < tests.length; ++i) {
+ var u8 = base64ToUint8Buffer(tests[i]);
+ fixupBufferSampleRate(u8, context.sampleRate);
+ context.decodeAudioData(u8.buffer, decodeComplete, decodeFailed);
+ }
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveShaper.html b/dom/media/webaudio/test/test_waveShaper.html
new file mode 100644
index 0000000000..9d2f1b3fa2
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveShaper.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test WaveShaperNode with a curve</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+
+ var shaper = new WaveShaperNode(context);
+ shaper.curve = this.curve;
+
+ source.connect(shaper);
+
+ source.start(0);
+ return shaper;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 4096, context.sampleRate);
+ for (var i = 1; i < 4095; ++i) {
+ this.buffer.getChannelData(0)[i] = 2 * (i / 4096) - 1;
+ }
+ // Two out of range values
+ this.buffer.getChannelData(0)[0] = -2;
+ this.buffer.getChannelData(0)[4095] = 2;
+
+ this.curve = new Float32Array(2048);
+ for (var i = 0; i < 2048; ++i) {
+ this.curve[i] = Math.sin(100 * Math.PI * (i + 1) / context.sampleRate);
+ }
+
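+    // Mirror the curve lookup under test: map input from [-1, 1] onto a
+    // curve index and clamp at the ends, so out-of-range inputs take the
+    // first and last curve values.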
+ var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate);
+ for (var i = 1; i < 4095; ++i) {
+ var input = this.buffer.getChannelData(0)[i];
+ var index = Math.floor(this.curve.length * (input + 1) / 2);
+ index = Math.max(0, Math.min(this.curve.length - 1, index));
+ expectedBuffer.getChannelData(0)[i] = this.curve[index];
+ }
+ expectedBuffer.getChannelData(0)[0] = this.curve[0];
+ expectedBuffer.getChannelData(0)[4095] = this.curve[2047];
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveShaperGain.html b/dom/media/webaudio/test/test_waveShaperGain.html
new file mode 100644
index 0000000000..45411eca02
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveShaperGain.html
@@ -0,0 +1,73 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+<meta charset="utf-8">
+ <title>Test that WaveShaperNode doesn't corrupt its inputs when the gain is !=
+ 1.0 (bug 1203616)</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre>
+</pre>
+<script class="testbody" type="text/javascript">
+SimpleTest.waitForExplicitFinish();
+var samplerate = 44100;
+var context = new OfflineAudioContext(1, 44100, samplerate);
+
+var dc = context.createBufferSource();
+
+var buffer = context.createBuffer(1, 1, samplerate);
+buffer.getChannelData(0)[0] = 1.0;
+dc.buffer = buffer;
+
+var gain = context.createGain();
+var ws2 = context.createWaveShaper();
+var ws = [];
+
+// No-op waveshaper curves: a two-point curve of [-1, 1] maps input linearly
+// onto itself.
+for (var i = 0; i < 2; i++) {
+ ws[i] = context.createWaveShaper();
+ var curve = new Float32Array(2);
+ curve[0] = -1.0;
+ curve[1] = 1.0;
+ ws[i].curve = curve;
+ ws[i].connect(context.destination);
+ gain.connect(ws[i]);
+}
+
+dc.connect(gain);
+dc.start();
+
+gain.gain.value = 0.5;
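+
+// Each no-op shaper receives the same 0.5-scaled block from the gain node
+// and feeds the destination, where the two branches sum back to 1.0. If a
+// shaper scaled its shared input buffer in place (bug 1203616), the other
+// branch would see corrupted data and the sum would drift from 1.0.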
+
+context.startRendering().then(buffer => {
+ document.querySelector("pre").innerHTML = buffer.getChannelData(0)[0];
+ ok(buffer.getChannelData(0)[0] == 1.0, "Volume was handled properly");
+
+ context = new OfflineAudioContext(1, 100, samplerate);
+ var oscillator = context.createOscillator();
+ var gain = context.createGain();
+ var waveShaper = context.createWaveShaper();
+
+ oscillator.start(0);
+ oscillator.connect(gain);
+
+  // Zero gain silences the oscillator's output.
+ gain.gain.value = 0;
+ gain.connect(waveShaper);
+
+  // Map the (silenced) signal to 1.0. The non-unity values at the curve
+  // edges are there to detect the use of uninitialized buffers (see bug
+  // 1283910).
+ waveShaper.curve = new Float32Array([ 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5 ]);
+ waveShaper.connect(context.destination);
+
+ context.startRendering().then((buffer) => {
+ var result = buffer.getChannelData(0);
+ ok(result.every(x => x === 1), "WaveShaper handles zero gain properly");
+ SimpleTest.finish();
+ });
+});
+</script>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html b/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html
new file mode 100644
index 0000000000..0901521a7b
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html
@@ -0,0 +1,66 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test WaveShaperNode with an invalid curve</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+
+ var shaper = context.createWaveShaper();
+
+ expectException(() => {
+ shaper.curve = new Float32Array(0);
+ }, DOMException.INVALID_STATE_ERR);
+
+ is(shaper.curve, null, "The curve mustn't have been set");
+
+ expectException(() => {
+ shaper.curve = new Float32Array(1);
+ }, DOMException.INVALID_STATE_ERR);
+
+ is(shaper.curve, null, "The curve mustn't have been set");
+
+ expectNoException(() => {
+ shaper.curve = new Float32Array(2);
+ });
+
+ isnot(shaper.curve, null, "The curve must have been set");
+
+ expectNoException(() => {
+ shaper.curve = null;
+ });
+
+ is(shaper.curve, null, "The curve must be null by default");
+
+ source.connect(shaper);
+
+ source.start(0);
+ return shaper;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ this.buffer = expectedBuffer;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveShaperNoCurve.html b/dom/media/webaudio/test/test_waveShaperNoCurve.html
new file mode 100644
index 0000000000..2da0b511af
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveShaperNoCurve.html
@@ -0,0 +1,43 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test WaveShaperNode with no curve</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+
+ var shaper = context.createWaveShaper();
+ is(shaper.curve, null, "The shaper curve must be null by default");
+
+ source.connect(shaper);
+
+ source.start(0);
+ return shaper;
+ },
+ createExpectedBuffers(context) {
+ var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ }
+ this.buffer = expectedBuffer;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_waveShaperPassThrough.html b/dom/media/webaudio/test/test_waveShaperPassThrough.html
new file mode 100644
index 0000000000..d34add9c90
--- /dev/null
+++ b/dom/media/webaudio/test/test_waveShaperPassThrough.html
@@ -0,0 +1,55 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test WaveShaperNode with passthrough</title>
+ <script src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 4096,
+ numberOfChannels: 1,
+ createGraph(context) {
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+
+ var shaper = context.createWaveShaper();
+ shaper.curve = this.curve;
+
+ var shaperWrapped = SpecialPowers.wrap(shaper);
+ ok("passThrough" in shaperWrapped, "WaveShaperNode should support the passThrough API");
+ shaperWrapped.passThrough = true;
+
+ source.connect(shaper);
+
+ source.start(0);
+ return shaper;
+ },
+ createExpectedBuffers(context) {
+ this.buffer = context.createBuffer(1, 4096, context.sampleRate);
+ for (var i = 1; i < 4095; ++i) {
+ this.buffer.getChannelData(0)[i] = 2 * (i / 4096) - 1;
+ }
+ // Two out of range values
+ this.buffer.getChannelData(0)[0] = -2;
+ this.buffer.getChannelData(0)[4095] = 2;
+
+ this.curve = new Float32Array(2048);
+ for (var i = 0; i < 2048; ++i) {
+ this.curve[i] = Math.sin(100 * Math.PI * (i + 1) / context.sampleRate);
+ }
+
+ return [this.buffer];
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_webAudio_muteTab.html b/dom/media/webaudio/test/test_webAudio_muteTab.html
new file mode 100644
index 0000000000..ced5c20c9d
--- /dev/null
+++ b/dom/media/webaudio/test/test_webAudio_muteTab.html
@@ -0,0 +1,95 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+</head>
+<body>
+<pre id="test">
+
+<script>
+createHTML({
+ title: "Check tab muting when the tab plays audio via the Web Audio API",
+ bug: "1346880",
+ visible: false
+});
+
+/**
+ * Check that muting a tab results in no audible audio: mute a tab in which
+ * an OscillatorNode is playing. The default audio output device is a
+ * pulseaudio null-sink. Simultaneously, record the other side of the null
+ * sink and check that no audio has been written to it, because the tab was
+ * muted. Then unmute the tab and check that audio is being sent to the
+ * null-sink. */
+runTest(async () => {
+ if (!SpecialPowers.getCharPref("media.audio_loopback_dev", "")) {
+ todo(false, "No loopback device set by framework. Try --use-test-media-devices");
+ return;
+ }
+
+ // Mute the tab
+ await SpecialPowers.toggleMuteState(true, window.top);
+ // Don't use a loopback tone, the loopback device is here to check that
+ // nothing is output because the tab is muted.
+ DISABLE_LOOPBACK_TONE = true;
+
+ const stream = await getUserMedia({audio: {
+ noiseSuppression: false,
+ echoCancellation: false,
+ autoGainControl: false,
+ }});
+ try {
+ const ac = new AudioContext();
+ const osc = new OscillatorNode(ac);
+ osc.connect(ac.destination);
+ osc.start();
+
+ const analyser = new AudioStreamAnalyser(ac, stream);
+  // Wait for some time, checking that there is only ever silent audio in
+  // the loopback stream. `waitForAnalysisSuccess` runs off
+  // requestAnimationFrame, so this counts roughly 3 seconds' worth of
+  // callbacks at 60 Hz (180 frames).
+  let silenceFor = 3 / (1 / 60);
+ await analyser.waitForAnalysisSuccess(array => {
+ // `array` has values between 0 and 255, 0 being silence.
+ const sum = array.reduce((acc, v) => { return acc + v; });
+ if (sum == 0) {
+ silenceFor--;
+ } else {
+ info(`Sum of the array values ${sum}`);
+ ok(false, `Found non-silent data in the loopback stream while the tab was muted.`);
+ return true;
+ }
+ if (silenceFor == 0) {
+ ok(true, "Muting the tab was effective");
+ }
+ return silenceFor == 0;
+ });
+
+ // Unmute the tab
+ await SpecialPowers.toggleMuteState(false, window.top);
+
+ await analyser.waitForAnalysisSuccess(array => {
+ // `array` has values between 0 and 255, 0 being silence.
+ const sum = array.reduce((acc, v) => { return acc + v; });
+ if (sum != 0) {
+ info(`Sum after unmuting ${sum}`);
+ ok(true, "Unmuting the tab was effective");
+ return true;
+ } else {
+ // Increment again if we find silence.
+ silenceFor++;
+ if (silenceFor > 100) {
+ ok(false, "Unmuting wasn't effective")
+ return true;
+ }
+ return false;
+ }
+ });
+ } finally {
+ for (let t of stream.getTracks()) {
+ t.stop();
+ }
+ }
+});
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/ting-44.1k-1ch.ogg b/dom/media/webaudio/test/ting-44.1k-1ch.ogg
new file mode 100644
index 0000000000..a11aaf1cbf
--- /dev/null
+++ b/dom/media/webaudio/test/ting-44.1k-1ch.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/ting-44.1k-1ch.wav b/dom/media/webaudio/test/ting-44.1k-1ch.wav
new file mode 100644
index 0000000000..6854c9d898
--- /dev/null
+++ b/dom/media/webaudio/test/ting-44.1k-1ch.wav
Binary files differ
diff --git a/dom/media/webaudio/test/ting-44.1k-2ch.ogg b/dom/media/webaudio/test/ting-44.1k-2ch.ogg
new file mode 100644
index 0000000000..94e0014858
--- /dev/null
+++ b/dom/media/webaudio/test/ting-44.1k-2ch.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/ting-44.1k-2ch.wav b/dom/media/webaudio/test/ting-44.1k-2ch.wav
new file mode 100644
index 0000000000..703d885892
--- /dev/null
+++ b/dom/media/webaudio/test/ting-44.1k-2ch.wav
Binary files differ
diff --git a/dom/media/webaudio/test/ting-48k-1ch.ogg b/dom/media/webaudio/test/ting-48k-1ch.ogg
new file mode 100644
index 0000000000..f45ce33a58
--- /dev/null
+++ b/dom/media/webaudio/test/ting-48k-1ch.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/ting-48k-1ch.wav b/dom/media/webaudio/test/ting-48k-1ch.wav
new file mode 100644
index 0000000000..8fe471666c
--- /dev/null
+++ b/dom/media/webaudio/test/ting-48k-1ch.wav
Binary files differ
diff --git a/dom/media/webaudio/test/ting-48k-2ch.ogg b/dom/media/webaudio/test/ting-48k-2ch.ogg
new file mode 100644
index 0000000000..e4c564abbd
--- /dev/null
+++ b/dom/media/webaudio/test/ting-48k-2ch.ogg
Binary files differ
diff --git a/dom/media/webaudio/test/ting-48k-2ch.wav b/dom/media/webaudio/test/ting-48k-2ch.wav
new file mode 100644
index 0000000000..ad4d0466da
--- /dev/null
+++ b/dom/media/webaudio/test/ting-48k-2ch.wav
Binary files differ
diff --git a/dom/media/webaudio/test/ting-dualchannel44.1.wav b/dom/media/webaudio/test/ting-dualchannel44.1.wav
new file mode 100644
index 0000000000..62954394d3
--- /dev/null
+++ b/dom/media/webaudio/test/ting-dualchannel44.1.wav
Binary files differ
diff --git a/dom/media/webaudio/test/ting-dualchannel48.wav b/dom/media/webaudio/test/ting-dualchannel48.wav
new file mode 100644
index 0000000000..a0b8247888
--- /dev/null
+++ b/dom/media/webaudio/test/ting-dualchannel48.wav
Binary files differ
diff --git a/dom/media/webaudio/test/webaudio.js b/dom/media/webaudio/test/webaudio.js
new file mode 100644
index 0000000000..dd8ce7fc54
--- /dev/null
+++ b/dom/media/webaudio/test/webaudio.js
@@ -0,0 +1,319 @@
+// Helpers for Web Audio tests
+
+function expectException(func, exceptionCode) {
+ var threw = false;
+ try {
+ func();
+ } catch (ex) {
+ threw = true;
+ is(ex.constructor.name, "DOMException", "Expect a DOM exception");
+ is(ex.code, exceptionCode, "Expect the correct exception code");
+ }
+ ok(threw, "The exception was thrown");
+}
+
+function expectNoException(func) {
+ var threw = false;
+ try {
+ func();
+ } catch (ex) {
+ threw = true;
+ }
+ ok(!threw, "An exception was not thrown");
+}
+
+function expectTypeError(func) {
+ var threw = false;
+ try {
+ func();
+ } catch (ex) {
+ threw = true;
+ ok(ex instanceof TypeError, "Expect a TypeError");
+ }
+ ok(threw, "The exception was thrown");
+}
+
+function expectRejectedPromise(that, func, exceptionName) {
+ var promise = that[func]();
+
+ ok(promise instanceof Promise, "Expect a Promise");
+
+ promise
+ .then(function (res) {
+ ok(false, "Promise resolved when it should have been rejected.");
+ })
+ .catch(function (err) {
+ is(
+ err.name,
+ exceptionName,
+ "Promise correctly reject with " + exceptionName
+ );
+ });
+}
+
+function fuzzyCompare(a, b) {
+ return Math.abs(a - b) < 9e-3;
+}
+
+function compareChannels(
+ buf1,
+ buf2,
+ /*optional*/ length,
+ /*optional*/ sourceOffset,
+ /*optional*/ destOffset,
+ /*optional*/ skipLengthCheck
+) {
+ if (!skipLengthCheck) {
+ is(buf1.length, buf2.length, "Channels must have the same length");
+ }
+ sourceOffset = sourceOffset || 0;
+ destOffset = destOffset || 0;
+ if (length == undefined) {
+ length = buf1.length - sourceOffset;
+ }
+ var difference = 0;
+ var maxDifference = 0;
+ var firstBadIndex = -1;
+ for (var i = 0; i < length; ++i) {
+ if (!fuzzyCompare(buf1[i + sourceOffset], buf2[i + destOffset])) {
+ difference++;
+ maxDifference = Math.max(
+ maxDifference,
+ Math.abs(buf1[i + sourceOffset] - buf2[i + destOffset])
+ );
+ if (firstBadIndex == -1) {
+ firstBadIndex = i;
+ }
+ }
+ }
+
+ is(
+ difference,
+ 0,
+ "maxDifference: " +
+ maxDifference +
+ ", first bad index: " +
+ firstBadIndex +
+ " with test-data offset " +
+ sourceOffset +
+ " and expected-data offset " +
+ destOffset +
+ "; corresponding values " +
+ buf1[firstBadIndex + sourceOffset] +
+ " and " +
+ buf2[firstBadIndex + destOffset] +
+ " --- differences"
+ );
+}
+
+function compareBuffers(got, expected) {
+ if (got.numberOfChannels != expected.numberOfChannels) {
+ is(
+ got.numberOfChannels,
+ expected.numberOfChannels,
+ "Correct number of buffer channels"
+ );
+ return;
+ }
+ if (got.length != expected.length) {
+ is(got.length, expected.length, "Correct buffer length");
+ return;
+ }
+ if (got.sampleRate != expected.sampleRate) {
+ is(got.sampleRate, expected.sampleRate, "Correct sample rate");
+ return;
+ }
+
+ for (var i = 0; i < got.numberOfChannels; ++i) {
+ compareChannels(
+ got.getChannelData(i),
+ expected.getChannelData(i),
+ got.length,
+ 0,
+ 0,
+ true
+ );
+ }
+}
+
+/**
+ * Compute the root mean square (RMS,
+ * <http://en.wikipedia.org/wiki/Root_mean_square>) of a channel of a slice
+ * (defined by `start` and `end`) of an AudioBuffer.
+ *
+ * This is useful to detect that a buffer is noisy or silent.
+ */
+function rms(audiobuffer, channel = 0, start = 0, end = audiobuffer.length) {
+ var buffer = audiobuffer.getChannelData(channel);
+ var rms = 0;
+ for (var i = start; i < end; i++) {
+ rms += buffer[i] * buffer[i];
+ }
+
+  // Average over the slice, not the whole buffer.
+  rms /= end - start;
+ rms = Math.sqrt(rms);
+ return rms;
+}
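+
+// For example, rms() of rendered silence is 0, while a full-scale sine over
+// a whole number of periods comes out near 1 / sqrt(2) ~= 0.707.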
+
+function getEmptyBuffer(context, length) {
+ return context.createBuffer(
+ gTest.numberOfChannels,
+ length,
+ context.sampleRate
+ );
+}
+
+/**
+ * This function assumes that the test file defines a single gTest variable with
+ * the following properties and methods:
+ *
+ * + numberOfChannels: optional property which specifies the number of channels
+ * in the output. The default value is 2.
+ * + createGraph: mandatory method which takes a context object and does
+ * everything needed in order to set up the Web Audio graph.
+ * This function returns the node to be inspected.
+ * + createGraphAsync: async version of createGraph. This function takes
+ * a callback which should be called with an argument
+ * set to the node to be inspected when the callee is
+ * ready to proceed with the test. Either this function
+ * or createGraph must be provided.
+ * + createExpectedBuffers: optional method which takes a context object and
+ * returns either one expected buffer or an array of
+ * them, designating what is expected to be observed
+ * in the output. If omitted, the output is expected
+ * to be silence. All buffers must have the same
+ * length, which must be a bufferSize supported by
+ * ScriptProcessorNode. This function is guaranteed
+ * to be called before createGraph.
+ * + length: property equal to the total number of frames which we are waiting
+ * to see in the output, mandatory if createExpectedBuffers is not
+ * provided, in which case it must be a bufferSize supported by
+ * ScriptProcessorNode (256, 512, 1024, 2048, 4096, 8192, or 16384).
+ * If createExpectedBuffers is provided then this must be equal to
+ * the number of expected buffers * the expected buffer length.
+ *
+ * + skipOfflineContextTests: optional. When true, skips running tests on an offline
+ * context by circumventing testOnOfflineContext.
+ */
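+//
+// A minimal sketch of a conforming test file (illustrative only, not part
+// of this suite):
+//
+//   var gTest = {
+//     length: 2048, // a ScriptProcessorNode-supported buffer size
+//     numberOfChannels: 1,
+//     createGraph(context) {
+//       // A GainNode with no input renders silence, matching the default
+//       // expectation when createExpectedBuffers is omitted.
+//       return context.createGain();
+//     },
+//   };
+//   runTest();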
+function runTest() {
+ function done() {
+ SimpleTest.finish();
+ }
+
+ SimpleTest.waitForExplicitFinish();
+ function runTestFunction() {
+ if (!gTest.numberOfChannels) {
+ gTest.numberOfChannels = 2; // default
+ }
+
+ var testLength;
+
+ function runTestOnContext(context, callback, testOutput) {
+      var expectedBuffers;
+      if (!gTest.createExpectedBuffers) {
+        // Assume that the output is silence
+        expectedBuffers = getEmptyBuffer(context, gTest.length);
+      } else {
+        expectedBuffers = gTest.createExpectedBuffers(context);
+      }
+ if (!(expectedBuffers instanceof Array)) {
+ expectedBuffers = [expectedBuffers];
+ }
+ var expectedFrames = 0;
+ for (var i = 0; i < expectedBuffers.length; ++i) {
+ is(
+ expectedBuffers[i].numberOfChannels,
+ gTest.numberOfChannels,
+ "Correct number of channels for expected buffer " + i
+ );
+ expectedFrames += expectedBuffers[i].length;
+ }
+ if (gTest.length && gTest.createExpectedBuffers) {
+ is(expectedFrames, gTest.length, "Correct number of expected frames");
+ }
+
+ if (gTest.createGraphAsync) {
+ gTest.createGraphAsync(context, function (nodeToInspect) {
+ testOutput(nodeToInspect, expectedBuffers, callback);
+ });
+ } else {
+ testOutput(gTest.createGraph(context), expectedBuffers, callback);
+ }
+ }
+
+ function testOnNormalContext(callback) {
+ function testOutput(nodeToInspect, expectedBuffers, callback) {
+ testLength = 0;
+ var sp = context.createScriptProcessor(
+ expectedBuffers[0].length,
+ gTest.numberOfChannels,
+ 0
+ );
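+      // The ScriptProcessorNode taps the node under test: each
+      // onaudioprocess callback delivers one rendered block of
+      // expectedBuffers[0].length frames for comparison.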
+ nodeToInspect.connect(sp);
+ sp.onaudioprocess = function (e) {
+ var expectedBuffer = expectedBuffers.shift();
+ testLength += expectedBuffer.length;
+ compareBuffers(e.inputBuffer, expectedBuffer);
+ if (!expectedBuffers.length) {
+ sp.onaudioprocess = null;
+ callback();
+ }
+ };
+ }
+ var context = new AudioContext();
+ runTestOnContext(context, callback, testOutput);
+ }
+
+ function testOnOfflineContext(callback, sampleRate) {
+ function testOutput(nodeToInspect, expectedBuffers, callback) {
+ nodeToInspect.connect(context.destination);
+ context.oncomplete = function (e) {
+ var samplesSeen = 0;
+ while (expectedBuffers.length) {
+ var expectedBuffer = expectedBuffers.shift();
+ is(
+ e.renderedBuffer.numberOfChannels,
+ expectedBuffer.numberOfChannels,
+ "Correct number of input buffer channels"
+ );
+ for (var i = 0; i < e.renderedBuffer.numberOfChannels; ++i) {
+ compareChannels(
+ e.renderedBuffer.getChannelData(i),
+ expectedBuffer.getChannelData(i),
+ expectedBuffer.length,
+ samplesSeen,
+ undefined,
+ true
+ );
+ }
+ samplesSeen += expectedBuffer.length;
+ }
+ callback();
+ };
+ context.startRendering();
+ }
+
+ var context = new OfflineAudioContext(
+ gTest.numberOfChannels,
+ testLength,
+ sampleRate
+ );
+ runTestOnContext(context, callback, testOutput);
+ }
+
+ testOnNormalContext(function () {
+ if (!gTest.skipOfflineContextTests) {
+ testOnOfflineContext(function () {
+ testOnOfflineContext(done, 44100);
+ }, 48000);
+ } else {
+ done();
+ }
+ });
+ }
+
+ if (document.readyState !== "complete") {
+ addLoadEvent(runTestFunction);
+ } else {
+ runTestFunction();
+ }
+}