author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 09:22:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 09:22:09 +0000
commit    43a97878ce14b72f0981164f87f2e35e14151312 (patch)
tree      620249daf56c0258faa40cbdcf9cfba06de2a846 /testing/web-platform/tests/webaudio
parent    Initial commit. (diff)
Adding upstream version 110.0.1. (tags: upstream/110.0.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'testing/web-platform/tests/webaudio')
-rw-r--r-- testing/web-platform/tests/webaudio/META.yml | 4
-rw-r--r-- testing/web-platform/tests/webaudio/README.md | 5
-rw-r--r-- testing/web-platform/tests/webaudio/historical.html | 29
-rw-r--r-- testing/web-platform/tests/webaudio/idlharness.https.window.js | 72
-rw-r--r-- testing/web-platform/tests/webaudio/js/buffer-loader.js | 44
-rw-r--r-- testing/web-platform/tests/webaudio/js/helpers.js | 250
-rw-r--r-- testing/web-platform/tests/webaudio/js/worklet-recorder.js | 55
-rw-r--r-- testing/web-platform/tests/webaudio/resources/4ch-440.wav | bin 0 -> 353022 bytes
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audio-param.js | 44
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js | 102
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audionodeoptions.js | 292
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audioparam-testing.js | 554
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audit-util.js | 195
-rw-r--r-- testing/web-platform/tests/webaudio/resources/audit.js | 1447
-rw-r--r-- testing/web-platform/tests/webaudio/resources/biquad-filters.js | 376
-rw-r--r-- testing/web-platform/tests/webaudio/resources/biquad-testing.js | 172
-rw-r--r-- testing/web-platform/tests/webaudio/resources/convolution-testing.js | 168
-rw-r--r-- testing/web-platform/tests/webaudio/resources/delay-testing.js | 66
-rw-r--r-- testing/web-platform/tests/webaudio/resources/distance-model-testing.js | 196
-rw-r--r-- testing/web-platform/tests/webaudio/resources/merger-testing.js | 24
-rw-r--r-- testing/web-platform/tests/webaudio/resources/mix-testing.js | 23
-rw-r--r-- testing/web-platform/tests/webaudio/resources/mixing-rules.js | 350
-rw-r--r-- testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js | 165
-rw-r--r-- testing/web-platform/tests/webaudio/resources/panner-formulas.js | 190
-rw-r--r-- testing/web-platform/tests/webaudio/resources/panner-model-testing.js | 184
-rw-r--r-- testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav | bin 0 -> 88246 bytes
-rw-r--r-- testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js | 45
-rw-r--r-- testing/web-platform/tests/webaudio/resources/stereopanner-testing.js | 205
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/processing-model/cycle-without-delay.html | 36
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/processing-model/delay-time-clamping.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/processing-model/feedback-delay-time.html | 42
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/ctor-analyser.html | 183
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-basic.html | 57
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-scaling.html | 111
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-sizing.html | 54
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-gain.html | 50
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-minimum.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-output.html | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-scale.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analysernode.html | 237
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/acquire-the-content.html | 85
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html | 330
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-getChannelData.html | 66
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-reuse.html | 36
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer.html | 71
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyFromChannel-bufferOffset-1.html | 11
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyToChannel-bufferOffset-1.html | 10
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html | 236
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html | 100
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html | 37
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html | 97
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html | 52
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html | 40
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html | 71
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html | 78
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html | 59
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html | 47
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html | 116
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html | 174
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html | 101
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html | 74
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html | 101
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html | 116
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html | 121
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html | 47
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav | bin 0 -> 529244 bytes
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html | 110
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html | 133
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html | 423
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html | 31
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html | 32
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html | 33
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html | 94
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html | 122
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html | 111
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html | 83
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html | 406
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html | 145
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html | 215
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html | 25
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html | 14
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html | 55
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html | 28
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html | 22
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html | 72
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html | 65
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html | 278
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html | 165
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html | 77
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html | 15
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html | 221
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html | 298
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js | 14
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html | 93
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html | 66
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html | 101
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html | 144
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html | 855
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html | 161
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html | 103
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html | 240
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html | 63
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html | 73
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html | 60
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html | 143
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html | 497
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html | 57
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html | 426
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html | 71
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html | 120
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js | 155
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html | 167
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html | 155
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html | 411
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html | 164
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html | 77
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html | 79
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html | 456
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html | 111
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html | 139
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html | 176
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html | 156
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html | 49
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html | 145
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html | 47
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html | 578
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html | 88
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html | 238
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html | 178
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html | 48
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html | 92
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html | 70
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html | 70
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html | 80
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html | 74
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html | 67
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js | 29
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html | 93
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html | 54
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html | 48
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html | 205
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html | 96
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html | 85
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html | 66
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html | 76
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers | 2
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html | 29
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html | 36
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html | 39
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html | 62
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html | 59
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html | 73
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html | 77
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html | 53
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html | 149
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html | 100
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html | 60
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html | 80
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html | 77
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html | 59
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html | 53
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html | 36
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html | 30
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html | 16
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html | 23
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html | 87
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js | 54
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js | 34
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js | 94
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js | 19
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js | 16
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js | 15
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js | 16
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js | 16
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js | 12
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js | 18
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js | 22
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js | 33
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js | 38
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js | 22
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js | 27
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js | 47
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js | 49
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js | 19
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js | 30
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js | 34
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js | 55
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js | 18
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js | 40
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js | 35
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js | 25
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js | 42
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js | 78
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html | 90
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-allpass.html | 42
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-automation.html | 406
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-bandpass.html | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-basic.html | 134
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-getFrequencyResponse.html | 394
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highpass.html | 42
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highshelf.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html | 45
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowshelf.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-notch.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-peaking.html | 46
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-tail.html | 71
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquadfilternode-basic.html | 64
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/ctor-biquadfilter.html | 86
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html | 288
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html | 93
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-basic.html | 67
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html | 82
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input-non-default.html | 79
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input.html | 113
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/ctor-channelmerger.html | 112
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/audiochannelsplitter.html | 141
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/ctor-channelsplitter.html | 115
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-basic.html | 85
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-onended.html | 38
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-output.html | 207
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/ctor-constantsource.html | 50
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html | 135
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html | 93
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolution-mono-mono.html | 62
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-cascade.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-channels.html | 43
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html | 406
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-2-chan.html | 373
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html | 508
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-already-has-value.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-null.html | 31
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-upmixing-1-channel-response.html | 143
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/ctor-convolver.html | 186
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/realtime-conv.html | 149
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/transferred-buffer-output.html | 107
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/ctor-delay.html | 76
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delay-test.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-channel-count-1.html | 104
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-default-delay.html | 49
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-nondefault-delay.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelay.html | 54
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelaylimit.html | 68
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-scheduling.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/maxdelay-rounding.html | 78
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html | 184
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/ctor-dynamicscompressor.html | 199
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html | 48
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/ctor-gain.html | 79
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-basic.html | 37
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html | 162
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html | 121
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/ctor-iirfilter.html | 126
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-basic.html | 204
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-getFrequencyResponse.html | 159
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter.html | 572
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html | 59
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html | 76
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html | 130
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html | 75
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/ctor-mediastreamaudiodestination.html | 64
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html | 73
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html | 127
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/ctor-offlineaudiocontext.html | 203
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html | 17
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/offlineaudiocontext-detached-execution-context.html | 34
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/startrendering-after-discard.html | 24
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/ctor-oscillator.html | 112
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-limiting.html | 154
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-overflow.html | 41
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/osc-basic-waveform.html | 229
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html | 140
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html | 468
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-exponential.html | 34
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-inverse.html | 28
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-linear.html | 30
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html | 298
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-equalpower-stereo.html | 47
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html | 265
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-azimuth.html | 51
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-distance-clamping.html | 227
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower-stereo.html | 44
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower.html | 139
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-rolloff-clamping.html | 98
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.window.js | 71
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-setposition-throws.html | 37
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html | 36
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/createPeriodicWaveInfiniteValuesThrows.html | 22
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/periodicWave.html | 130
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/simple-input-output.html | 98
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/ctor-stereopanner.html | 131
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html | 261
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-basic.html | 54
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-panning.html | 34
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/ctor-waveshaper.html | 72
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html | 184
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/silent-inputs.html | 103
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html | 100
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-limits.html | 110
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-simple.html | 61
-rw-r--r-- testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper.html | 127
309 files changed, 35264 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/META.yml b/testing/web-platform/tests/webaudio/META.yml
new file mode 100644
index 0000000000..3bcd1cb8d3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/META.yml
@@ -0,0 +1,4 @@
+spec: https://webaudio.github.io/web-audio-api/
+suggested_reviewers:
+ - hoch
+ - padenot
diff --git a/testing/web-platform/tests/webaudio/README.md b/testing/web-platform/tests/webaudio/README.md
new file mode 100644
index 0000000000..bcfe291ff3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/README.md
@@ -0,0 +1,5 @@
+Our test suite is currently tracking the [editor's draft](https://webaudio.github.io/web-audio-api/) of the Web Audio API.
+
+The tests are arranged in subdirectories, corresponding to different
+sections of the spec. So, for example, tests for the `DelayNode` are
+in `the-audio-api/the-delaynode-interface`.
diff --git a/testing/web-platform/tests/webaudio/historical.html b/testing/web-platform/tests/webaudio/historical.html
new file mode 100644
index 0000000000..1f3146c39d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/historical.html
@@ -0,0 +1,29 @@
+<!doctype html>
+<title>Historical Web Audio API features</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+[
+ "webkitAudioContext",
+ "webkitAudioPannerNode",
+ "webkitOfflineAudioContext",
+].forEach(name => {
+ test(function() {
+ assert_false(name in window);
+ }, name + " interface should not exist");
+});
+
+[
+ "dopplerFactor",
+ "speedOfSound",
+ "setVelocity"
+].forEach(name => {
+ test(function() {
+ assert_false(name in AudioListener.prototype);
+ }, name + " member should not exist on the AudioListener.");
+});
+
+test(function() {
+ assert_false("setVelocity" in PannerNode.prototype);
+}, "setVelocity should not exist on PannerNodes.");
+</script>
diff --git a/testing/web-platform/tests/webaudio/idlharness.https.window.js b/testing/web-platform/tests/webaudio/idlharness.https.window.js
new file mode 100644
index 0000000000..e941a75c26
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/idlharness.https.window.js
@@ -0,0 +1,72 @@
+// META: script=/resources/WebIDLParser.js
+// META: script=/resources/idlharness.js
+// META: timeout=long
+
+// https://webaudio.github.io/web-audio-api/
+
+'use strict';
+
+idl_test(
+ ['webaudio'],
+ ['cssom', 'uievents', 'mediacapture-streams', 'html', 'dom'],
+ async idl_array => {
+ idl_array.add_untested_idls('interface SVGElement {};');
+
+ idl_array.add_objects({
+ BaseAudioContext: [],
+ AudioContext: ['context'],
+ OfflineAudioContext: ['new OfflineAudioContext(1, 1, sample_rate)'],
+ OfflineAudioCompletionEvent: [
+ 'new OfflineAudioCompletionEvent("", {renderedBuffer: buffer})'
+ ],
+ AudioBuffer: ['buffer'],
+ AudioNode: [],
+ AudioParam: ['new AudioBufferSourceNode(context).playbackRate'],
+ AudioScheduledSourceNode: [],
+ AnalyserNode: ['new AnalyserNode(context)'],
+ AudioBufferSourceNode: ['new AudioBufferSourceNode(context)'],
+ AudioDestinationNode: ['context.destination'],
+ AudioListener: ['context.listener'],
+ AudioProcessingEvent: [`new AudioProcessingEvent('', {
+ playbackTime: 0, inputBuffer: buffer, outputBuffer: buffer
+ })`],
+ BiquadFilterNode: ['new BiquadFilterNode(context)'],
+ ChannelMergerNode: ['new ChannelMergerNode(context)'],
+ ChannelSplitterNode: ['new ChannelSplitterNode(context)'],
+ ConstantSourceNode: ['new ConstantSourceNode(context)'],
+ ConvolverNode: ['new ConvolverNode(context)'],
+ DelayNode: ['new DelayNode(context)'],
+ DynamicsCompressorNode: ['new DynamicsCompressorNode(context)'],
+ GainNode: ['new GainNode(context)'],
+ IIRFilterNode: [
+ 'new IIRFilterNode(context, {feedforward: [1], feedback: [1]})'
+ ],
+ MediaElementAudioSourceNode: [
+ 'new MediaElementAudioSourceNode(context, {mediaElement: new Audio})'
+ ],
+ MediaStreamAudioDestinationNode: [
+ 'new MediaStreamAudioDestinationNode(context)'
+ ],
+ MediaStreamAudioSourceNode: [],
+ MediaStreamTrackAudioSourceNode: [],
+ OscillatorNode: ['new OscillatorNode(context)'],
+ PannerNode: ['new PannerNode(context)'],
+ PeriodicWave: ['new PeriodicWave(context)'],
+ ScriptProcessorNode: ['context.createScriptProcessor()'],
+ StereoPannerNode: ['new StereoPannerNode(context)'],
+ WaveShaperNode: ['new WaveShaperNode(context)'],
+ AudioWorklet: ['context.audioWorklet'],
+ AudioWorkletGlobalScope: [],
+ AudioParamMap: ['worklet_node.parameters'],
+ AudioWorkletNode: ['worklet_node'],
+ AudioWorkletProcessor: [],
+ });
+
+ self.sample_rate = 44100;
+ self.context = new AudioContext;
+ self.buffer = new AudioBuffer({length: 1, sampleRate: sample_rate});
+ await context.audioWorklet.addModule(
+ 'the-audio-api/the-audioworklet-interface/processors/dummy-processor.js');
+ self.worklet_node = new AudioWorkletNode(context, 'dummy');
+ }
+);
diff --git a/testing/web-platform/tests/webaudio/js/buffer-loader.js b/testing/web-platform/tests/webaudio/js/buffer-loader.js
new file mode 100644
index 0000000000..453dc4a521
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/js/buffer-loader.js
@@ -0,0 +1,44 @@
+/* Taken from
+ https://raw.github.com/WebKit/webkit/master/LayoutTests/webaudio/resources/buffer-loader.js */
+
+function BufferLoader(context, urlList, callback) {
+ this.context = context;
+ this.urlList = urlList;
+ this.onload = callback;
+ this.bufferList = new Array();
+ this.loadCount = 0;
+}
+
+BufferLoader.prototype.loadBuffer = function(url, index) {
+ // Load buffer asynchronously
+ var request = new XMLHttpRequest();
+ request.open("GET", url, true);
+ request.responseType = "arraybuffer";
+
+ var loader = this;
+
+ request.onload = function() {
+ loader.context.decodeAudioData(request.response, decodeSuccessCallback, decodeErrorCallback);
+ };
+
+ request.onerror = function() {
+ alert('BufferLoader: XHR error');
+ };
+
+ var decodeSuccessCallback = function(buffer) {
+ loader.bufferList[index] = buffer;
+ if (++loader.loadCount == loader.urlList.length)
+ loader.onload(loader.bufferList);
+ };
+
+ var decodeErrorCallback = function() {
+ alert('decodeErrorCallback: decode error');
+ };
+
+ request.send();
+}
+
+BufferLoader.prototype.load = function() {
+ for (var i = 0; i < this.urlList.length; ++i)
+ this.loadBuffer(this.urlList[i], i);
+}
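
For reference, a minimal usage sketch of BufferLoader (not part of the commit; the URL and the callback body are illustrative):

// Load one file, then play the decoded buffer ('test.wav' is a placeholder).
var context = new AudioContext();
var loader = new BufferLoader(context, ['test.wav'], function(bufferList) {
  // bufferList[i] holds the decoded AudioBuffer for urlList[i].
  var source = context.createBufferSource();
  source.buffer = bufferList[0];
  source.connect(context.destination);
  source.start();
});
loader.load();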
diff --git a/testing/web-platform/tests/webaudio/js/helpers.js b/testing/web-platform/tests/webaudio/js/helpers.js
new file mode 100644
index 0000000000..413c72051b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/js/helpers.js
@@ -0,0 +1,250 @@
+/*
+ Returns an array (typed or not) equal to the passed array with leading and
+ trailing zero-valued elements removed.
+ */
+function trimEmptyElements(array) {
+ var start = 0;
+ var end = array.length;
+
+ while (start < array.length) {
+ if (array[start] !== 0) {
+ break;
+ }
+ start++;
+ }
+
+ while (end > 0) {
+ end--;
+ if (array[end] !== 0) {
+ break;
+ }
+ }
+ return array.subarray(start, end);
+}
+
+
+function fuzzyCompare(a, b) {
+ return Math.abs(a - b) < 9e-3;
+}
+
+function compareChannels(buf1, buf2,
+ /*optional*/ length,
+ /*optional*/ sourceOffset,
+ /*optional*/ destOffset,
+ /*optional*/ skipLengthCheck) {
+ if (!skipLengthCheck) {
+ assert_equals(buf1.length, buf2.length, "Channels must have the same length");
+ }
+ sourceOffset = sourceOffset || 0;
+ destOffset = destOffset || 0;
+ if (length == undefined) {
+ length = buf1.length - sourceOffset;
+ }
+ var difference = 0;
+ var maxDifference = 0;
+ var firstBadIndex = -1;
+ for (var i = 0; i < length; ++i) {
+ if (!fuzzyCompare(buf1[i + sourceOffset], buf2[i + destOffset])) {
+ difference++;
+ maxDifference = Math.max(maxDifference, Math.abs(buf1[i + sourceOffset] - buf2[i + destOffset]));
+ if (firstBadIndex == -1) {
+ firstBadIndex = i;
+ }
+ }
+ }
+
+ assert_equals(difference, 0, "maxDifference: " + maxDifference +
+ ", first bad index: " + firstBadIndex + " with test-data offset " +
+ sourceOffset + " and expected-data offset " + destOffset +
+ "; corresponding values " + buf1[firstBadIndex + sourceOffset] + " and " +
+ buf2[firstBadIndex + destOffset] + " --- differences");
+}
+
+function compareBuffers(got, expected) {
+ if (got.numberOfChannels != expected.numberOfChannels) {
+ assert_equals(got.numberOfChannels, expected.numberOfChannels,
+ "Correct number of buffer channels");
+ return;
+ }
+ if (got.length != expected.length) {
+ assert_equals(got.length, expected.length,
+ "Correct buffer length");
+ return;
+ }
+ if (got.sampleRate != expected.sampleRate) {
+ assert_equals(got.sampleRate, expected.sampleRate,
+ "Correct sample rate");
+ return;
+ }
+
+ for (var i = 0; i < got.numberOfChannels; ++i) {
+ compareChannels(got.getChannelData(i), expected.getChannelData(i),
+ got.length, 0, 0, true);
+ }
+}
+
+/**
+ * This function assumes that the test is a "single page test" [0], and defines a
+ * single gTest variable with the following properties and methods:
+ *
+ * + numberOfChannels: optional property which specifies the number of channels
+ * in the output. The default value is 2.
+ * + createGraph: mandatory method which takes a context object and does
+ * everything needed in order to set up the Web Audio graph.
+ * This function returns the node to be inspected.
+ * + createGraphAsync: async version of createGraph. This function takes
+ * a callback which should be called with the node
+ * to be inspected once the graph is set up and the
+ * test is ready to proceed. Either this function
+ * or createGraph must be provided.
+ * + createExpectedBuffers: optional method which takes a context object and
+ * returns either one expected buffer or an array of
+ * them, designating what is expected to be observed
+ * in the output. If omitted, the output is expected
+ * to be silence. All buffers must have the same
+ * length, which must be a bufferSize supported by
+ * ScriptProcessorNode. This function is guaranteed
+ * to be called before createGraph.
+ * + length: property equal to the total number of frames which we are waiting
+ * to see in the output, mandatory if createExpectedBuffers is not
+ * provided, in which case it must be a bufferSize supported by
+ * ScriptProcessorNode (256, 512, 1024, 2048, 4096, 8192, or 16384).
+ * If createExpectedBuffers is provided then this must be equal to
+ * the number of expected buffers * the expected buffer length.
+ *
+ * + skipOfflineContextTests: optional. When true, skips running tests on an offline
+ * context by circumventing testOnOfflineContext.
+ *
+ * [0]: https://web-platform-tests.org/writing-tests/testharness-api.html#single-page-tests
+ */
+function runTest(name)
+{
+ function runTestFunction () {
+ if (!gTest.numberOfChannels) {
+ gTest.numberOfChannels = 2; // default
+ }
+
+ var testLength;
+
+ function runTestOnContext(context, callback, testOutput) {
+ var expectedBuffers;
+ if (!gTest.createExpectedBuffers) {
+ // Assume that the output is silence
+ expectedBuffers = getEmptyBuffer(context, gTest.length);
+ } else {
+ expectedBuffers = gTest.createExpectedBuffers(context);
+ }
+ if (!(expectedBuffers instanceof Array)) {
+ expectedBuffers = [expectedBuffers];
+ }
+ var expectedFrames = 0;
+ for (var i = 0; i < expectedBuffers.length; ++i) {
+ assert_equals(expectedBuffers[i].numberOfChannels, gTest.numberOfChannels,
+ "Correct number of channels for expected buffer " + i);
+ expectedFrames += expectedBuffers[i].length;
+ }
+ if (gTest.length && gTest.createExpectedBuffers) {
+ assert_equals(expectedFrames,
+ gTest.length, "Correct number of expected frames");
+ }
+
+ if (gTest.createGraphAsync) {
+ gTest.createGraphAsync(context, function(nodeToInspect) {
+ testOutput(nodeToInspect, expectedBuffers, callback);
+ });
+ } else {
+ testOutput(gTest.createGraph(context), expectedBuffers, callback);
+ }
+ }
+
+ function testOnNormalContext(callback) {
+ function testOutput(nodeToInspect, expectedBuffers, callback) {
+ testLength = 0;
+ var sp = context.createScriptProcessor(expectedBuffers[0].length, gTest.numberOfChannels, 1);
+ nodeToInspect.connect(sp).connect(context.destination);
+ sp.onaudioprocess = function(e) {
+ var expectedBuffer = expectedBuffers.shift();
+ testLength += expectedBuffer.length;
+ compareBuffers(e.inputBuffer, expectedBuffer);
+ if (expectedBuffers.length == 0) {
+ sp.onaudioprocess = null;
+ callback();
+ }
+ };
+ }
+ var context = new AudioContext();
+ runTestOnContext(context, callback, testOutput);
+ }
+
+ function testOnOfflineContext(callback, sampleRate) {
+ function testOutput(nodeToInspect, expectedBuffers, callback) {
+ nodeToInspect.connect(context.destination);
+ context.oncomplete = function(e) {
+ var samplesSeen = 0;
+ while (expectedBuffers.length) {
+ var expectedBuffer = expectedBuffers.shift();
+ assert_equals(e.renderedBuffer.numberOfChannels, expectedBuffer.numberOfChannels,
+ "Correct number of input buffer channels");
+ for (var i = 0; i < e.renderedBuffer.numberOfChannels; ++i) {
+ compareChannels(e.renderedBuffer.getChannelData(i),
+ expectedBuffer.getChannelData(i),
+ expectedBuffer.length,
+ samplesSeen,
+ undefined,
+ true);
+ }
+ samplesSeen += expectedBuffer.length;
+ }
+ callback();
+ };
+ context.startRendering();
+ }
+
+ var context = new OfflineAudioContext(gTest.numberOfChannels, testLength, sampleRate);
+ runTestOnContext(context, callback, testOutput);
+ }
+
+ testOnNormalContext(function() {
+ if (!gTest.skipOfflineContextTests) {
+ testOnOfflineContext(function() {
+ testOnOfflineContext(done, 44100);
+ }, 48000);
+ } else {
+ done();
+ }
+ });
+ };
+
+ runTestFunction();
+}
+
+// Simpler than audit.js, but still logs the message. Requires
+// `setup({explicit_done: true})` if testing code that runs after the "load"
+// event.
+function equals(a, b, msg) {
+ test(function() {
+ assert_equals(a, b);
+ }, msg);
+}
+function is_true(a, msg) {
+ test(function() {
+ assert_true(a);
+ }, msg);
+}
+
+// This allows writing AudioWorkletProcessor code in the same file as the rest
+// of the test, for quick one off AudioWorkletProcessor testing.
+function URLFromScriptsElements(ids)
+{
+ var scriptTexts = [];
+ for (let id of ids) {
+ const e = document.querySelector("script#" + id);
+ if (!e) {
+ throw new Error(id + " is not the id of a <script> tag");
+ }
+ scriptTexts.push(e.innerText);
+ }
+ const blob = new Blob(scriptTexts, {type: "application/javascript"});
+
+ return URL.createObjectURL(blob);
+}
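
To make the gTest contract documented above concrete, a minimal hypothetical single-page test built on runTest might look like this (all names and values are illustrative, not part of the commit):

// Expect a constant 0.5 signal for one 2048-frame block (a bufferSize
// supported by ScriptProcessorNode, as the contract above requires).
var gTest = {
  numberOfChannels: 1,
  length: 2048,
  createGraph: function(context) {
    var buffer = context.createBuffer(1, 2048, context.sampleRate);
    buffer.getChannelData(0).fill(0.5);
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.loop = true;
    source.start(0);
    return source; // the node whose output runTest inspects
  },
  createExpectedBuffers: function(context) {
    var expected = context.createBuffer(1, 2048, context.sampleRate);
    expected.getChannelData(0).fill(0.5);
    return expected;
  },
};
runTest("constant buffer passes through unchanged");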
diff --git a/testing/web-platform/tests/webaudio/js/worklet-recorder.js b/testing/web-platform/tests/webaudio/js/worklet-recorder.js
new file mode 100644
index 0000000000..913ab742aa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/js/worklet-recorder.js
@@ -0,0 +1,55 @@
+/**
+ * @class RecorderProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * A simple recorder AudioWorkletProcessor. Returns the recorded buffer to the
+ * node when recording is finished.
+ */
+class RecorderProcessor extends AudioWorkletProcessor {
+ /**
+ * @param {*} options
+ * @param {number} options.duration A duration to record in seconds.
+ * @param {number} options.channelCount A channel count to record.
+ */
+ constructor(options) {
+ super();
+ this._createdAt = currentTime;
+ this._elapsed = 0;
+ this._recordDuration = options.duration || 1;
+ this._recordChannelCount = options.channelCount || 1;
+ this._recordBufferLength = sampleRate * this._recordDuration;
+ this._recordBuffer = [];
+ for (let i = 0; i < this._recordChannelCount; ++i) {
+ this._recordBuffer[i] = new Float32Array(this._recordBufferLength);
+ }
+ }
+
+ process(inputs, outputs) {
+ if (this._recordBufferLength <= currentFrame) {
+ this.port.postMessage({
+ type: 'recordfinished',
+ recordBuffer: this._recordBuffer
+ });
+ this.port.close();
+ return false;
+ }
+
+ // Record the incoming data from |inputs| and pass it through unchanged
+ // to |outputs|.
+ const input = inputs[0];
+ const output = outputs[0];
+ for (let channel = 0; channel < input.length; ++channel) {
+ const inputChannel = input[channel];
+ const outputChannel = output[channel];
+ outputChannel.set(inputChannel);
+
+ const buffer = this._recordBuffer[channel];
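+ // Copy at most the remaining capacity so the final render quantum does
+ // not write past the end of the recording buffer.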
+ const capacity = buffer.length - currentFrame;
+ buffer.set(inputChannel.slice(0, capacity), currentFrame);
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('recorder-processor', RecorderProcessor);
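
A sketch of driving RecorderProcessor from the main thread, assuming the default one-second, single-channel recording (the module path and the upstream `source` node are placeholders, not from the commit):

// After context.audioWorklet.addModule('js/worklet-recorder.js') resolves:
const recorder = new AudioWorkletNode(context, 'recorder-processor');
source.connect(recorder).connect(context.destination);
recorder.port.onmessage = (event) => {
  if (event.data.type === 'recordfinished') {
    // recordBuffer is an array of per-channel Float32Arrays.
    const samples = event.data.recordBuffer[0];
  }
};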
diff --git a/testing/web-platform/tests/webaudio/resources/4ch-440.wav b/testing/web-platform/tests/webaudio/resources/4ch-440.wav
new file mode 100644
index 0000000000..85dc1ea904
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/4ch-440.wav
Binary files differ
diff --git a/testing/web-platform/tests/webaudio/resources/audio-param.js b/testing/web-platform/tests/webaudio/resources/audio-param.js
new file mode 100644
index 0000000000..bc33fe8a21
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audio-param.js
@@ -0,0 +1,44 @@
+// Define functions that implement the formulas for AudioParam automations.
+
+// AudioParam linearRamp value at time t for a linear ramp between (t0, v0) and
+// (t1, v1). It is assumed that t0 <= t. Results are undefined otherwise.
+function audioParamLinearRamp(t, v0, t0, v1, t1) {
+ if (t >= t1)
+ return v1;
+ return v0 + (v1 - v0) * (t - t0) / (t1 - t0);
+}
+
+// AudioParam exponentialRamp value at time t for an exponential ramp between
+// (t0, v0) and (t1, v1). It is assumed that t0 <= t. Results are undefined
+// otherwise.
+function audioParamExponentialRamp(t, v0, t0, v1, t1) {
+ if (t >= t1)
+ return v1;
+ return v0 * Math.pow(v1 / v0, (t - t0) / (t1 - t0));
+}
+
+// AudioParam setTarget value at time t for a setTarget curve starting at (t0,
+// v0) with a final value of vFainal and a time constant of timeConstant. It is
+// assumed that t0 <= t. Results are undefined otherwise.
+function audioParamSetTarget(t, v0, t0, vFinal, timeConstant) {
+ return vFinal + (v0 - vFinal) * Math.exp(-(t - t0) / timeConstant);
+}
+
+// AudioParam setValueCurve value at time t for a setValueCurve starting at time
+// t0 with the given curve and duration. The sample rate is sampleRate.
+// It is assumed that t0 <= t.
+function audioParamSetValueCurve(t, curve, t0, duration) {
+ if (t > t0 + duration)
+ return curve[curve.length - 1];
+
+ let curvePointsPerSecond = (curve.length - 1) / duration;
+
+ let virtualIndex = (t - t0) * curvePointsPerSecond;
+ let index = Math.floor(virtualIndex);
+
+ let delta = virtualIndex - index;
+
+ let c0 = curve[index];
+ let c1 = curve[Math.min(index + 1, curve.length - 1)];
+ return c0 + (c1 - c0) * delta;
+}
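
As a quick sanity check of the ramp formulas above (arbitrary values): halfway through a linear ramp the value is the arithmetic mean of the endpoints, while halfway through an exponential ramp it is their geometric mean:

// Hypothetical spot-check of the two ramp helpers above.
const v0 = 0.5, v1 = 2.0, t0 = 0.0, t1 = 1.0;
const linMid = audioParamLinearRamp(0.5, v0, t0, v1, t1);
console.assert(Math.abs(linMid - (v0 + v1) / 2) < 1e-12);      // 1.25
const expMid = audioParamExponentialRamp(0.5, v0, t0, v1, t1);
console.assert(Math.abs(expMid - Math.sqrt(v0 * v1)) < 1e-12); // 1.0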
diff --git a/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js b/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js
new file mode 100644
index 0000000000..2233641914
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audiobuffersource-testing.js
@@ -0,0 +1,102 @@
+function createTestBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ let channelData = audioBuffer.getChannelData(0);
+
+ // Create a simple linear ramp starting at zero, with each value in the buffer
+ // equal to its index position.
+ for (let i = 0; i < sampleFrameLength; ++i)
+ channelData[i] = i;
+
+ return audioBuffer;
+}
+
+function checkSingleTest(renderedBuffer, i, should) {
+ let renderedData = renderedBuffer.getChannelData(0);
+ let offsetFrame = i * testSpacingFrames;
+
+ let test = tests[i];
+ let expected = test.expected;
+ let description;
+
+ if (test.description) {
+ description = test.description;
+ } else {
+ // No description given, so create a basic one from the given test
+ // parameters.
+ description =
+ 'loop from ' + test.loopStartFrame + ' -> ' + test.loopEndFrame;
+ if (test.offsetFrame)
+ description += ' with offset ' + test.offsetFrame;
+ if (test.playbackRate && test.playbackRate != 1)
+ description += ' with playbackRate of ' + test.playbackRate;
+ }
+
+ let framesToTest;
+
+ if (test.renderFrames)
+ framesToTest = test.renderFrames;
+ else if (test.durationFrames)
+ framesToTest = test.durationFrames;
+
+ // Verify that the output matches
+ let prefix = 'Case ' + i + ': ';
+ should(
+ renderedData.slice(offsetFrame, offsetFrame + framesToTest),
+ prefix + description)
+ .beEqualToArray(expected);
+
+ // Verify that we get all zeroes after the buffer (or duration) has passed.
+ should(
+ renderedData.slice(
+ offsetFrame + framesToTest, offsetFrame + testSpacingFrames),
+ prefix + description + ': tail')
+ .beConstantValueOf(0);
+}
+
+function checkAllTests(renderedBuffer, should) {
+ for (let i = 0; i < tests.length; ++i)
+ checkSingleTest(renderedBuffer, i, should);
+}
+
+
+// Create the actual result by modulating the playbackRate or detune
+// AudioParam of an AudioBufferSourceNode (ABSN). |modTarget| is the name of
+// the AudioParam to modulate, |modOffset| is the offset (anchor) point of the
+// modulation, and |modRange| is the modulation range.
+//
+// createSawtoothWithModulation(context, 'detune', 440, 1200);
+//
+// The call above modulates detune over the range [-1200, 1200] around a
+// sawtooth waveform at 440 Hz.
+function createSawtoothWithModulation(context, modTarget, modOffset, modRange) {
+ let lfo = context.createOscillator();
+ let amp = context.createGain();
+
+ // Create a sawtooth generator with the signal range of [0, 1].
+ let phasor = context.createBufferSource();
+ let phasorBuffer = context.createBuffer(1, sampleRate, sampleRate);
+ let phasorArray = phasorBuffer.getChannelData(0);
+ let phase = 0, phaseStep = 1 / sampleRate;
+ for (let i = 0; i < phasorArray.length; i++) {
+ phasorArray[i] = phase % 1.0;
+ phase += phaseStep;
+ }
+ phasor.buffer = phasorBuffer;
+ phasor.loop = true;
+
+  // Use a 1 Hz LFO so the parameter modulation is perceivable by a listener.
+ lfo.frequency.value = 1.0;
+
+ amp.gain.value = modRange;
+ phasor.playbackRate.value = modOffset;
+
+  // Amplify the LFO output so the modulation covers the desired range.
+ lfo.connect(amp);
+ amp.connect(phasor[modTarget]);
+
+ phasor.connect(context.destination);
+
+ lfo.start();
+ phasor.start();
+}
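+
+// A minimal sketch (illustrative, not part of the test suite) of driving the
+// helper above from an offline context. A global |sampleRate| is assumed to
+// be supplied by the including test page.
+//
+//   let sampleRate = 44100;
+//   let context = new OfflineAudioContext(1, sampleRate, sampleRate);
+//   createSawtoothWithModulation(context, 'detune', 440, 1200);
+//   context.startRendering().then((buffer) => {
+//     // Inspect buffer.getChannelData(0) against a reference.
+//   });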
diff --git a/testing/web-platform/tests/webaudio/resources/audionodeoptions.js b/testing/web-platform/tests/webaudio/resources/audionodeoptions.js
new file mode 100644
index 0000000000..3b7867cabf
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audionodeoptions.js
@@ -0,0 +1,292 @@
+// Test that the constructor for the node named |nodeName| handles the
+// various possible values for channelCount, channelCountMode, and
+// channelInterpretation.
+
+// The |should| parameter is the assertion function provided by an |Audit|
+// task.
+function testAudioNodeOptions(should, context, nodeName, expectedNodeOptions) {
+ if (expectedNodeOptions === undefined)
+ expectedNodeOptions = {};
+ let node;
+
+  // Test that we can set channelCount and that errors are thrown for
+  // invalid values.
+ let testChannelCount = 17;
+ if (expectedNodeOptions.channelCount) {
+ testChannelCount = expectedNodeOptions.channelCount.value;
+ }
+ should(
+ () => {
+ node = new window[nodeName](
+ context, Object.assign({}, expectedNodeOptions.additionalOptions, {
+ channelCount: testChannelCount
+ }));
+ },
+ 'new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})')
+ .notThrow();
+ should(node.channelCount, 'node.channelCount').beEqualTo(testChannelCount);
+
+ if (expectedNodeOptions.channelCount &&
+ expectedNodeOptions.channelCount.isFixed) {
+ // The channel count is fixed. Verify that we throw an error if
+ // we try to change it. Arbitrarily set the count to be one more
+ // than the expected value.
+ testChannelCount = expectedNodeOptions.channelCount.value + 1;
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelCount: testChannelCount}));
+ },
+ 'new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})')
+ .throw(DOMException,
+ expectedNodeOptions.channelCount.exceptionType);
+ // And test that setting it to the fixed value does not throw.
+ testChannelCount = expectedNodeOptions.channelCount.value;
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelCount: testChannelCount}));
+ node.channelCount = testChannelCount;
+ },
+ '(new ' + nodeName + '(c, {channelCount: ' + testChannelCount + '})).channelCount = ' + testChannelCount)
+ .notThrow();
+ } else {
+ // The channel count is not fixed. Try to set the count to invalid
+ // values and make sure an error is thrown.
+ [0, 99].forEach(testValue => {
+ should(() => {
+ node = new window[nodeName](
+ context, Object.assign({}, expectedNodeOptions.additionalOptions, {
+ channelCount: testValue
+ }));
+ }, `new ${nodeName}(c, {channelCount: ${testValue}})`)
+ .throw(DOMException, 'NotSupportedError');
+ });
+ }
+
+ // Test channelCountMode
+ let testChannelCountMode = 'max';
+ if (expectedNodeOptions.channelCountMode) {
+ testChannelCountMode = expectedNodeOptions.channelCountMode.value;
+ }
+ should(
+ () => {
+ node = new window[nodeName](
+ context, Object.assign({}, expectedNodeOptions.additionalOptions, {
+ channelCountMode: testChannelCountMode
+ }));
+ },
+ 'new ' + nodeName + '(c, {channelCountMode: "' + testChannelCountMode +
+          '"})')
+ .notThrow();
+ should(node.channelCountMode, 'node.channelCountMode')
+ .beEqualTo(testChannelCountMode);
+
+ if (expectedNodeOptions.channelCountMode &&
+ expectedNodeOptions.channelCountMode.isFixed) {
+ // Channel count mode is fixed. Test setting to something else throws.
+ ['max', 'clamped-max', 'explicit'].forEach(testValue => {
+ if (testValue !== expectedNodeOptions.channelCountMode.value) {
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelCountMode: testValue}));
+ },
+ `new ${nodeName}(c, {channelCountMode: "${testValue}"})`)
+ .throw(DOMException,
+ expectedNodeOptions.channelCountMode.exceptionType);
+ } else {
+          // Test that explicitly setting the fixed value is allowed.
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelCountMode: testValue}));
+ node.channelCountMode = testValue;
+ },
+ `(new ${nodeName}(c, {channelCountMode: "${testValue}"})).channelCountMode = "${testValue}"`)
+ .notThrow();
+ }
+ });
+ } else {
+ // Mode is not fixed. Verify that we can set the mode to all valid
+ // values, and that we throw for invalid values.
+
+ let testValues = ['max', 'clamped-max', 'explicit'];
+
+ testValues.forEach(testValue => {
+ should(() => {
+ node = new window[nodeName](
+ context, Object.assign({}, expectedNodeOptions.additionalOptions, {
+ channelCountMode: testValue
+ }));
+ }, `new ${nodeName}(c, {channelCountMode: "${testValue}"})`).notThrow();
+ should(
+ node.channelCountMode, 'node.channelCountMode after valid setter')
+ .beEqualTo(testValue);
+
+ });
+
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelCountMode: 'foobar'}));
+ },
+        'new ' + nodeName + '(c, {channelCountMode: "foobar"})')
+ .throw(TypeError);
+ should(node.channelCountMode, 'node.channelCountMode after invalid setter')
+ .beEqualTo(testValues[testValues.length - 1]);
+ }
+
+ // Test channelInterpretation
+ if (expectedNodeOptions.channelInterpretation &&
+ expectedNodeOptions.channelInterpretation.isFixed) {
+ // The channel interpretation is fixed. Verify that we throw an
+ // error if we try to change it.
+ ['speakers', 'discrete'].forEach(testValue => {
+ if (testValue !== expectedNodeOptions.channelInterpretation.value) {
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+                        {}, expectedNodeOptions.additionalOptions,
+ {channelInterpretation: testValue}));
+ },
+ `new ${nodeName}(c, {channelInterpretation: "${testValue}"})`)
+ .throw(DOMException,
+                     expectedNodeOptions.channelInterpretation.exceptionType);
+ } else {
+ // Check that assigning the fixed value is OK.
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+                        {}, expectedNodeOptions.additionalOptions,
+ {channelInterpretation: testValue}));
+ node.channelInterpretation = testValue;
+ },
+ `(new ${nodeName}(c, {channelInterpretation: "${testValue}"})).channelInterpretation = "${testValue}"`)
+ .notThrow();
+ }
+ });
+ } else {
+ // Channel interpretation is not fixed. Verify that we can set it
+ // to all possible values.
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelInterpretation: 'speakers'}));
+ },
+ 'new ' + nodeName + '(c, {channelInterpretation: "speakers"})')
+ .notThrow();
+ should(node.channelInterpretation, 'node.channelInterpretation')
+ .beEqualTo('speakers');
+
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelInterpretation: 'discrete'}));
+ },
+ 'new ' + nodeName + '(c, {channelInterpretation: "discrete"})')
+ .notThrow();
+ should(node.channelInterpretation, 'node.channelInterpretation')
+ .beEqualTo('discrete');
+
+ should(
+ () => {
+ node = new window[nodeName](
+ context,
+ Object.assign(
+ {}, expectedNodeOptions.additionalOptions,
+ {channelInterpretation: 'foobar'}));
+ },
+ 'new ' + nodeName + '(c, {channelInterpretation: "foobar"})')
+ .throw(TypeError);
+ should(
+ node.channelInterpretation,
+ 'node.channelInterpretation after invalid setter')
+ .beEqualTo('discrete');
+ }
+}
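+
+// Example call (a sketch; the option values below are hypothetical and
+// depend on the node under test). A fixed property is described by
+// {value, isFixed, exceptionType}, which is what the checks above read:
+//
+//   testAudioNodeOptions(should, context, 'DynamicsCompressorNode', {
+//     channelCount:
+//         {value: 2, isFixed: true, exceptionType: 'NotSupportedError'},
+//     channelCountMode:
+//         {value: 'clamped-max', isFixed: true,
+//          exceptionType: 'NotSupportedError'}
+//   });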
+
+function initializeContext(should) {
+ let c;
+ should(() => {
+ c = new OfflineAudioContext(1, 1, 48000);
+ }, 'context = new OfflineAudioContext(...)').notThrow();
+
+ return c;
+}
+
+function testInvalidConstructor(should, name, context) {
+ should(() => {
+ new window[name]();
+ }, 'new ' + name + '()').throw(TypeError);
+ should(() => {
+ new window[name](1);
+ }, 'new ' + name + '(1)').throw(TypeError);
+ should(() => {
+ new window[name](context, 42);
+ }, 'new ' + name + '(context, 42)').throw(TypeError);
+}
+
+function testDefaultConstructor(should, name, context, options) {
+ let node;
+
+ let message = options.prefix + ' = new ' + name + '(context';
+ if (options.constructorOptions)
+ message += ', ' + JSON.stringify(options.constructorOptions);
+  message += ')';
+
+ should(() => {
+ node = new window[name](context, options.constructorOptions);
+ }, message).notThrow();
+
+ should(node instanceof window[name], options.prefix + ' instanceof ' + name)
+ .beEqualTo(true);
+ should(node.numberOfInputs, options.prefix + '.numberOfInputs')
+ .beEqualTo(options.numberOfInputs);
+ should(node.numberOfOutputs, options.prefix + '.numberOfOutputs')
+ .beEqualTo(options.numberOfOutputs);
+ should(node.channelCount, options.prefix + '.channelCount')
+ .beEqualTo(options.channelCount);
+ should(node.channelCountMode, options.prefix + '.channelCountMode')
+ .beEqualTo(options.channelCountMode);
+ should(node.channelInterpretation, options.prefix + '.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ return node;
+}
+
+function testDefaultAttributes(should, node, prefix, items) {
+ items.forEach((item) => {
+ let attr = node[item.name];
+ if (attr instanceof AudioParam) {
+ should(attr.value, prefix + '.' + item.name + '.value')
+ .beEqualTo(item.value);
+ } else {
+ should(attr, prefix + '.' + item.name).beEqualTo(item.value);
+ }
+ });
+}
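+
+// Example usage (a sketch using GainNode's spec defaults; any other node
+// would supply its own expected values):
+//
+//   let node = testDefaultConstructor(should, 'GainNode', context, {
+//     prefix: 'node0',
+//     numberOfInputs: 1,
+//     numberOfOutputs: 1,
+//     channelCount: 2,
+//     channelCountMode: 'max',
+//     channelInterpretation: 'speakers'
+//   });
+//   testDefaultAttributes(should, node, 'node0', [{name: 'gain', value: 1}]);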
diff --git a/testing/web-platform/tests/webaudio/resources/audioparam-testing.js b/testing/web-platform/tests/webaudio/resources/audioparam-testing.js
new file mode 100644
index 0000000000..bc90ddbef8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audioparam-testing.js
@@ -0,0 +1,554 @@
+(function(global) {
+
+ // Information about the starting/ending times and starting/ending values for
+ // each time interval.
+ let timeValueInfo;
+
+ // The difference between starting values between each time interval.
+ let startingValueDelta;
+
+  // For any automation function that has an end or target value, the end
+  // value is based on the starting value of the time interval. The starting
+  // value will be increased or decreased by |startEndValueChange|. We choose
+  // half of |startingValueDelta| so that the ending value will be distinct
+  // from the starting value of the next time interval. This allows us to
+  // detect where the ramp begins and ends.
+ let startEndValueChange;
+
+ // Default threshold to use for detecting discontinuities that should appear
+ // at each time interval.
+ let discontinuityThreshold;
+
+ // Time interval between value changes. It is best if 1 / numberOfTests is
+ // not close to timeInterval.
+  let timeIntervalInternal = 0.03;
+
+ let context;
+
+ // Make sure we render long enough to capture all of our test data.
+ function renderLength(numberOfTests) {
+ return timeToSampleFrame((numberOfTests + 1) * timeInterval, sampleRate);
+ }
+
+ // Create a constant reference signal with the given |value|. Basically the
+ // same as |createConstantBuffer|, but with the parameters to match the other
+ // create functions. The |endValue| is ignored.
+ function createConstantArray(
+ startTime, endTime, value, endValue, sampleRate) {
+ let startFrame = timeToSampleFrame(startTime, sampleRate);
+ let endFrame = timeToSampleFrame(endTime, sampleRate);
+ let length = endFrame - startFrame;
+
+ let buffer = createConstantBuffer(context, length, value);
+
+ return buffer.getChannelData(0);
+ }
+
+ function getStartEndFrames(startTime, endTime, sampleRate) {
+ // Start frame is the ceiling of the start time because the ramp starts at
+ // or after the sample frame. End frame is the ceiling because it's the
+ // exclusive ending frame of the automation.
+ let startFrame = Math.ceil(startTime * sampleRate);
+ let endFrame = Math.ceil(endTime * sampleRate);
+
+ return {startFrame: startFrame, endFrame: endFrame};
+ }
+
+ // Create a linear ramp starting at |startValue| and ending at |endValue|. The
+ // ramp starts at time |startTime| and ends at |endTime|. (The start and end
+ // times are only used to compute how many samples to return.)
+ function createLinearRampArray(
+ startTime, endTime, startValue, endValue, sampleRate) {
+ let frameInfo = getStartEndFrames(startTime, endTime, sampleRate);
+ let startFrame = frameInfo.startFrame;
+ let endFrame = frameInfo.endFrame;
+ let length = endFrame - startFrame;
+ let array = new Array(length);
+
+    let slope = (endValue - startValue) / (endTime - startTime);
+
+    // v(t) = v0 + (v1 - v0)*(t-t0)/(t1-t0)
+    for (let k = 0; k < length; ++k) {
+      let t = (startFrame + k) / sampleRate;
+      array[k] = startValue + slope * (t - startTime);
+    }
+
+ return array;
+ }
+
+ // Create an exponential ramp starting at |startValue| and ending at
+ // |endValue|. The ramp starts at time |startTime| and ends at |endTime|.
+ // (The start and end times are only used to compute how many samples to
+ // return.)
+ function createExponentialRampArray(
+ startTime, endTime, startValue, endValue, sampleRate) {
+ let deltaTime = endTime - startTime;
+
+ let frameInfo = getStartEndFrames(startTime, endTime, sampleRate);
+ let startFrame = frameInfo.startFrame;
+ let endFrame = frameInfo.endFrame;
+ let length = endFrame - startFrame;
+ let array = new Array(length);
+
+ let ratio = endValue / startValue;
+
+ // v(t) = v0*(v1/v0)^((t-t0)/(t1-t0))
+ for (let k = 0; k < length; ++k) {
+ let t = Math.fround((startFrame + k) / sampleRate);
+ array[k] = Math.fround(
+ startValue * Math.pow(ratio, (t - startTime) / deltaTime));
+ }
+
+ return array;
+ }
+
+ function discreteTimeConstantForSampleRate(timeConstant, sampleRate) {
+ return 1 - Math.exp(-1 / (sampleRate * timeConstant));
+ }
+
+ // Create a signal that starts at |startValue| and exponentially approaches
+ // the target value of |targetValue|, using a time constant of |timeConstant|.
+ // The ramp starts at time |startTime| and ends at |endTime|. (The start and
+ // end times are only used to compute how many samples to return.)
+ function createExponentialApproachArray(
+ startTime, endTime, startValue, targetValue, sampleRate, timeConstant) {
+ let frameInfo = getStartEndFrames(startTime, endTime, sampleRate);
+ let startFrame = frameInfo.startFrame;
+ let endFrame = frameInfo.endFrame;
+ let length = Math.floor(endFrame - startFrame);
+ let array = new Array(length);
+ let c = discreteTimeConstantForSampleRate(timeConstant, sampleRate);
+
+ let delta = startValue - targetValue;
+
+ // v(t) = v1 + (v0 - v1) * exp(-(t-t0)/tau)
+ for (let k = 0; k < length; ++k) {
+ let t = (startFrame + k) / sampleRate;
+ let value =
+ targetValue + delta * Math.exp(-(t - startTime) / timeConstant);
+ array[k] = value;
+ }
+
+ return array;
+ }
+
+ // Create a sine wave of the specified duration.
+ function createReferenceSineArray(
+ startTime, endTime, startValue, endValue, sampleRate) {
+ // Ignore |startValue| and |endValue| for the sine wave.
+ let curve = createSineWaveArray(
+ endTime - startTime, freqHz, sineAmplitude, sampleRate);
+ // Sample the curve appropriately.
+ let frameInfo = getStartEndFrames(startTime, endTime, sampleRate);
+ let startFrame = frameInfo.startFrame;
+ let endFrame = frameInfo.endFrame;
+ let length = Math.floor(endFrame - startFrame);
+ let array = new Array(length);
+
+ // v(t) = linearly interpolate between V[k] and V[k + 1] where k =
+ // floor((N-1)/duration*(t - t0))
+ let f = (length - 1) / (endTime - startTime);
+
+ for (let k = 0; k < length; ++k) {
+ let t = (startFrame + k) / sampleRate;
+ let indexFloat = f * (t - startTime);
+ let index = Math.floor(indexFloat);
+ if (index + 1 < length) {
+ let v0 = curve[index];
+ let v1 = curve[index + 1];
+ array[k] = v0 + (v1 - v0) * (indexFloat - index);
+ } else {
+ array[k] = curve[length - 1];
+ }
+ }
+
+ return array;
+ }
+
+ // Create a sine wave of the given frequency and amplitude. The sine wave is
+ // offset by half the amplitude so that result is always positive.
+ function createSineWaveArray(durationSeconds, freqHz, amplitude, sampleRate) {
+ let length = timeToSampleFrame(durationSeconds, sampleRate);
+ let signal = new Float32Array(length);
+ let omega = 2 * Math.PI * freqHz / sampleRate;
+ let halfAmplitude = amplitude / 2;
+
+ for (let k = 0; k < length; ++k) {
+ signal[k] = halfAmplitude + halfAmplitude * Math.sin(omega * k);
+ }
+
+ return signal;
+ }
+
+ // Return the difference between the starting value and the ending value for
+ // time interval |timeIntervalIndex|. We alternate between an end value that
+ // is above or below the starting value.
+ function endValueDelta(timeIntervalIndex) {
+ if (timeIntervalIndex & 1) {
+ return -startEndValueChange;
+ } else {
+ return startEndValueChange;
+ }
+ }
+
+ // Relative error metric
+ function relativeErrorMetric(actual, expected) {
+ return (actual - expected) / Math.abs(expected);
+ }
+
+ // Difference metric
+ function differenceErrorMetric(actual, expected) {
+ return actual - expected;
+ }
+
+ // Return the difference between the starting value at |timeIntervalIndex| and
+ // the starting value at the next time interval. Since we started at a large
+ // initial value, we decrease the value at each time interval.
+ function valueUpdate(timeIntervalIndex) {
+ return -startingValueDelta;
+ }
+
+ // Compare a section of the rendered data against our expected signal.
+ function comparePartialSignals(
+ should, rendered, expectedFunction, startTime, endTime, valueInfo,
+ sampleRate, errorMetric) {
+ let startSample = timeToSampleFrame(startTime, sampleRate);
+ let expected = expectedFunction(
+ startTime, endTime, valueInfo.startValue, valueInfo.endValue,
+ sampleRate, timeConstant);
+
+ let n = expected.length;
+ let maxError = -1;
+ let maxErrorIndex = -1;
+
+ for (let k = 0; k < n; ++k) {
+      // Make sure we don't pass these tests because a NaN has been generated
+      // in either the rendered data or the reference data.
+ if (!isValidNumber(rendered[startSample + k])) {
+ maxError = Infinity;
+ maxErrorIndex = startSample + k;
+ should(
+ isValidNumber(rendered[startSample + k]),
+ 'NaN or infinity for rendered data at ' + maxErrorIndex)
+ .beTrue();
+ break;
+ }
+ if (!isValidNumber(expected[k])) {
+ maxError = Infinity;
+ maxErrorIndex = startSample + k;
+        should(
+            isValidNumber(expected[k]),
+            'NaN or infinity for reference data at ' + maxErrorIndex)
+ .beTrue();
+ break;
+ }
+ let error = Math.abs(errorMetric(rendered[startSample + k], expected[k]));
+ if (error > maxError) {
+ maxError = error;
+ maxErrorIndex = k;
+ }
+ }
+
+ return {maxError: maxError, index: maxErrorIndex, expected: expected};
+ }
+
+ // Find the discontinuities in the data and compare the locations of the
+ // discontinuities with the times that define the time intervals. There is a
+ // discontinuity if the difference between successive samples exceeds the
+ // threshold.
+ function verifyDiscontinuities(should, values, times, threshold) {
+ let n = values.length;
+ let success = true;
+ let badLocations = 0;
+ let breaks = [];
+
+ // Find discontinuities.
+ for (let k = 1; k < n; ++k) {
+ if (Math.abs(values[k] - values[k - 1]) > threshold) {
+ breaks.push(k);
+ }
+ }
+
+ let testCount;
+
+ // If there are numberOfTests intervals, there are only numberOfTests - 1
+    // internal interval boundaries. Hence the maximum number of
+    // discontinuities
+ // we expect to find is numberOfTests - 1. If we find more than that, we
+ // have no reference to compare against. We also assume that the actual
+ // discontinuities are close to the expected ones.
+ //
+ // This is just a sanity check when something goes really wrong. For
+ // example, if the threshold is too low, every sample frame looks like a
+ // discontinuity.
+ if (breaks.length >= numberOfTests) {
+ testCount = numberOfTests - 1;
+ should(breaks.length, 'Number of discontinuities')
+ .beLessThan(numberOfTests);
+ success = false;
+ } else {
+ testCount = breaks.length;
+ }
+
+ // Compare the location of each discontinuity with the end time of each
+ // interval. (There is no discontinuity at the start of the signal.)
+ for (let k = 0; k < testCount; ++k) {
+ let expectedSampleFrame = timeToSampleFrame(times[k + 1], sampleRate);
+ if (breaks[k] != expectedSampleFrame) {
+ success = false;
+ ++badLocations;
+ should(breaks[k], 'Discontinuity at index')
+ .beEqualTo(expectedSampleFrame);
+ }
+ }
+
+ if (badLocations) {
+      should(badLocations, 'Number of discontinuities at incorrect locations')
+ .beEqualTo(0);
+ success = false;
+ } else {
+ should(
+ breaks.length + 1,
+ 'Number of tests started and ended at the correct time')
+ .beEqualTo(numberOfTests);
+ }
+
+ return success;
+ }
+
+ // Compare the rendered data with the expected data.
+ //
+ // testName - string describing the test
+ //
+ // maxError - maximum allowed difference between the rendered data and the
+ // expected data
+ //
+  // renderedData - array containing the rendered (actual) data
+ //
+ // expectedFunction - function to compute the expected data
+ //
+ // timeValueInfo - array containing information about the start and end times
+ // and the start and end values of each interval.
+ //
+ // breakThreshold - threshold to use for determining discontinuities.
+ function compareSignals(
+ should, testName, maxError, renderedData, expectedFunction, timeValueInfo,
+ breakThreshold, errorMetric) {
+ let success = true;
+ let failedTestCount = 0;
+ let times = timeValueInfo.times;
+ let values = timeValueInfo.values;
+ let n = values.length;
+ let expectedSignal = [];
+
+ success =
+ verifyDiscontinuities(should, renderedData, times, breakThreshold);
+
+ for (let k = 0; k < n; ++k) {
+ let result = comparePartialSignals(
+ should, renderedData, expectedFunction, times[k], times[k + 1],
+ values[k], sampleRate, errorMetric);
+
+ expectedSignal =
+ expectedSignal.concat(Array.prototype.slice.call(result.expected));
+
+ should(
+ result.maxError,
+ 'Max error for test ' + k + ' at offset ' +
+ (result.index + timeToSampleFrame(times[k], sampleRate)))
+          .beLessThanOrEqualTo(maxError);
+
+      // Track failures so the summary assertion below is meaningful.
+      if (result.maxError > maxError)
+        ++failedTestCount;
+    }
+
+ should(
+ failedTestCount,
+ 'Number of failed tests with an acceptable relative tolerance of ' +
+ maxError)
+ .beEqualTo(0);
+ }
+
+ // Create a function to test the rendered data with the reference data.
+ //
+ // testName - string describing the test
+ //
+ // error - max allowed error between rendered data and the reference data.
+ //
+ // referenceFunction - function that generates the reference data to be
+ // compared with the rendered data.
+ //
+ // jumpThreshold - optional parameter that specifies the threshold to use for
+ // detecting discontinuities. If not specified, defaults to
+ // discontinuityThreshold.
+ //
+ function checkResultFunction(
+ task, should, testName, error, referenceFunction, jumpThreshold,
+ errorMetric) {
+ return function(event) {
+ let buffer = event.renderedBuffer;
+      let renderedData = buffer.getChannelData(0);
+
+ let threshold;
+
+ if (!jumpThreshold) {
+ threshold = discontinuityThreshold;
+ } else {
+ threshold = jumpThreshold;
+ }
+
+ compareSignals(
+ should, testName, error, renderedData, referenceFunction,
+ timeValueInfo, threshold, errorMetric);
+ task.done();
+ }
+ }
+
+ // Run all the automation tests.
+ //
+ // numberOfTests - number of tests (time intervals) to run.
+ //
+ // initialValue - The initial value of the first time interval.
+ //
+ // setValueFunction - function that sets the specified value at the start of a
+ // time interval.
+ //
+ // automationFunction - function that sets the end value for the time
+ // interval. It specifies how the value approaches the end value.
+ //
+ // An object is returned containing an array of start times for each time
+ // interval, and an array giving the start and end values for the interval.
+ function doAutomation(
+ numberOfTests, initialValue, setValueFunction, automationFunction) {
+ let timeInfo = [0];
+ let valueInfo = [];
+ let value = initialValue;
+
+ for (let k = 0; k < numberOfTests; ++k) {
+ let startTime = k * timeInterval;
+ let endTime = (k + 1) * timeInterval;
+ let endValue = value + endValueDelta(k);
+
+ // Set the value at the start of the time interval.
+ setValueFunction(value, startTime);
+
+ // Specify the end or target value, and how we should approach it.
+ automationFunction(endValue, startTime, endTime);
+
+ // Keep track of the start times, and the start and end values for each
+ // time interval.
+ timeInfo.push(endTime);
+ valueInfo.push({startValue: value, endValue: endValue});
+
+ value += valueUpdate(k);
+ }
+
+ return {times: timeInfo, values: valueInfo};
+ }
+
+ // Create the audio graph for the test and then run the test.
+ //
+ // numberOfTests - number of time intervals (tests) to run.
+ //
+ // initialValue - the initial value of the gain at time 0.
+ //
+ // setValueFunction - function to set the value at the beginning of each time
+ // interval.
+ //
+ // automationFunction - the AudioParamTimeline automation function
+ //
+ // testName - string indicating the test that is being run.
+ //
+ // maxError - maximum allowed error between the rendered data and the
+ // reference data
+ //
+ // referenceFunction - function that generates the reference data to be
+ // compared against the rendered data.
+ //
+ // jumpThreshold - optional parameter that specifies the threshold to use for
+ // detecting discontinuities. If not specified, defaults to
+ // discontinuityThreshold.
+ //
+ function createAudioGraphAndTest(
+ task, should, numberOfTests, initialValue, setValueFunction,
+ automationFunction, testName, maxError, referenceFunction, jumpThreshold,
+ errorMetric) {
+ // Create offline audio context.
+ context =
+ new OfflineAudioContext(2, renderLength(numberOfTests), sampleRate);
+ let constantBuffer =
+ createConstantBuffer(context, renderLength(numberOfTests), 1);
+
+    // We use a GainNode here simply as a convenient way to test the
+ // AudioParam automation, since it's easy to pass a constant value through
+ // the node, automate the .gain attribute and observe the resulting values.
+
+ gainNode = context.createGain();
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = constantBuffer;
+ bufferSource.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ // Set up default values for the parameters that control how the automation
+ // test values progress for each time interval.
+ startingValueDelta = initialValue / numberOfTests;
+ startEndValueChange = startingValueDelta / 2;
+ discontinuityThreshold = startEndValueChange / 2;
+
+ // Run the automation tests.
+ timeValueInfo = doAutomation(
+ numberOfTests, initialValue, setValueFunction, automationFunction);
+ bufferSource.start(0);
+
+ context.oncomplete = checkResultFunction(
+ task, should, testName, maxError, referenceFunction, jumpThreshold,
+ errorMetric || relativeErrorMetric);
+ context.startRendering();
+ }
+
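+  // Example (a sketch, not invoked here; the numeric arguments are
+  // hypothetical): testing linearRampToValueAtTime() against the
+  // createLinearRampArray reference with a relative error bound.
+  //
+  //   audit.define('linear-ramp', (task, should) => {
+  //     createAudioGraphAndTest(
+  //         task, should, 10, 1,
+  //         (value, startTime) =>
+  //             gainNode.gain.setValueAtTime(value, startTime),
+  //         (value, startTime, endTime) =>
+  //             gainNode.gain.linearRampToValueAtTime(value, endTime),
+  //         'linearRampToValueAtTime()', 1e-6, createLinearRampArray);
+  //   });
+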
+  // Export local references to the global scope. Every new object in this
+  // file must be exported through this object to be usable in the actual
+  // test HTML page.
+ let exports = {
+ 'sampleRate': 44100,
+ 'gainNode': null,
+ 'timeInterval': timeIntervalInternal,
+
+ // Some suitable time constant so that we can see a significant change over
+ // a timeInterval. This is only needed by setTargetAtTime() which needs a
+ // time constant.
+ 'timeConstant': timeIntervalInternal / 3,
+
+ 'renderLength': renderLength,
+ 'createConstantArray': createConstantArray,
+ 'getStartEndFrames': getStartEndFrames,
+ 'createLinearRampArray': createLinearRampArray,
+ 'createExponentialRampArray': createExponentialRampArray,
+ 'discreteTimeConstantForSampleRate': discreteTimeConstantForSampleRate,
+ 'createExponentialApproachArray': createExponentialApproachArray,
+ 'createReferenceSineArray': createReferenceSineArray,
+ 'createSineWaveArray': createSineWaveArray,
+ 'endValueDelta': endValueDelta,
+ 'relativeErrorMetric': relativeErrorMetric,
+ 'differenceErrorMetric': differenceErrorMetric,
+ 'valueUpdate': valueUpdate,
+ 'comparePartialSignals': comparePartialSignals,
+ 'verifyDiscontinuities': verifyDiscontinuities,
+ 'compareSignals': compareSignals,
+ 'checkResultFunction': checkResultFunction,
+ 'doAutomation': doAutomation,
+ 'createAudioGraphAndTest': createAudioGraphAndTest
+ };
+
+ for (let reference in exports) {
+ global[reference] = exports[reference];
+ }
+
+})(window);
diff --git a/testing/web-platform/tests/webaudio/resources/audit-util.js b/testing/web-platform/tests/webaudio/resources/audit-util.js
new file mode 100644
index 0000000000..a4dea79658
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audit-util.js
@@ -0,0 +1,195 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+/**
+ * @fileOverview This file includes legacy utility functions for layout
+ * tests.
+ */
+
+// How many frames in a WebAudio render quantum.
+let RENDER_QUANTUM_FRAMES = 128;
+
+// Compare two arrays (commonly extracted from buffer.getChannelData()) with
+// constraints:
+// options.thresholdSNR: Minimum allowed SNR between the actual and expected
+// signal. The default value is 10000.
+// options.thresholdDiffULP: Maximum allowed difference between the actual
+//   and expected signal in ULP (units in the last place). The default is 0.
+// options.thresholdDiffCount: Maximum allowed number of sample differences
+// which exceeds the threshold. The default is 0.
+// options.bitDepth: The expected result is assumed to come from an audio
+// file with this number of bits of precision. The default is 16.
+function compareBuffersWithConstraints(should, actual, expected, options) {
+ if (!options)
+ options = {};
+
+ // Only print out the message if the lengths are different; the
+ // expectation is that they are the same, so don't clutter up the
+ // output.
+ if (actual.length !== expected.length) {
+ should(
+ actual.length === expected.length,
+ 'Length of actual and expected buffers should match')
+ .beTrue();
+ }
+
+ let maxError = -1;
+ let diffCount = 0;
+ let errorPosition = -1;
+ let thresholdSNR = (options.thresholdSNR || 10000);
+
+ let thresholdDiffULP = (options.thresholdDiffULP || 0);
+ let thresholdDiffCount = (options.thresholdDiffCount || 0);
+
+ // By default, the bit depth is 16.
+ let bitDepth = (options.bitDepth || 16);
+ let scaleFactor = Math.pow(2, bitDepth - 1);
+
+ let noisePower = 0, signalPower = 0;
+
+ for (let i = 0; i < actual.length; i++) {
+ let diff = actual[i] - expected[i];
+ noisePower += diff * diff;
+ signalPower += expected[i] * expected[i];
+
+ if (Math.abs(diff) > maxError) {
+ maxError = Math.abs(diff);
+ errorPosition = i;
+ }
+
+    // The reference file is a 16-bit WAV file, so we will almost never get
+    // an exact match between it and the actual floating-point result; count
+    // samples that differ by more than one 16-bit ulp.
+    if (Math.abs(diff) > 1 / scaleFactor)
+      diffCount++;
+ }
+
+ let snr = 10 * Math.log10(signalPower / noisePower);
+ let maxErrorULP = maxError * scaleFactor;
+
+ should(snr, 'SNR').beGreaterThanOrEqualTo(thresholdSNR);
+
+ should(
+ maxErrorULP,
+ options.prefix + ': Maximum difference (in ulp units (' + bitDepth +
+ '-bits))')
+ .beLessThanOrEqualTo(thresholdDiffULP);
+
+ should(diffCount, options.prefix + ': Number of differences between results')
+ .beLessThanOrEqualTo(thresholdDiffCount);
+}
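+
+// Example (illustrative; the thresholds shown are arbitrary): compare a
+// rendered channel against a decoded 16-bit reference, tolerating a few
+// one-ulp differences.
+//
+//   compareBuffersWithConstraints(
+//       should, renderedBuffer.getChannelData(0),
+//       referenceBuffer.getChannelData(0),
+//       {prefix: 'Channel 0', thresholdSNR: 90, thresholdDiffULP: 1,
+//        thresholdDiffCount: 4});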
+
+// Create an impulse in a buffer of length sampleFrameLength
+function createImpulseBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ let n = audioBuffer.length;
+ let dataL = audioBuffer.getChannelData(0);
+
+ for (let k = 0; k < n; ++k) {
+ dataL[k] = 0;
+ }
+ dataL[0] = 1;
+
+ return audioBuffer;
+}
+
+// Create a buffer of the given length with a linear ramp having values 0 <= x <
+// 1.
+function createLinearRampBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(1, sampleFrameLength, context.sampleRate);
+ let n = audioBuffer.length;
+ let dataL = audioBuffer.getChannelData(0);
+
+ for (let i = 0; i < n; ++i)
+ dataL[i] = i / n;
+
+ return audioBuffer;
+}
+
+// Create an AudioBuffer of length |sampleFrameLength| having a constant value
+// |constantValue|. If |constantValue| is a number, the buffer has one channel
+// filled with that value. If |constantValue| is an array, the buffer is created
+// with a number of channels equal to the length of the array, and channel k is
+// filled with the k'th element of the |constantValue| array.
+function createConstantBuffer(context, sampleFrameLength, constantValue) {
+ let channels;
+ let values;
+
+ if (typeof constantValue === 'number') {
+ channels = 1;
+ values = [constantValue];
+ } else {
+ channels = constantValue.length;
+ values = constantValue;
+ }
+
+ let audioBuffer =
+ context.createBuffer(channels, sampleFrameLength, context.sampleRate);
+ let n = audioBuffer.length;
+
+ for (let c = 0; c < channels; ++c) {
+ let data = audioBuffer.getChannelData(c);
+ for (let i = 0; i < n; ++i)
+ data[i] = values[c];
+ }
+
+ return audioBuffer;
+}
+
+// Create a stereo impulse in a buffer of length sampleFrameLength
+function createStereoImpulseBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(2, sampleFrameLength, context.sampleRate);
+ let n = audioBuffer.length;
+ let dataL = audioBuffer.getChannelData(0);
+ let dataR = audioBuffer.getChannelData(1);
+
+ for (let k = 0; k < n; ++k) {
+ dataL[k] = 0;
+ dataR[k] = 0;
+ }
+ dataL[0] = 1;
+ dataR[0] = 1;
+
+ return audioBuffer;
+}
+
+// Convert time (in seconds) to sample frames.
+function timeToSampleFrame(time, sampleRate) {
+ return Math.floor(0.5 + time * sampleRate);
+}
+
+// Compute the number of sample frames consumed by noteGrainOn with
+// the specified |grainOffset|, |duration|, and |sampleRate|.
+function grainLengthInSampleFrames(grainOffset, duration, sampleRate) {
+ let startFrame = timeToSampleFrame(grainOffset, sampleRate);
+ let endFrame = timeToSampleFrame(grainOffset + duration, sampleRate);
+
+ return endFrame - startFrame;
+}
+
+// True if the number is not an infinity or NaN
+function isValidNumber(x) {
+ return !isNaN(x) && (x != Infinity) && (x != -Infinity);
+}
+
+// Compute the (linear) signal-to-noise ratio between |actual| and
+// |expected|. The result is NOT in dB! If the |actual| and
+// |expected| have different lengths, the shorter length is used.
+function computeSNR(actual, expected) {
+ let signalPower = 0;
+ let noisePower = 0;
+
+ let length = Math.min(actual.length, expected.length);
+
+ for (let k = 0; k < length; ++k) {
+ let diff = actual[k] - expected[k];
+ signalPower += expected[k] * expected[k];
+ noisePower += diff * diff;
+ }
+
+ return signalPower / noisePower;
+}
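+
+// Example (illustrative): computeSNR() returns a linear power ratio; convert
+// it to dB before comparing against a dB threshold.
+//
+//   let snrDb = 10 * Math.log10(computeSNR(actual, expected));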
diff --git a/testing/web-platform/tests/webaudio/resources/audit.js b/testing/web-platform/tests/webaudio/resources/audit.js
new file mode 100644
index 0000000000..ed0078b9c5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/audit.js
@@ -0,0 +1,1447 @@
+// Copyright 2016 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See https://github.com/web-platform-tests/wpt/issues/12781 for information on
+// the purpose of audit.js, and why testharness.js does not suffice.
+
+/**
+ * @fileOverview WebAudio layout test utility library. Built around W3C's
+ * testharness.js. Includes asynchronous test task manager,
+ * assertion utilities.
+ * @dependency testharness.js
+ */
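+
+// Typical usage in a test page (a sketch; it relies on the |Audit| task
+// runner defined later in this file, with testharness.js loaded first):
+//
+//   let audit = Audit.createTaskRunner();
+//
+//   audit.define('my-task', (task, should) => {
+//     should(1 + 1, '1 + 1').beEqualTo(2);
+//     task.done();
+//   });
+//
+//   audit.run();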
+
+
+(function() {
+
+ 'use strict';
+
+  // Selected methods from testharness.js.
+  let testharnessProperties = [
+    'test', 'async_test', 'promise_test', 'promise_rejects_js',
+    'generate_tests', 'setup', 'done', 'assert_true', 'assert_false'
+  ];
+
+  // Check that testharness.js is properly loaded. Throw otherwise.
+  for (let name of testharnessProperties) {
+    if (!self.hasOwnProperty(name))
+      throw new Error('Cannot proceed. testharness.js is not loaded.');
+  }
+})();
+
+
+window.Audit = (function() {
+
+ 'use strict';
+
+ // NOTE: Moving this method (or any other code above) will change the location
+ // of 'CONSOLE ERROR...' message in the expected text files.
+ function _logError(message) {
+ console.error('[audit.js] ' + message);
+ }
+
+ function _logPassed(message) {
+    test(function() {
+ assert_true(true);
+ }, message);
+ }
+
+ function _logFailed(message, detail) {
+ test(function() {
+ assert_true(false, detail);
+ }, message);
+ }
+
+ function _throwException(message) {
+ throw new Error(message);
+ }
+
+ // TODO(hongchan): remove this hack after confirming all the tests are
+ // finished correctly. (crbug.com/708817)
+ const _testharnessDone = window.done;
+ window.done = () => {
+ _throwException('Do NOT call done() method from the test code.');
+ };
+
+ // Generate a descriptive string from a target value in various types.
+ function _generateDescription(target, options) {
+ let targetString;
+
+ switch (typeof target) {
+ case 'object':
+ // Handle Arrays.
+ if (target instanceof Array || target instanceof Float32Array ||
+ target instanceof Float64Array || target instanceof Uint8Array) {
+ let arrayElements = target.length < options.numberOfArrayElements ?
+ String(target) :
+ String(target.slice(0, options.numberOfArrayElements)) + '...';
+ targetString = '[' + arrayElements + ']';
+ } else if (target === null) {
+ targetString = String(target);
+ } else {
+ targetString = '' + String(target).split(/[\s\]]/)[1];
+ }
+ break;
+ case 'function':
+ if (Error.isPrototypeOf(target)) {
+ targetString = "EcmaScript error " + target.name;
+ } else {
+ targetString = String(target);
+ }
+ break;
+ default:
+ targetString = String(target);
+ break;
+ }
+
+ return targetString;
+ }
+
+ // Return a string suitable for printing one failed element in
+ // |beCloseToArray|.
+ function _formatFailureEntry(index, actual, expected, abserr, threshold) {
+ return '\t[' + index + ']\t' + actual.toExponential(16) + '\t' +
+ expected.toExponential(16) + '\t' + abserr.toExponential(16) + '\t' +
+ (abserr / Math.abs(expected)).toExponential(16) + '\t' +
+ threshold.toExponential(16);
+ }
+
+ // Compute the error threshold criterion for |beCloseToArray|
+ function _closeToThreshold(abserr, relerr, expected) {
+ return Math.max(abserr, relerr * Math.abs(expected));
+ }
+
+ /**
+ * @class Should
+ * @description Assertion subtask for the Audit task.
+ * @param {Task} parentTask Associated Task object.
+ * @param {Any} actual Target value to be tested.
+ * @param {String} actualDescription String description of the test target.
+ */
+ class Should {
+ constructor(parentTask, actual, actualDescription) {
+ this._task = parentTask;
+
+ this._actual = actual;
+ this._actualDescription = (actualDescription || null);
+ this._expected = null;
+ this._expectedDescription = null;
+
+ this._detail = '';
+ // If true and the test failed, print the actual value at the
+ // end of the message.
+ this._printActualForFailure = true;
+
+ this._result = null;
+
+ /**
+ * @param {Number} numberOfErrors Number of errors to be printed.
+ * @param {Number} numberOfArrayElements Number of array elements to be
+ * printed in the test log.
+ * @param {Boolean} verbose Verbose output from the assertion.
+ */
+ this._options = {
+ numberOfErrors: 4,
+ numberOfArrayElements: 16,
+ verbose: false
+ };
+ }
+
+ _processArguments(args) {
+ if (args.length === 0)
+ return;
+
+ if (args.length > 0)
+ this._expected = args[0];
+
+ if (typeof args[1] === 'string') {
+ // case 1: (expected, description, options)
+ this._expectedDescription = args[1];
+ Object.assign(this._options, args[2]);
+ } else if (typeof args[1] === 'object') {
+ // case 2: (expected, options)
+ Object.assign(this._options, args[1]);
+ }
+ }
+
+ _buildResultText() {
+ if (this._result === null)
+ _throwException('Illegal invocation: the assertion is not finished.');
+
+ let actualString = _generateDescription(this._actual, this._options);
+
+ // Use generated text when the description is not provided.
+ if (!this._actualDescription)
+ this._actualDescription = actualString;
+
+ if (!this._expectedDescription) {
+ this._expectedDescription =
+ _generateDescription(this._expected, this._options);
+ }
+
+ // For the assertion with a single operand.
+ this._detail =
+ this._detail.replace(/\$\{actual\}/g, this._actualDescription);
+
+ // If there is a second operand (i.e. expected value), we have to build
+ // the string for it as well.
+ this._detail =
+ this._detail.replace(/\$\{expected\}/g, this._expectedDescription);
+
+ // If there is any property in |_options|, replace the property name
+ // with the value.
+ for (let name in this._options) {
+ if (name === 'numberOfErrors' || name === 'numberOfArrayElements' ||
+ name === 'verbose') {
+ continue;
+ }
+
+        // The RegExp key string may contain special characters; escape them.
+ let re = '\$\{' + name + '\}';
+ re = re.replace(/([.*+?^=!:${}()|\[\]\/\\])/g, '\\$1');
+ this._detail = this._detail.replace(
+ new RegExp(re, 'g'), _generateDescription(this._options[name]));
+ }
+
+ // If the test failed, add the actual value at the end.
+ if (this._result === false && this._printActualForFailure === true) {
+ this._detail += ' Got ' + actualString + '.';
+ }
+ }
+
+ _finalize() {
+ if (this._result) {
+ _logPassed(' ' + this._detail);
+ } else {
+ _logFailed('X ' + this._detail);
+ }
+
+ // This assertion is finished, so update the parent task accordingly.
+ this._task.update(this);
+
+ // TODO(hongchan): configurable 'detail' message.
+ }
+
+ _assert(condition, passDetail, failDetail) {
+ this._result = Boolean(condition);
+ this._detail = this._result ? passDetail : failDetail;
+ this._buildResultText();
+ this._finalize();
+
+ return this._result;
+ }
+
+ get result() {
+ return this._result;
+ }
+
+ get detail() {
+ return this._detail;
+ }
+
+ /**
+ * should() assertions.
+ *
+ * @example All the assertions can have 1, 2 or 3 arguments:
+ * should().doAssert(expected);
+ * should().doAssert(expected, options);
+ * should().doAssert(expected, expectedDescription, options);
+ *
+ * @param {Any} expected Expected value of the assertion.
+ * @param {String} expectedDescription Description of expected value.
+ * @param {Object} options Options for assertion.
+ * @param {Number} options.numberOfErrors Number of errors to be printed.
+ * (if applicable)
+ * @param {Number} options.numberOfArrayElements Number of array elements
+ * to be printed. (if
+ * applicable)
+ * @notes Some assertions can have additional options for their specific
+ * testing.
+ */
+
+ /**
+ * Check if |actual| exists.
+ *
+ * @example
+ * should({}, 'An empty object').exist();
+ * @result
+ * "PASS An empty object does exist."
+ */
+ exist() {
+ return this._assert(
+ this._actual !== null && this._actual !== undefined,
+ '${actual} does exist.', '${actual} does not exist.');
+ }
+
+ /**
+     * Check if the |actual| operation wrapped in a function correctly throws
+     * an exception of the expected error type. |expected| is optional. If it
+     * is an instance of DOMException, then the description (second argument)
+     * can be provided to be more strict about the expected exception type.
+     * |expected| can also be a generic error type such as TypeError or
+     * RangeError.
+ *
+ * @example
+ * should(() => { let a = b; }, 'A bad code').throw();
+ * should(() => { new SomeConstructor(); }, 'A bad construction')
+ * .throw(DOMException, 'NotSupportedError');
+ * should(() => { let c = d; }, 'Assigning d to c')
+ * .throw(ReferenceError);
+ * should(() => { let e = f; }, 'Assigning e to f')
+ * .throw(ReferenceError, { omitErrorMessage: true });
+ *
+ * @result
+ * "PASS A bad code threw an exception of ReferenceError: b is not
+ * defined."
+ * "PASS A bad construction threw DOMException:NotSupportedError."
+ * "PASS Assigning d to c threw ReferenceError: d is not defined."
+ * "PASS Assigning e to f threw ReferenceError: [error message
+ * omitted]."
+ */
+ throw() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let didThrowCorrectly = false;
+ let passDetail, failDetail;
+
+ try {
+ // This should throw.
+ this._actual();
+        // The catch block was not reached, so the test failed.
+ failDetail = '${actual} did not throw an exception.';
+ } catch (error) {
+ let errorMessage = this._options.omitErrorMessage ?
+ ': [error message omitted]' :
+ ': "' + error.message + '"';
+ if (this._expected === null || this._expected === undefined) {
+ // The expected error type was not given.
+ didThrowCorrectly = true;
+ passDetail = '${actual} threw ' + error.name + errorMessage + '.';
+ } else if (this._expected === DOMException &&
+ this._expectedDescription !== undefined) {
+ // Handles DOMException with an expected exception name.
+ if (this._expectedDescription === error.name) {
+ didThrowCorrectly = true;
+ passDetail = '${actual} threw ${expected}' + errorMessage + '.';
+ } else {
+ didThrowCorrectly = false;
+ failDetail =
+ '${actual} threw "' + error.name + '" instead of ${expected}.';
+ }
+ } else if (this._expected == error.constructor) {
+          // Handle other error types.
+ didThrowCorrectly = true;
+ passDetail = '${actual} threw ' + error.name + errorMessage + '.';
+ } else {
+ didThrowCorrectly = false;
+ failDetail =
+ '${actual} threw "' + error.name + '" instead of ${expected}.';
+ }
+ }
+
+ return this._assert(didThrowCorrectly, passDetail, failDetail);
+ }
+
+ /**
+     * Check if the |actual| operation wrapped in a function correctly does
+     * not throw an exception.
+ *
+ * @example
+ * should(() => { let foo = 'bar'; }, 'let foo = "bar"').notThrow();
+ *
+ * @result
+ * "PASS let foo = "bar" did not throw an exception."
+ */
+ notThrow() {
+ this._printActualForFailure = false;
+
+ let didThrowCorrectly = false;
+ let passDetail, failDetail;
+
+ try {
+ this._actual();
+ passDetail = '${actual} did not throw an exception.';
+ } catch (error) {
+ didThrowCorrectly = true;
+ failDetail = '${actual} incorrectly threw ' + error.name + ': "' +
+ error.message + '".';
+ }
+
+ return this._assert(!didThrowCorrectly, passDetail, failDetail);
+ }
+
+ /**
+     * Check if the |actual| promise is resolved correctly. Note that the
+     * result of the promise will be passed to the following then() function.
+     *
+     * @example
+     *   should(promise, 'My promise').beResolved().then((result) => {
+ * log(result);
+ * });
+ *
+ * @result
+ * "PASS My promise resolved correctly."
+ * "FAIL X My promise rejected *INCORRECTLY* with _ERROR_."
+ */
+ beResolved() {
+ return this._actual.then(
+ function(result) {
+ this._assert(true, '${actual} resolved correctly.', null);
+ return result;
+ }.bind(this),
+ function(error) {
+ this._assert(
+ false, null,
+ '${actual} rejected incorrectly with ' + error + '.');
+ }.bind(this));
+ }
+
+ /**
+ * Check if |actual| promise is rejected correctly.
+ *
+ * @example
+     *   should(promise, 'My promise').beRejected().then(nextStuff);
+ *
+ * @result
+ * "PASS My promise rejected correctly (with _ERROR_)."
+ * "FAIL X My promise resolved *INCORRECTLY*."
+ */
+ beRejected() {
+ return this._actual.then(
+ function() {
+ this._assert(false, null, '${actual} resolved incorrectly.');
+ }.bind(this),
+ function(error) {
+ this._assert(
+ true, '${actual} rejected correctly with ' + error + '.', null);
+ }.bind(this));
+ }
+
+ /**
+ * Check if |actual| promise is rejected correctly.
+ *
+ * @example
+ * should(promise, 'My promise').beRejectedWith('_ERROR_').then();
+ *
+ * @result
+ * "PASS My promise rejected correctly with _ERROR_."
+     * "FAIL X My promise rejected correctly but got _ACTUAL_ERROR_ instead of
+ * _EXPECTED_ERROR_."
+ * "FAIL X My promise resolved incorrectly."
+ */
+ beRejectedWith() {
+ this._processArguments(arguments);
+
+ return this._actual.then(
+ function() {
+ this._assert(false, null, '${actual} resolved incorrectly.');
+ }.bind(this),
+ function(error) {
+ if (this._expected !== error.name) {
+ this._assert(
+ false, null,
+ '${actual} rejected correctly but got ' + error.name +
+ ' instead of ' + this._expected + '.');
+ } else {
+ this._assert(
+ true,
+ '${actual} rejected correctly with ' + this._expected + '.',
+ null);
+ }
+ }.bind(this));
+ }
+
+ /**
+ * Check if |actual| is a boolean true.
+ *
+ * @example
+ * should(3 < 5, '3 < 5').beTrue();
+ *
+ * @result
+ * "PASS 3 < 5 is true."
+ */
+ beTrue() {
+ return this._assert(
+ this._actual === true, '${actual} is true.',
+ '${actual} is not true.');
+ }
+
+ /**
+ * Check if |actual| is a boolean false.
+ *
+ * @example
+ * should(3 > 5, '3 > 5').beFalse();
+ *
+ * @result
+ * "PASS 3 > 5 is false."
+ */
+ beFalse() {
+ return this._assert(
+ this._actual === false, '${actual} is false.',
+ '${actual} is not false.');
+ }
+
+ /**
+ * Check if |actual| is strictly equal to |expected|. (no type coercion)
+ *
+ * @example
+ * should(1).beEqualTo(1);
+ *
+ * @result
+ * "PASS 1 is equal to 1."
+ */
+ beEqualTo() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual === this._expected, '${actual} is equal to ${expected}.',
+ '${actual} is not equal to ${expected}.');
+ }
+
+ /**
+ * Check if |actual| is not equal to |expected|.
+ *
+ * @example
+ * should(1).notBeEqualTo(2);
+ *
+ * @result
+ * "PASS 1 is not equal to 2."
+ */
+ notBeEqualTo() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual !== this._expected,
+ '${actual} is not equal to ${expected}.',
+ '${actual} should not be equal to ${expected}.');
+ }
+
+ /**
+ * check if |actual| is NaN
+ *
+ * @example
+ * should(NaN).beNaN();
+ *
+ * @result
+ * "PASS NaN is NaN"
+ *
+ */
+ beNaN() {
+ this._processArguments(arguments);
+ return this._assert(
+ isNaN(this._actual),
+ '${actual} is NaN.',
+ '${actual} is not NaN but should be.');
+ }
+
+ /**
+ * check if |actual| is NOT NaN
+ *
+ * @example
+ * should(42).notBeNaN();
+ *
+ * @result
+ * "PASS 42 is not NaN"
+ *
+ */
+ notBeNaN() {
+ this._processArguments(arguments);
+ return this._assert(
+ !isNaN(this._actual),
+ '${actual} is not NaN.',
+ '${actual} is NaN but should not be.');
+ }
+
+ /**
+ * Check if |actual| is greater than |expected|.
+ *
+ * @example
+     *   should(2).beGreaterThan(1);
+     *
+     * @result
+     *   "PASS 2 is greater than 1."
+ */
+ beGreaterThan() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual > this._expected,
+ '${actual} is greater than ${expected}.',
+ '${actual} is not greater than ${expected}.');
+ }
+
+ /**
+ * Check if |actual| is greater than or equal to |expected|.
+ *
+ * @example
+     *   should(2).beGreaterThanOrEqualTo(2);
+     *
+     * @result
+     *   "PASS 2 is greater than or equal to 2."
+ */
+ beGreaterThanOrEqualTo() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual >= this._expected,
+ '${actual} is greater than or equal to ${expected}.',
+ '${actual} is not greater than or equal to ${expected}.');
+ }
+
+ /**
+ * Check if |actual| is less than |expected|.
+ *
+ * @example
+ * should(1).beLessThan(2);
+ *
+ * @result
+ * "PASS 1 is less than 2."
+ */
+ beLessThan() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual < this._expected, '${actual} is less than ${expected}.',
+ '${actual} is not less than ${expected}.');
+ }
+
+ /**
+ * Check if |actual| is less than or equal to |expected|.
+ *
+ * @example
+ * should(1).beLessThanOrEqualTo(1);
+ *
+ * @result
+ * "PASS 1 is less than or equal to 1."
+ */
+ beLessThanOrEqualTo() {
+ this._processArguments(arguments);
+ return this._assert(
+ this._actual <= this._expected,
+ '${actual} is less than or equal to ${expected}.',
+ '${actual} is not less than or equal to ${expected}.');
+ }
+
+ /**
+ * Check if |actual| array is filled with a constant |expected| value.
+ *
+ * @example
+ * should([1, 1, 1]).beConstantValueOf(1);
+ *
+ * @result
+ * "PASS [1,1,1] contains only the constant 1."
+ */
+ beConstantValueOf() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let passDetail, failDetail;
+ let errors = {};
+
+ let actual = this._actual;
+ let expected = this._expected;
+ for (let index = 0; index < actual.length; ++index) {
+ if (actual[index] !== expected)
+ errors[index] = actual[index];
+ }
+
+ let numberOfErrors = Object.keys(errors).length;
+ passed = numberOfErrors === 0;
+
+ if (passed) {
+ passDetail = '${actual} contains only the constant ${expected}.';
+ } else {
+ let counter = 0;
+ failDetail =
+ '${actual}: Expected ${expected} for all values but found ' +
+ numberOfErrors + ' unexpected values: ';
+ failDetail += '\n\tIndex\tActual';
+ for (let errorIndex in errors) {
+ failDetail += '\n\t[' + errorIndex + ']' +
+ '\t' + errors[errorIndex];
+ if (++counter >= this._options.numberOfErrors) {
+ failDetail +=
+ '\n\t...and ' + (numberOfErrors - counter) + ' more errors.';
+ break;
+ }
+ }
+ }
+
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ /**
+ * Check if |actual| array is not filled with a constant |expected| value.
+ *
+ * @example
+ * should([1, 0, 1]).notBeConstantValueOf(1);
+ * should([0, 0, 0]).notBeConstantValueOf(0);
+ *
+ * @result
+ * "PASS [1,0,1] is not constantly 1 (contains 1 different value)."
+     *   "FAIL X [0,0,0] should contain at least one value different
+ * from 0."
+ */
+ notBeConstantValueOf() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let passDetail;
+ let failDetail;
+ let differences = {};
+
+ let actual = this._actual;
+ let expected = this._expected;
+ for (let index = 0; index < actual.length; ++index) {
+ if (actual[index] !== expected)
+ differences[index] = actual[index];
+ }
+
+ let numberOfDifferences = Object.keys(differences).length;
+ passed = numberOfDifferences > 0;
+
+ if (passed) {
+ let valueString = numberOfDifferences > 1 ? 'values' : 'value';
+ passDetail = '${actual} is not constantly ${expected} (contains ' +
+ numberOfDifferences + ' different ' + valueString + ').';
+ } else {
+        failDetail = '${actual} should contain at least one value ' +
+ 'different from ${expected}.';
+ }
+
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ /**
+ * Check if |actual| array is identical to |expected| array element-wise.
+ *
+ * @example
+ * should([1, 2, 3]).beEqualToArray([1, 2, 3]);
+ *
+ * @result
+ * "[1,2,3] is identical to the array [1,2,3]."
+ */
+ beEqualToArray() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let passDetail, failDetail;
+ let errorIndices = [];
+
+ if (this._actual.length !== this._expected.length) {
+ passed = false;
+ failDetail = 'The array length does not match.';
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ let actual = this._actual;
+ let expected = this._expected;
+ for (let index = 0; index < actual.length; ++index) {
+ if (actual[index] !== expected[index])
+ errorIndices.push(index);
+ }
+
+ passed = errorIndices.length === 0;
+
+ if (passed) {
+ passDetail = '${actual} is identical to the array ${expected}.';
+ } else {
+ let counter = 0;
+ failDetail =
+ '${actual} expected to be equal to the array ${expected} ' +
+ 'but differs in ' + errorIndices.length + ' places:' +
+ '\n\tIndex\tActual\t\t\tExpected';
+ for (let index of errorIndices) {
+ failDetail += '\n\t[' + index + ']' +
+ '\t' + this._actual[index].toExponential(16) + '\t' +
+ this._expected[index].toExponential(16);
+ if (++counter >= this._options.numberOfErrors) {
+ failDetail += '\n\t...and ' + (errorIndices.length - counter) +
+ ' more errors.';
+ break;
+ }
+ }
+ }
+
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ /**
+ * Check if |actual| array contains only the values in |expected| in the
+ * order of values in |expected|.
+ *
+ * @example
+     * should([1, 1, 3, 3, 2], 'My random array').containValues([1, 3, 2]);
+ *
+ * @result
+ * "PASS [1,1,3,3,2] contains all the expected values in the correct
+ * order: [1,3,2].
+ */
+ containValues() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let indexedActual = [];
+ let firstErrorIndex = null;
+
+ // Collect the unique value sequence from the actual.
+ for (let i = 0, prev = null; i < this._actual.length; i++) {
+ if (this._actual[i] !== prev) {
+ indexedActual.push({index: i, value: this._actual[i]});
+ prev = this._actual[i];
+ }
+ }
+
+ // Compare against the expected sequence.
+ let failMessage =
+ '${actual} expected to have the value sequence of ${expected} but ' +
+ 'got ';
+ if (this._expected.length === indexedActual.length) {
+ for (let j = 0; j < this._expected.length; j++) {
+ if (this._expected[j] !== indexedActual[j].value) {
+ firstErrorIndex = indexedActual[j].index;
+ passed = false;
+ failMessage += this._actual[firstErrorIndex] + ' at index ' +
+ firstErrorIndex + '.';
+ break;
+ }
+ }
+ } else {
+ passed = false;
+ let indexedValues = indexedActual.map(x => x.value);
+ failMessage += `${indexedActual.length} values, [${
+ indexedValues}], instead of ${this._expected.length}.`;
+ }
+
+ return this._assert(
+ passed,
+ '${actual} contains all the expected values in the correct order: ' +
+ '${expected}.',
+ failMessage);
+ }
+
+ /**
+     * Check if |actual| array does not have any glitches, i.e. jumps of
+     * |threshold| or more between consecutive samples. Note that the
+     * |threshold| argument is required, not optional.
+ *
+ * @example
+ * should([0.5, 0.5, 0.55, 0.5, 0.45, 0.5]).notGlitch(0.06);
+ *
+ * @result
+ * "PASS [0.5,0.5,0.55,0.5,0.45,0.5] has no glitch above the threshold
+ * of 0.06."
+ *
+ */
+ notGlitch() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let passDetail, failDetail;
+
+ let actual = this._actual;
+ let expected = this._expected;
+      // Start at 1 so each sample is compared with its predecessor.
+      for (let index = 1; index < actual.length; ++index) {
+ let diff = Math.abs(actual[index - 1] - actual[index]);
+ if (diff >= expected) {
+ passed = false;
+ failDetail = '${actual} has a glitch at index ' + index +
+ ' of size ' + diff + '.';
+ }
+ }
+
+ passDetail =
+ '${actual} has no glitch above the threshold of ${expected}.';
+
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ /**
+ * Check if |actual| is close to |expected| using the given relative error
+ * |threshold|.
+ *
+ * @example
+ * should(2.3).beCloseTo(2, { threshold: 0.3 });
+ *
+ * @result
+ * "PASS 2.3 is 2 within an error of 0.3."
+ * @param {Object} options Options for assertion.
+ * @param {Number} options.threshold Threshold value for the comparison.
+ */
+ beCloseTo() {
+ this._processArguments(arguments);
+
+ // The threshold is relative except when |expected| is zero, in which case
+ // it is absolute.
+ let absExpected = this._expected ? Math.abs(this._expected) : 1;
+ let error = Math.abs(this._actual - this._expected) / absExpected;
+
+ return this._assert(
+ error <= this._options.threshold,
+ '${actual} is ${expected} within an error of ${threshold}.',
+ '${actual} is not close to ${expected} within a relative error of ' +
+ '${threshold} (RelErr=' + error + ').');
+ }
+
+ /**
+ * Check if |target| array is close to |expected| array element-wise within
+ * a certain error bound given by the |options|.
+ *
+ * The error criterion is:
+     *   abs(actual[k] - expected[k]) < max(absErr, relErr * abs(expected[k]))
+ *
+ * If nothing is given for |options|, then absErr = relErr = 0. If
+ * absErr = 0, then the error criterion is a relative error. A non-zero
+ * absErr value produces a mix intended to handle the case where the
+ * expected value is 0, allowing the target value to differ by absErr from
+ * the expected.
+ *
+ * @param {Number} options.absoluteThreshold Absolute threshold.
+ * @param {Number} options.relativeThreshold Relative threshold.
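+     *
+     * @example
+     *   // Illustrative values (not from a real test); the absolute slack
+     *   // lets every element, including the zero-valued one, pass.
+     *   should([0.0001, 0.5002, 1.0003])
+     *       .beCloseToArray([0, 0.5, 1], {absoluteThreshold: 4e-4});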
+ */
+ beCloseToArray() {
+ this._processArguments(arguments);
+ this._printActualForFailure = false;
+
+ let passed = true;
+ let passDetail, failDetail;
+
+ // Parsing options.
+ let absErrorThreshold = (this._options.absoluteThreshold || 0);
+ let relErrorThreshold = (this._options.relativeThreshold || 0);
+
+ // A collection of all of the values that satisfy the error criterion.
+ // This holds the absolute difference between the target element and the
+ // expected element.
+ let errors = {};
+
+ // Keep track of the max absolute error found.
+ let maxAbsError = -Infinity, maxAbsErrorIndex = -1;
+
+ // Keep track of the max relative error found, ignoring cases where the
+ // relative error is Infinity because the expected value is 0.
+ let maxRelError = -Infinity, maxRelErrorIndex = -1;
+
+ let actual = this._actual;
+ let expected = this._expected;
+
+ for (let index = 0; index < expected.length; ++index) {
+ let diff = Math.abs(actual[index] - expected[index]);
+ let absExpected = Math.abs(expected[index]);
+ let relError = diff / absExpected;
+
+ if (diff >
+ Math.max(absErrorThreshold, relErrorThreshold * absExpected)) {
+ if (diff > maxAbsError) {
+ maxAbsErrorIndex = index;
+ maxAbsError = diff;
+ }
+
+ if (!isNaN(relError) && relError > maxRelError) {
+ maxRelErrorIndex = index;
+ maxRelError = relError;
+ }
+
+ errors[index] = diff;
+ }
+ }
+
+ let numberOfErrors = Object.keys(errors).length;
+ let maxAllowedErrorDetail = JSON.stringify({
+ absoluteThreshold: absErrorThreshold,
+ relativeThreshold: relErrorThreshold
+ });
+
+ if (numberOfErrors === 0) {
+ // The assertion was successful.
+ passDetail = '${actual} equals ${expected} with an element-wise ' +
+ 'tolerance of ' + maxAllowedErrorDetail + '.';
+ } else {
+ // Failed. Prepare the detailed failure log.
+ passed = false;
+ failDetail = '${actual} does not equal ${expected} with an ' +
+ 'element-wise tolerance of ' + maxAllowedErrorDetail + '.\n';
+
+ // Print out actual, expected, absolute error, and relative error.
+ let counter = 0;
+ failDetail += '\tIndex\tActual\t\t\tExpected\t\tAbsError' +
+ '\t\tRelError\t\tTest threshold';
+ let printedIndices = [];
+ for (let index in errors) {
+ failDetail +=
+ '\n' +
+ _formatFailureEntry(
+ index, actual[index], expected[index], errors[index],
+ _closeToThreshold(
+ absErrorThreshold, relErrorThreshold, expected[index]));
+
+ printedIndices.push(index);
+ if (++counter > this._options.numberOfErrors) {
+ failDetail +=
+ '\n\t...and ' + (numberOfErrors - counter) + ' more errors.';
+ break;
+ }
+ }
+
+ // Finalize the error log: print out the location of both the maxAbs
+ // error and the maxRel error so we can adjust thresholds appropriately
+ // in the test.
+ failDetail += '\n' +
+ '\tMax AbsError of ' + maxAbsError.toExponential(16) +
+ ' at index of ' + maxAbsErrorIndex + '.\n';
+ if (printedIndices.find(element => {
+ return element == maxAbsErrorIndex;
+ }) === undefined) {
+ // Print an entry for this index if we haven't already.
+ failDetail +=
+ _formatFailureEntry(
+ maxAbsErrorIndex, actual[maxAbsErrorIndex],
+ expected[maxAbsErrorIndex], errors[maxAbsErrorIndex],
+ _closeToThreshold(
+ absErrorThreshold, relErrorThreshold,
+ expected[maxAbsErrorIndex])) +
+ '\n';
+ }
+ failDetail += '\tMax RelError of ' + maxRelError.toExponential(16) +
+ ' at index of ' + maxRelErrorIndex + '.\n';
+ if (printedIndices.find(element => {
+ return element == maxRelErrorIndex;
+ }) === undefined) {
+ // Print an entry for this index if we haven't already.
+ failDetail +=
+ _formatFailureEntry(
+ maxRelErrorIndex, actual[maxRelErrorIndex],
+ expected[maxRelErrorIndex], errors[maxRelErrorIndex],
+ _closeToThreshold(
+ absErrorThreshold, relErrorThreshold,
+ expected[maxRelErrorIndex])) +
+ '\n';
+ }
+ }
+
+ return this._assert(passed, passDetail, failDetail);
+ }
+
+ /**
+     * A temporary escape hatch for printing an in-task message. The description
+ * for the |actual| is required to get the message printed properly.
+ *
+ * TODO(hongchan): remove this method when the transition from the old Audit
+ * to the new Audit is completed.
+ * @example
+ * should(true, 'The message is').message('truthful!', 'false!');
+ *
+ * @result
+ * "PASS The message is truthful!"
+ */
+ message(passDetail, failDetail) {
+ return this._assert(
+ this._actual, '${actual} ' + passDetail, '${actual} ' + failDetail);
+ }
+
+ /**
+ * Check if |expected| property is truly owned by |actual| object.
+ *
+ * @example
+ * should(BaseAudioContext.prototype,
+ * 'BaseAudioContext.prototype').haveOwnProperty('createGain');
+ *
+ * @result
+ * "PASS BaseAudioContext.prototype has an own property of
+ * 'createGain'."
+ */
+ haveOwnProperty() {
+ this._processArguments(arguments);
+
+ return this._assert(
+ this._actual.hasOwnProperty(this._expected),
+ '${actual} has an own property of "${expected}".',
+ '${actual} does not own the property of "${expected}".');
+ }
+
+
+ /**
+ * Check if |expected| property is not owned by |actual| object.
+ *
+ * @example
+ * should(BaseAudioContext.prototype,
+ * 'BaseAudioContext.prototype')
+ * .notHaveOwnProperty('startRendering');
+ *
+ * @result
+ * "PASS BaseAudioContext.prototype does not have an own property of
+ * 'startRendering'."
+ */
+ notHaveOwnProperty() {
+ this._processArguments(arguments);
+
+ return this._assert(
+ !this._actual.hasOwnProperty(this._expected),
+ '${actual} does not have an own property of "${expected}".',
+          '${actual} has an own property of "${expected}".');
+ }
+
+
+ /**
+ * Check if an object is inherited from a class. This looks up the entire
+ * prototype chain of a given object and tries to find a match.
+ *
+ * @example
+ * should(sourceNode, 'A buffer source node')
+ * .inheritFrom('AudioScheduledSourceNode');
+ *
+ * @result
+ * "PASS A buffer source node inherits from 'AudioScheduledSourceNode'."
+ */
+ inheritFrom() {
+ this._processArguments(arguments);
+
+ let prototypes = [];
+ let currentPrototype = Object.getPrototypeOf(this._actual);
+ while (currentPrototype) {
+ prototypes.push(currentPrototype.constructor.name);
+ currentPrototype = Object.getPrototypeOf(currentPrototype);
+ }
+
+ return this._assert(
+ prototypes.includes(this._expected),
+ '${actual} inherits from "${expected}".',
+ '${actual} does not inherit from "${expected}".');
+ }
+ }
+
+
+ // Task Class state enum.
+ const TaskState = {PENDING: 0, STARTED: 1, FINISHED: 2};
+
+
+ /**
+ * @class Task
+ * @description WebAudio testing task. Managed by TaskRunner.
+ */
+ class Task {
+ /**
+ * Task constructor.
+ * @param {Object} taskRunner Reference of associated task runner.
+     * @param {String|Object} taskLabel Task label if a string is given. This
+ * parameter can be a dictionary with the
+ * following fields.
+ * @param {String} taskLabel.label Task label.
+ * @param {String} taskLabel.description Description of task.
+ * @param {Function} taskFunction Task function to be performed.
+ * @return {Object} Task object.
+ */
+ constructor(taskRunner, taskLabel, taskFunction) {
+ this._taskRunner = taskRunner;
+ this._taskFunction = taskFunction;
+
+ if (typeof taskLabel === 'string') {
+ this._label = taskLabel;
+ this._description = null;
+ } else if (typeof taskLabel === 'object') {
+ if (typeof taskLabel.label !== 'string') {
+          _throwException('Task.constructor:: task label must be a string.');
+ }
+ this._label = taskLabel.label;
+ this._description = (typeof taskLabel.description === 'string') ?
+ taskLabel.description :
+ null;
+ } else {
+ _throwException(
+ 'Task.constructor:: task label must be a string or ' +
+ 'a dictionary.');
+ }
+
+ this._state = TaskState.PENDING;
+ this._result = true;
+
+ this._totalAssertions = 0;
+ this._failedAssertions = 0;
+ }
+
+ get label() {
+ return this._label;
+ }
+
+ get state() {
+ return this._state;
+ }
+
+ get result() {
+ return this._result;
+ }
+
+ // Start the assertion chain.
+ should(actual, actualDescription) {
+ // If no argument is given, we cannot proceed. Halt.
+ if (arguments.length === 0)
+ _throwException('Task.should:: requires at least 1 argument.');
+
+ return new Should(this, actual, actualDescription);
+ }
+
+ // Run this task. |this| task will be passed into the user-supplied test
+ // task function.
+ run(harnessTest) {
+ this._state = TaskState.STARTED;
+ this._harnessTest = harnessTest;
+ // Print out the task entry with label and description.
+ _logPassed(
+ '> [' + this._label + '] ' +
+ (this._description ? this._description : ''));
+
+ return new Promise((resolve, reject) => {
+ this._resolve = resolve;
+ this._reject = reject;
+ let result = this._taskFunction(this, this.should.bind(this));
+ if (result && typeof result.then === "function") {
+ result.then(() => this.done()).catch(reject);
+ }
+ });
+ }
+
+ // Update the task success based on the individual assertion/test inside.
+ update(subTask) {
+ // After one of tests fails within a task, the result is irreversible.
+ if (subTask.result === false) {
+ this._result = false;
+ this._failedAssertions++;
+ }
+
+ this._totalAssertions++;
+ }
+
+ // Finish the current task and start the next one if available.
+ done() {
+      assert_equals(this._state, TaskState.STARTED);
+ this._state = TaskState.FINISHED;
+
+ let message = '< [' + this._label + '] ';
+
+ if (this._result) {
+ message += 'All assertions passed. (total ' + this._totalAssertions +
+ ' assertions)';
+ _logPassed(message);
+ } else {
+        message += this._failedAssertions + ' out of ' + this._totalAssertions +
+            ' assertions failed.';
+ _logFailed(message);
+ }
+
+ this._resolve();
+ }
+
+ // Runs |subTask| |time| milliseconds later. |setTimeout| is not allowed in
+ // WPT linter, so a thin wrapper around the harness's |step_timeout| is
+ // used here. Returns a Promise which is resolved after |subTask| runs.
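+    //
+    // Example (hypothetical task body):
+    //   task.timeout(() => should(x, 'x').beEqualTo(1), 100)
+    //       .then(() => task.done());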
+ timeout(subTask, time) {
+ return new Promise(resolve => {
+ this._harnessTest.step_timeout(() => {
+ let result = subTask();
+ if (result && typeof result.then === "function") {
+ // Chain rejection directly to the harness test Promise, to report
+ // the rejection against the subtest even when the caller of
+ // timeout does not handle the rejection.
+            result.then(resolve, this._reject);
+ } else {
+ resolve();
+ }
+ }, time);
+ });
+ }
+
+ isPassed() {
+ return this._state === TaskState.FINISHED && this._result;
+ }
+
+ toString() {
+ return '"' + this._label + '": ' + this._description;
+ }
+ }
+
+
+ /**
+ * @class TaskRunner
+ * @description WebAudio testing task runner. Manages tasks.
+ */
+ class TaskRunner {
+ constructor() {
+ this._tasks = {};
+ this._taskSequence = [];
+
+ // Configure testharness.js for the async operation.
+ setup(new Function(), {explicit_done: true});
+ }
+
+ _finish() {
+ let numberOfFailures = 0;
+ for (let taskIndex in this._taskSequence) {
+ let task = this._tasks[this._taskSequence[taskIndex]];
+ numberOfFailures += task.result ? 0 : 1;
+ }
+
+ let prefix = '# AUDIT TASK RUNNER FINISHED: ';
+ if (numberOfFailures > 0) {
+ _logFailed(
+            prefix + numberOfFailures + ' out of ' + this._taskSequence.length +
+            ' tasks failed.');
+ } else {
+ _logPassed(
+ prefix + this._taskSequence.length + ' tasks ran successfully.');
+ }
+
+ return Promise.resolve();
+ }
+
+ // |taskLabel| can be either a string or a dictionary. See Task constructor
+ // for the detail. If |taskFunction| returns a thenable, then the task
+ // is considered complete when the thenable is fulfilled; otherwise the
+ // task must be completed with an explicit call to |task.done()|.
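+    //
+    // Example:
+    //   audit.define(
+    //       {label: 'basic', description: 'Simple sanity checks'},
+    //       (task, should) => {
+    //         should(1 + 1, 'one plus one').beEqualTo(2);
+    //         task.done();
+    //       });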
+ define(taskLabel, taskFunction) {
+ let task = new Task(this, taskLabel, taskFunction);
+ if (this._tasks.hasOwnProperty(task.label)) {
+ _throwException('Audit.define:: Duplicate task definition.');
+ return;
+ }
+ this._tasks[task.label] = task;
+ this._taskSequence.push(task.label);
+ }
+
+ // Start running all the tasks scheduled. Multiple task names can be passed
+ // to execute them sequentially. Zero argument will perform all defined
+ // tasks in the order of definition.
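+    //
+    // Example: run all defined tasks, or only a chosen subset, in order:
+    //   audit.run();
+    //   audit.run('first-task', 'third-task');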
+ run() {
+ // Display the beginning of the test suite.
+ _logPassed('# AUDIT TASK RUNNER STARTED.');
+
+ // If the argument is specified, override the default task sequence with
+ // the specified one.
+ if (arguments.length > 0) {
+ this._taskSequence = [];
+ for (let i = 0; i < arguments.length; i++) {
+ let taskLabel = arguments[i];
+ if (!this._tasks.hasOwnProperty(taskLabel)) {
+ _throwException('Audit.run:: undefined task.');
+ } else if (this._taskSequence.includes(taskLabel)) {
+ _throwException('Audit.run:: duplicate task request.');
+ } else {
+ this._taskSequence.push(taskLabel);
+ }
+ }
+ }
+
+ if (this._taskSequence.length === 0) {
+ _throwException('Audit.run:: no task to run.');
+ return;
+ }
+
+ for (let taskIndex in this._taskSequence) {
+ let task = this._tasks[this._taskSequence[taskIndex]];
+ // Some tests assume that tasks run in sequence, which is provided by
+ // promise_test().
+ promise_test((t) => task.run(t), `Executing "${task.label}"`);
+ }
+
+ // Schedule a summary report on completion.
+ promise_test(() => this._finish(), "Audit report");
+
+ // From testharness.js. The harness now need not wait for more subtests
+ // to be added.
+ _testharnessDone();
+ }
+ }
+
+ /**
+ * Load file from a given URL and pass ArrayBuffer to the following promise.
+ * @param {String} fileUrl file URL.
+ * @return {Promise}
+ *
+ * @example
+ * Audit.loadFileFromUrl('resources/my-sound.ogg').then((response) => {
+ * audioContext.decodeAudioData(response).then((audioBuffer) => {
+ * // Do something with AudioBuffer.
+ * });
+ * });
+ */
+ function loadFileFromUrl(fileUrl) {
+ return new Promise((resolve, reject) => {
+ let xhr = new XMLHttpRequest();
+ xhr.open('GET', fileUrl, true);
+ xhr.responseType = 'arraybuffer';
+
+ xhr.onload = () => {
+        // |status = 0| is a workaround for the run_web_test.py server, which
+        // appears to quit the transaction prematurely without completing the
+        // request.
+ if (xhr.status === 200 || xhr.status === 0) {
+ resolve(xhr.response);
+ } else {
+ let errorMessage = 'loadFile: Request failed when loading ' +
+ fileUrl + '. ' + xhr.statusText + '. (status = ' + xhr.status +
+ ')';
+ if (reject) {
+ reject(errorMessage);
+ } else {
+            throw new Error(errorMessage);
+ }
+ }
+ };
+
+ xhr.onerror = (event) => {
+ let errorMessage =
+ 'loadFile: Network failure when loading ' + fileUrl + '.';
+ if (reject) {
+ reject(errorMessage);
+ } else {
+          throw new Error(errorMessage);
+ }
+ };
+
+ xhr.send();
+ });
+ }
+
+ /**
+ * @class Audit
+ * @description A WebAudio layout test task manager.
+ * @example
+ * let audit = Audit.createTaskRunner();
+ * audit.define('first-task', function (task, should) {
+ * should(someValue).beEqualTo(someValue);
+ * task.done();
+ * });
+ * audit.run();
+ */
+ return {
+
+ /**
+ * Creates an instance of Audit task runner.
+ * @param {Object} options Options for task runner.
+ * @param {Boolean} options.requireResultFile True if the test suite
+ * requires explicit text
+ * comparison with the expected
+ * result file.
+ */
+ createTaskRunner: function(options) {
+ if (options && options.requireResultFile == true) {
+ _logError(
+ 'this test requires the explicit comparison with the ' +
+ 'expected result when it runs with run_web_tests.py.');
+ }
+
+ return new TaskRunner();
+ },
+
+ /**
+ * Load file from a given URL and pass ArrayBuffer to the following promise.
+ * See |loadFileFromUrl| method for the detail.
+ */
+ loadFileFromUrl: loadFileFromUrl
+
+ };
+
+})();
diff --git a/testing/web-platform/tests/webaudio/resources/biquad-filters.js b/testing/web-platform/tests/webaudio/resources/biquad-filters.js
new file mode 100644
index 0000000000..467436326a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/biquad-filters.js
@@ -0,0 +1,376 @@
+// A biquad filter has a z-transform of
+// H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2)
+//
+// The formulas for the various filters were taken from
+// http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt.
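+//
+// A sketch of typical usage (createFilter() and filterData() are defined
+// below; |freq| is the cutoff normalized to the Nyquist frequency, in
+// [0, 1]):
+//   let coef = createFilter('lowpass', 0.5, 1, 0);
+//   let y = filterData(coef, [1, 0, 0, 0], 4);  // 4-frame impulse response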
+
+
+// Lowpass filter.
+function createLowpassFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+
+ if (freq == 1) {
+ // The formula below works, except for roundoff. When freq = 1,
+ // the filter is just a wire, so hardwire the coefficients.
+ b0 = 1;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else {
+ let theta = Math.PI * freq;
+ let alpha = Math.sin(theta) / (2 * Math.pow(10, q / 20));
+ let cosw = Math.cos(theta);
+ let beta = (1 - cosw) / 2;
+
+ b0 = beta;
+ b1 = 2 * beta;
+ b2 = beta;
+ a0 = 1 + alpha;
+ a1 = -2 * cosw;
+ a2 = 1 - alpha;
+ }
+
+ return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+}
+
+function createHighpassFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+
+ if (freq == 1) {
+ // The filter is 0
+ b0 = 0;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else if (freq == 0) {
+ // The filter is 1. Computation of coefficients below is ok, but
+ // there's a pole at 1 and a zero at 1, so round-off could make
+ // the filter unstable.
+ b0 = 1;
+ b1 = 0;
+ b2 = 0;
+ a0 = 1;
+ a1 = 0;
+ a2 = 0;
+ } else {
+ let theta = Math.PI * freq;
+ let alpha = Math.sin(theta) / (2 * Math.pow(10, q / 20));
+ let cosw = Math.cos(theta);
+ let beta = (1 + cosw) / 2;
+
+ b0 = beta;
+ b1 = -2 * beta;
+ b2 = beta;
+ a0 = 1 + alpha;
+ a1 = -2 * cosw;
+ a2 = 1 - alpha;
+ }
+
+ return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+}
+
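+// Divide all coefficients through by a0 so the recurrence in filterData()
+// can assume a0 == 1; the returned object therefore omits a0.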
+function normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2) {
+ let scale = 1 / a0;
+
+ return {
+ b0: b0 * scale,
+ b1: b1 * scale,
+ b2: b2 * scale,
+ a1: a1 * scale,
+ a2: a2 * scale
+ };
+}
+
+function createBandpassFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ if (freq > 0 && freq < 1) {
+ let w0 = Math.PI * freq;
+ if (q > 0) {
+ let alpha = Math.sin(w0) / (2 * q);
+ let k = Math.cos(w0);
+
+ b0 = alpha;
+ b1 = 0;
+ b2 = -alpha;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0, and frequency is not 0 or 1. The above formula has a
+ // divide by zero problem. The limit of the z-transform as q
+ // approaches 0 is 1, so set the filter that way.
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+ } else {
+ // When freq = 0 or 1, the z-transform is identically 0,
+ // independent of q.
+    coef = {b0: 0, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+
+ return coef;
+}
+
+function createLowShelfFilter(freq, q, gain) {
+ // q not used
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ let S = 1;
+ let A = Math.pow(10, gain / 40);
+
+ if (freq == 1) {
+ // The filter is just a constant gain
+ coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0};
+ } else if (freq == 0) {
+ // The filter is 1
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ } else {
+ let w0 = Math.PI * freq;
+    let alpha = 0.5 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2);
+ let k = Math.cos(w0);
+ let k2 = 2 * Math.sqrt(A) * alpha;
+ let Ap1 = A + 1;
+ let Am1 = A - 1;
+
+ b0 = A * (Ap1 - Am1 * k + k2);
+ b1 = 2 * A * (Am1 - Ap1 * k);
+ b2 = A * (Ap1 - Am1 * k - k2);
+ a0 = Ap1 + Am1 * k + k2;
+ a1 = -2 * (Am1 + Ap1 * k);
+ a2 = Ap1 + Am1 * k - k2;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ }
+
+ return coef;
+}
+
+function createHighShelfFilter(freq, q, gain) {
+ // q not used
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ let A = Math.pow(10, gain / 40);
+
+ if (freq == 1) {
+ // When freq = 1, the z-transform is 1
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ } else if (freq > 0) {
+ let w0 = Math.PI * freq;
+ let S = 1;
+ let alpha = 0.5 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2);
+ let k = Math.cos(w0);
+ let k2 = 2 * Math.sqrt(A) * alpha;
+ let Ap1 = A + 1;
+ let Am1 = A - 1;
+
+ b0 = A * (Ap1 + Am1 * k + k2);
+ b1 = -2 * A * (Am1 + Ap1 * k);
+ b2 = A * (Ap1 + Am1 * k - k2);
+ a0 = Ap1 - Am1 * k + k2;
+ a1 = 2 * (Am1 - Ap1 * k);
+ a2 = Ap1 - Am1 * k - k2;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // When freq = 0, the filter is just a gain
+ coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+
+ return coef;
+}
+
+function createPeakingFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ let A = Math.pow(10, gain / 40);
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ let w0 = Math.PI * freq;
+ let alpha = Math.sin(w0) / (2 * q);
+ let k = Math.cos(w0);
+
+ b0 = 1 + alpha * A;
+ b1 = -2 * k;
+ b2 = 1 - alpha * A;
+ a0 = 1 + alpha / A;
+ a1 = -2 * k;
+ a2 = 1 - alpha / A;
+
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0, we have a divide by zero problem in the formulas
+ // above. But if we look at the z-transform, we see that the
+ // limit as q approaches 0 is A^2.
+ coef = {b0: A * A, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+ } else {
+ // freq = 0 or 1, the z-transform is 1
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+
+ return coef;
+}
+
+function createNotchFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ let w0 = Math.PI * freq;
+ let alpha = Math.sin(w0) / (2 * q);
+ let k = Math.cos(w0);
+
+ b0 = 1;
+ b1 = -2 * k;
+ b2 = 1;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // When q = 0, we get a divide by zero above. The limit of the
+ // z-transform as q approaches 0 is 0, so set the coefficients
+ // appropriately.
+ coef = {b0: 0, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+ } else {
+ // When freq = 0 or 1, the z-transform is 1
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+
+ return coef;
+}
+
+function createAllpassFilter(freq, q, gain) {
+ let b0;
+ let b1;
+ let b2;
+ let a0;
+ let a1;
+ let a2;
+ let coef;
+
+ if (freq > 0 && freq < 1) {
+ if (q > 0) {
+ let w0 = Math.PI * freq;
+ let alpha = Math.sin(w0) / (2 * q);
+ let k = Math.cos(w0);
+
+ b0 = 1 - alpha;
+ b1 = -2 * k;
+ b2 = 1 + alpha;
+ a0 = 1 + alpha;
+ a1 = -2 * k;
+ a2 = 1 - alpha;
+ coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2);
+ } else {
+ // q = 0
+ coef = {b0: -1, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+ } else {
+ coef = {b0: 1, b1: 0, b2: 0, a1: 0, a2: 0};
+ }
+
+ return coef;
+}
+
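+// Direct Form I realization of the normalized transfer function above:
+//   y[k] = b0*x[k] + b1*x[k-1] + b2*x[k-2] - a1*y[k-1] - a2*y[k-2]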
+function filterData(filterCoef, signal, len) {
+ let y = new Array(len);
+ let b0 = filterCoef.b0;
+ let b1 = filterCoef.b1;
+ let b2 = filterCoef.b2;
+ let a1 = filterCoef.a1;
+ let a2 = filterCoef.a2;
+
+ // Prime the pump. (Assumes the signal has length >= 2!)
+ y[0] = b0 * signal[0];
+ y[1] = b0 * signal[1] + b1 * signal[0] - a1 * y[0];
+
+ // Filter all of the signal that we have.
+ for (let k = 2; k < Math.min(signal.length, len); ++k) {
+ y[k] = b0 * signal[k] + b1 * signal[k - 1] + b2 * signal[k - 2] -
+ a1 * y[k - 1] - a2 * y[k - 2];
+ }
+
+ // If we need to filter more, but don't have any signal left,
+ // assume the signal is zero.
+ for (let k = signal.length; k < len; ++k) {
+ y[k] = -a1 * y[k - 1] - a2 * y[k - 2];
+ }
+
+ return y;
+}
+
+// Map the filter type name to a function that computes the filter
+// coefficients for the given filter type.
+let filterCreatorFunction = {
+ 'lowpass': createLowpassFilter,
+ 'highpass': createHighpassFilter,
+ 'bandpass': createBandpassFilter,
+ 'lowshelf': createLowShelfFilter,
+ 'highshelf': createHighShelfFilter,
+ 'peaking': createPeakingFilter,
+ 'notch': createNotchFilter,
+ 'allpass': createAllpassFilter
+};
+
+let filterTypeName = {
+ 'lowpass': 'Lowpass filter',
+ 'highpass': 'Highpass filter',
+ 'bandpass': 'Bandpass filter',
+ 'lowshelf': 'Lowshelf filter',
+ 'highshelf': 'Highshelf filter',
+ 'peaking': 'Peaking filter',
+ 'notch': 'Notch filter',
+ 'allpass': 'Allpass filter'
+};
+
+function createFilter(filterType, freq, q, gain) {
+ return filterCreatorFunction[filterType](freq, q, gain);
+}
diff --git a/testing/web-platform/tests/webaudio/resources/biquad-testing.js b/testing/web-platform/tests/webaudio/resources/biquad-testing.js
new file mode 100644
index 0000000000..7f90a1f72b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/biquad-testing.js
@@ -0,0 +1,172 @@
+// Globals, to make testing and debugging easier.
+let context;
+let filter;
+let signal;
+let impulse;
+let renderedBuffer;
+let renderedData;
+
+// Use a power of two to eliminate round-off in converting frame to time
+let sampleRate = 32768;
+let pulseLengthFrames = .1 * sampleRate;
+
+// Maximum allowed error for the test to succeed. Experimentally determined.
+let maxAllowedError = 5.9e-8;
+
+// This must be large enough so that the filtered result is essentially zero.
+// See comments for createTestAndRun. This must be a whole number of frames.
+let timeStep = Math.ceil(.1 * sampleRate) / sampleRate;
+
+// Maximum number of filters we can process (mostly for setting the
+// render length correctly.)
+let maxFilters = 5;
+
+// How long to render. Must be long enough for all of the filters we
+// want to test.
+let renderLengthSeconds = timeStep * (maxFilters + 1);
+
+let renderLengthSamples = Math.round(renderLengthSeconds * sampleRate);
+
+// Number of filters that will be processed.
+let nFilters;
+
+function createImpulseBuffer(context, length) {
+ let impulse = context.createBuffer(1, length, context.sampleRate);
+ let data = impulse.getChannelData(0);
+ for (let k = 1; k < data.length; ++k) {
+ data[k] = 0;
+ }
+ data[0] = 1;
+
+ return impulse;
+}
+
+
+function createTestAndRun(context, filterType, testParameters) {
+ // To test the filters, we apply a signal (an impulse) to each of
+ // the specified filters, with each signal starting at a different
+ // time. The output of the filters is summed together at the
+ // output. Thus for filter k, the signal input to the filter
+ // starts at time k * timeStep. For this to work well, timeStep
+ // must be large enough for the output of each filter to have
+  // decayed to zero within timeStep seconds. That way the filter
+ // outputs don't interfere with each other.
+
+ let filterParameters = testParameters.filterParameters;
+ nFilters = Math.min(filterParameters.length, maxFilters);
+
+ signal = new Array(nFilters);
+ filter = new Array(nFilters);
+
+ impulse = createImpulseBuffer(context, pulseLengthFrames);
+
+ // Create all of the signal sources and filters that we need.
+ for (let k = 0; k < nFilters; ++k) {
+ signal[k] = context.createBufferSource();
+ signal[k].buffer = impulse;
+
+ filter[k] = context.createBiquadFilter();
+ filter[k].type = filterType;
+ filter[k].frequency.value =
+ context.sampleRate / 2 * filterParameters[k].cutoff;
+ filter[k].detune.value = (filterParameters[k].detune === undefined) ?
+ 0 :
+ filterParameters[k].detune;
+ filter[k].Q.value = filterParameters[k].q;
+ filter[k].gain.value = filterParameters[k].gain;
+
+ signal[k].connect(filter[k]);
+ filter[k].connect(context.destination);
+
+ signal[k].start(timeStep * k);
+ }
+
+ return context.startRendering().then(buffer => {
+ checkFilterResponse(buffer, filterType, testParameters);
+ });
+}
+
+function addSignal(dest, src, destOffset) {
+ // Add src to dest at the given dest offset.
+  for (let k = destOffset, j = 0; k < dest.length && j < src.length;
+       ++k, ++j) {
+ dest[k] += src[j];
+ }
+}
+
+function generateReference(filterType, filterParameters) {
+ let result = new Array(renderLengthSamples);
+ let data = new Array(renderLengthSamples);
+ // Initialize the result array and data.
+ for (let k = 0; k < result.length; ++k) {
+ result[k] = 0;
+ data[k] = 0;
+ }
+ // Make data an impulse.
+ data[0] = 1;
+
+ for (let k = 0; k < nFilters; ++k) {
+ // Filter an impulse
+ let detune = (filterParameters[k].detune === undefined) ?
+ 0 :
+ filterParameters[k].detune;
+ let frequency = filterParameters[k].cutoff *
+ Math.pow(2, detune / 1200); // Apply detune, converting from Cents.
+
+ let filterCoef = createFilter(
+ filterType, frequency, filterParameters[k].q, filterParameters[k].gain);
+ let y = filterData(filterCoef, data, renderLengthSamples);
+
+ // Accumulate this filtered data into the final output at the desired
+ // offset.
+ addSignal(result, y, timeToSampleFrame(timeStep * k, sampleRate));
+ }
+
+ return result;
+}
+
+function checkFilterResponse(renderedBuffer, filterType, testParameters) {
+ let filterParameters = testParameters.filterParameters;
+ let maxAllowedError = testParameters.threshold;
+ let should = testParameters.should;
+
+ renderedData = renderedBuffer.getChannelData(0);
+
+  let reference = generateReference(filterType, filterParameters);
+
+ let len = Math.min(renderedData.length, reference.length);
+
+ let success = true;
+
+ // Maximum error between rendered data and expected data
+ let maxError = 0;
+
+ // Sample offset where the maximum error occurred.
+ let maxPosition = 0;
+
+ // Number of infinities or NaNs that occurred in the rendered data.
+ let invalidNumberCount = 0;
+
+ should(nFilters, 'Number of filters tested')
+ .beEqualTo(filterParameters.length);
+
+ // Compare the rendered signal with our reference, keeping
+ // track of the maximum difference (and the offset of the max
+ // difference.) Check for bad numbers in the rendered output
+ // too. There shouldn't be any.
+ for (let k = 0; k < len; ++k) {
+ let err = Math.abs(renderedData[k] - reference[k]);
+ if (err > maxError) {
+ maxError = err;
+ maxPosition = k;
+ }
+ if (!isValidNumber(renderedData[k])) {
+ ++invalidNumberCount;
+ }
+ }
+
+ should(
+ invalidNumberCount, 'Number of non-finite values in the rendered output')
+ .beEqualTo(0);
+
+ should(maxError, 'Max error in ' + filterTypeName[filterType] + ' response')
+ .beLessThanOrEqualTo(maxAllowedError);
+}
diff --git a/testing/web-platform/tests/webaudio/resources/convolution-testing.js b/testing/web-platform/tests/webaudio/resources/convolution-testing.js
new file mode 100644
index 0000000000..c976f86c78
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/convolution-testing.js
@@ -0,0 +1,168 @@
+let sampleRate = 44100.0;
+
+let renderLengthSeconds = 8;
+let pulseLengthSeconds = 1;
+let pulseLengthFrames = pulseLengthSeconds * sampleRate;
+
+function createSquarePulseBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(1, sampleFrameLength, context.sampleRate);
+
+ let n = audioBuffer.length;
+ let data = audioBuffer.getChannelData(0);
+
+ for (let i = 0; i < n; ++i)
+ data[i] = 1;
+
+ return audioBuffer;
+}
+
+// The triangle buffer holds the expected result of the convolution.
+// It linearly ramps up from 0 to its maximum value (at the center)
+// then linearly ramps down to 0. The center value corresponds to the
+// point where the two square pulses overlap the most.
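+// (Convolving a length-N unit square pulse with itself yields a triangle of
+// length 2N - 1 whose peak value is N, at the point of maximal overlap.)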
+function createTrianglePulseBuffer(context, sampleFrameLength) {
+ let audioBuffer =
+ context.createBuffer(1, sampleFrameLength, context.sampleRate);
+
+ let n = audioBuffer.length;
+ let halfLength = n / 2;
+ let data = audioBuffer.getChannelData(0);
+
+ for (let i = 0; i < halfLength; ++i)
+ data[i] = i + 1;
+
+ for (let i = halfLength; i < n; ++i)
+ data[i] = n - i - 1;
+
+ return audioBuffer;
+}
+
+function log10(x) {
+ return Math.log(x) / Math.LN10;
+}
+
+function linearToDecibel(x) {
+ return 20 * log10(x);
+}
+
+// Verify that the rendered result is very close to the reference
+// triangular pulse.
+function checkTriangularPulse(rendered, reference, should) {
+ let match = true;
+ let maxDelta = 0;
+ let valueAtMaxDelta = 0;
+ let maxDeltaIndex = 0;
+
+ for (let i = 0; i < reference.length; ++i) {
+ let diff = rendered[i] - reference[i];
+ let x = Math.abs(diff);
+ if (x > maxDelta) {
+ maxDelta = x;
+ valueAtMaxDelta = reference[i];
+ maxDeltaIndex = i;
+ }
+ }
+
+  // allowedDeviationDecibels was determined experimentally. It
+  // is the threshold of the relative error (in dB) at the maximum
+  // difference between the true triangular pulse and the
+  // rendered pulse.
+ let allowedDeviationDecibels = -124.41;
+ let maxDeviationDecibels = linearToDecibel(maxDelta / valueAtMaxDelta);
+
+ should(
+ maxDeviationDecibels,
+ 'Deviation (in dB) of triangular portion of convolution')
+ .beLessThanOrEqualTo(allowedDeviationDecibels);
+
+ return match;
+}
+
+// Verify that the rendered data is close to zero for the first part
+// of the tail.
+function checkTail1(data, reference, breakpoint, should) {
+ let isZero = true;
+ let tail1Max = 0;
+
+ for (let i = reference.length; i < reference.length + breakpoint; ++i) {
+ let mag = Math.abs(data[i]);
+ if (mag > tail1Max) {
+ tail1Max = mag;
+ }
+ }
+
+ // Let's find the peak of the reference (even though we know a
+ // priori what it is).
+ let refMax = 0;
+ for (let i = 0; i < reference.length; ++i) {
+ refMax = Math.max(refMax, Math.abs(reference[i]));
+ }
+
+ // This threshold is experimentally determined by examining the
+ // value of tail1MaxDecibels.
+ let threshold1 = -129.7;
+
+ let tail1MaxDecibels = linearToDecibel(tail1Max / refMax);
+ should(tail1MaxDecibels, 'Deviation in first part of tail of convolutions')
+ .beLessThanOrEqualTo(threshold1);
+
+ return isZero;
+}
+
+// Verify that the second part of the tail of the convolution is
+// exactly zero.
+function checkTail2(data, reference, breakpoint, should) {
+ let isZero = true;
+  // For the second part of the tail, the maximum value should be
+  // exactly zero, so any non-zero sample is a failure.
+ for (let i = reference.length + breakpoint; i < data.length; ++i) {
+ if (Math.abs(data[i]) > 0) {
+ isZero = false;
+ break;
+ }
+ }
+
+ should(isZero, 'Rendered signal after tail of convolution is silent')
+ .beTrue();
+
+ return isZero;
+}
+
+function checkConvolvedResult(renderedBuffer, trianglePulse, should) {
+ let referenceData = trianglePulse.getChannelData(0);
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ let success = true;
+
+ // Verify the triangular pulse is actually triangular.
+
+ success =
+ success && checkTriangularPulse(renderedData, referenceData, should);
+
+ // Make sure that portion after convolved portion is totally
+ // silent. But round-off prevents this from being completely
+ // true. At the end of the triangle, it should be close to
+ // zero. If we go farther out, it should be even closer and
+ // eventually zero.
+
+ // For the tail of the convolution (where the result would be
+ // theoretically zero), we partition the tail into two
+ // parts. The first is the at the beginning of the tail,
+ // where we tolerate a small but non-zero value. The second part is
+ // farther along the tail where the result should be zero.
+
+ // breakpoint is the point dividing the first two tail parts
+ // we're looking at. Experimentally determined.
+ let breakpoint = 12800;
+
+ success =
+ success && checkTail1(renderedData, referenceData, breakpoint, should);
+
+ success =
+ success && checkTail2(renderedData, referenceData, breakpoint, should);
+
+ should(success, 'Test signal convolved').message('correctly', 'incorrectly');
+}
diff --git a/testing/web-platform/tests/webaudio/resources/delay-testing.js b/testing/web-platform/tests/webaudio/resources/delay-testing.js
new file mode 100644
index 0000000000..9033da6730
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/delay-testing.js
@@ -0,0 +1,66 @@
+let sampleRate = 44100.0;
+
+let renderLengthSeconds = 4;
+let delayTimeSeconds = 0.5;
+let toneLengthSeconds = 2;
+
+function createToneBuffer(context, frequency, numberOfCycles, sampleRate) {
+ let duration = numberOfCycles / frequency;
+ let sampleFrameLength = duration * sampleRate;
+
+ let audioBuffer = context.createBuffer(1, sampleFrameLength, sampleRate);
+
+ let n = audioBuffer.length;
+ let data = audioBuffer.getChannelData(0);
+
+ for (let i = 0; i < n; ++i)
+ data[i] = Math.sin(frequency * 2.0 * Math.PI * i / sampleRate);
+
+ return audioBuffer;
+}
+
+function checkDelayedResult(renderedBuffer, toneBuffer, should) {
+ let sourceData = toneBuffer.getChannelData(0);
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ let delayTimeFrames = delayTimeSeconds * sampleRate;
+ let toneLengthFrames = toneLengthSeconds * sampleRate;
+
+ let success = true;
+
+ let n = renderedBuffer.length;
+
+ for (let i = 0; i < n; ++i) {
+ if (i < delayTimeFrames) {
+ // Check that initial portion is 0 (since signal is delayed).
+ if (renderedData[i] != 0) {
+ should(
+ renderedData[i], 'Initial portion expected to be 0 at frame ' + i)
+ .beEqualTo(0);
+ success = false;
+ break;
+ }
+ } else if (i >= delayTimeFrames && i < delayTimeFrames + toneLengthFrames) {
+ // Make sure that the tone data is delayed by exactly the expected number
+ // of frames.
+ let j = i - delayTimeFrames;
+ if (renderedData[i] != sourceData[j]) {
+ should(renderedData[i], 'Actual data at frame ' + i)
+ .beEqualTo(sourceData[j]);
+ success = false;
+ break;
+ }
+ } else {
+ // Make sure we have silence after the delayed tone.
+ if (renderedData[i] != 0) {
+        should(renderedData[i], 'Final portion at frame ' + i).beEqualTo(0);
+ success = false;
+ break;
+ }
+ }
+ }
+
+ should(
+ success, 'Delaying test signal by ' + delayTimeSeconds + ' sec was done')
+      .message('correctly', 'incorrectly');
+}
diff --git a/testing/web-platform/tests/webaudio/resources/distance-model-testing.js b/testing/web-platform/tests/webaudio/resources/distance-model-testing.js
new file mode 100644
index 0000000000..f8a6cf940a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/distance-model-testing.js
@@ -0,0 +1,196 @@
+// Use a power of two to eliminate round-off when converting frames to time and
+// vice versa.
+let sampleRate = 32768;
+
+// How many panner nodes to create for the test.
+let nodesToCreate = 100;
+
+// Time step when each panner node starts. Make sure it starts on a frame
+// boundary.
+let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
+
+// Make sure we render long enough to get all of our nodes.
+let renderLengthSeconds = timeStep * (nodesToCreate + 1);
+
+// Length of an impulse signal.
+let pulseLengthFrames = Math.round(timeStep * sampleRate);
+
+// Globals to make debugging a little easier.
+let context;
+let impulse;
+let bufferSource;
+let panner;
+let position;
+let time;
+
+// For the record, these distance formulas were taken from the OpenAL
+// spec
+// (http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.pdf),
+// not the code. The Web Audio spec follows the OpenAL formulas.
+
+function linearDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ distance = Math.min(distance, panner.maxDistance);
+ let rolloff = panner.rolloffFactor;
+ let gain =
+ (1 -
+ rolloff * (distance - panner.refDistance) /
+ (panner.maxDistance - panner.refDistance));
+
+ return gain;
+}
+
+function inverseDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ distance = Math.min(distance, panner.maxDistance);
+ let rolloff = panner.rolloffFactor;
+ let gain = panner.refDistance /
+ (panner.refDistance + rolloff * (distance - panner.refDistance));
+
+ return gain;
+}
+
+function exponentialDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ distance = Math.min(distance, panner.maxDistance);
+ let rolloff = panner.rolloffFactor;
+ let gain = Math.pow(distance / panner.refDistance, -rolloff);
+
+ return gain;
+}
+
+// Map the distance model to the function that implements the model
+let distanceModelFunction = {
+ 'linear': linearDistance,
+ 'inverse': inverseDistance,
+ 'exponential': exponentialDistance
+};
+
+function createGraph(context, distanceModel, nodeCount) {
+ bufferSource = new Array(nodeCount);
+ panner = new Array(nodeCount);
+ position = new Array(nodeCount);
+  time = new Array(nodeCount);
+
+ impulse = createImpulseBuffer(context, pulseLengthFrames);
+
+ // Create all the sources and panners.
+ //
+ // We MUST use the EQUALPOWER panning model so that we can easily
+ // figure out the gain introduced by the panner.
+ //
+ // We want to stay in the middle of the panning range, which means
+ // we want to stay on the z-axis. If we don't, then the effect of
+ // panning model will be much more complicated. We're not testing
+ // the panner, but the distance model, so we want the panner effect
+ // to be simple.
+ //
+ // The panners are placed at a uniform intervals between the panner
+ // reference distance and the panner max distance. The source is
+ // also started at regular intervals.
+ for (let k = 0; k < nodeCount; ++k) {
+ bufferSource[k] = context.createBufferSource();
+ bufferSource[k].buffer = impulse;
+
+ panner[k] = context.createPanner();
+ panner[k].panningModel = 'equalpower';
+ panner[k].distanceModel = distanceModel;
+
+ let distanceStep =
+ (panner[k].maxDistance - panner[k].refDistance) / nodeCount;
+ position[k] = distanceStep * k + panner[k].refDistance;
+ panner[k].setPosition(0, 0, position[k]);
+
+ bufferSource[k].connect(panner[k]);
+ panner[k].connect(context.destination);
+
+ time[k] = k * timeStep;
+ bufferSource[k].start(time[k]);
+ }
+}
+
+// distanceModel should be the distance model string like
+// "linear", "inverse", or "exponential".
+function createTestAndRun(context, distanceModel, should) {
+ // To test the distance models, we create a number of panners at
+ // uniformly spaced intervals on the z-axis. Each of these are
+ // started at equally spaced time intervals. After rendering the
+ // signals, we examine where each impulse is located and the
+ // attenuation of the impulse. The attenuation is compared
+ // against our expected attenuation.
+
+ createGraph(context, distanceModel, nodesToCreate);
+
+ return context.startRendering().then(
+ buffer => checkDistanceResult(buffer, distanceModel, should));
+}
+
+// The gain caused by the EQUALPOWER panning model, if we stay on the
+// z axis, with the default orientations.
+function equalPowerGain() {
+ return Math.SQRT1_2;
+}
+
+function checkDistanceResult(renderedBuffer, model, should) {
+  let renderedData = renderedBuffer.getChannelData(0);
+
+ // The max allowed error between the actual gain and the expected
+ // value. This is determined experimentally. Set to 0 to see
+ // what the actual errors are.
+ let maxAllowedError = 2.2720e-6;
+
+ let success = true;
+
+ // Number of impulses we found in the rendered result.
+ let impulseCount = 0;
+
+ // Maximum relative error in the gain of the impulses.
+ let maxError = 0;
+
+ // Array of locations of the impulses that were not at the
+ // expected location. (Contains the actual and expected frame
+ // of the impulse.)
+  let impulsePositionErrors = [];
+
+ // Step through the rendered data to find all the non-zero points
+ // so we can find where our distance-attenuated impulses are.
+ // These are tested against the expected attenuations at that
+ // distance.
+ for (let k = 0; k < renderedData.length; ++k) {
+ if (renderedData[k] != 0) {
+      // Map the distance model name to the function implementing it.
+ let distanceFunction = distanceModelFunction[model];
+ let expected =
+ distanceFunction(panner[impulseCount], 0, 0, position[impulseCount]);
+
+ // Adjust for the center-panning of the EQUALPOWER panning
+ // model that we're using.
+ expected *= equalPowerGain();
+
+ let error = Math.abs(renderedData[k] - expected) / Math.abs(expected);
+
+ maxError = Math.max(maxError, Math.abs(error));
+
+ should(renderedData[k]).beCloseTo(expected, {threshold: maxAllowedError});
+
+ // Keep track of any impulses that aren't where we expect them
+ // to be.
+ let expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate);
+ if (k != expectedOffset) {
+ impulsePositionErrors.push({actual: k, expected: expectedOffset});
+ }
+ ++impulseCount;
+ }
+ }
+ should(impulseCount, 'Number of impulses').beEqualTo(nodesToCreate);
+
+ should(maxError, 'Max error in distance gains')
+ .beLessThanOrEqualTo(maxAllowedError);
+
+ // Display any timing errors that we found.
+ if (impulsePositionErrors.length > 0) {
+ let actual = impulsePositionErrors.map(x => x.actual);
+ let expected = impulsePositionErrors.map(x => x.expected);
+ should(actual, 'Actual impulse positions found').beEqualToArray(expected);
+ }
+}
diff --git a/testing/web-platform/tests/webaudio/resources/merger-testing.js b/testing/web-platform/tests/webaudio/resources/merger-testing.js
new file mode 100644
index 0000000000..4477ec0a1f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/merger-testing.js
@@ -0,0 +1,24 @@
+// This file is for the audiochannelmerger-* layout tests.
+// Requires |audio-testing.js| to work properly.
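+//
+// Example config (illustrative values):
+//   testMergerInput(should, {
+//     numberOfChannels: 2,
+//     testBufferContent: [1],  // 1-channel constant source
+//     mergerInputIndex: 1,     // feed merger input #1
+//     expected: [0, 1]         // channel 0 silent, channel 1 carries source
+//   });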
+
+function testMergerInput(should, config) {
+ let context = new OfflineAudioContext(config.numberOfChannels, 128, 44100);
+ let merger = context.createChannelMerger(config.numberOfChannels);
+ let source = context.createBufferSource();
+ source.buffer = createConstantBuffer(context, 128, config.testBufferContent);
+
+ // Connect the output of source into the specified input of merger.
+ if (config.mergerInputIndex)
+ source.connect(merger, 0, config.mergerInputIndex);
+ else
+ source.connect(merger);
+ merger.connect(context.destination);
+ source.start();
+
+ return context.startRendering().then(function(buffer) {
+ let prefix = config.testBufferContent.length + '-channel source: ';
+ for (let i = 0; i < config.numberOfChannels; i++)
+ should(buffer.getChannelData(i), prefix + 'Channel #' + i)
+ .beConstantValueOf(config.expected[i]);
+ });
+}
diff --git a/testing/web-platform/tests/webaudio/resources/mix-testing.js b/testing/web-platform/tests/webaudio/resources/mix-testing.js
new file mode 100644
index 0000000000..63c8e1aca6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/mix-testing.js
@@ -0,0 +1,23 @@
+let toneLengthSeconds = 1;
+
+// Create a buffer with multiple channels.
+// The signal frequency in each channel is the multiple of that in the first
+// channel.
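+// For example, createToneBuffer(context, 440, 1, 2) yields a one-second
+// buffer whose channel 0 is a 440 Hz sine and channel 1 an 880 Hz sine.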
+function createToneBuffer(context, frequency, duration, numberOfChannels) {
+ let sampleRate = context.sampleRate;
+ let sampleFrameLength = duration * sampleRate;
+
+ let audioBuffer =
+ context.createBuffer(numberOfChannels, sampleFrameLength, sampleRate);
+
+ let n = audioBuffer.length;
+
+ for (let k = 0; k < numberOfChannels; ++k) {
+ let data = audioBuffer.getChannelData(k);
+
+ for (let i = 0; i < n; ++i)
+ data[i] = Math.sin(frequency * (k + 1) * 2.0 * Math.PI * i / sampleRate);
+ }
+
+ return audioBuffer;
+}
diff --git a/testing/web-platform/tests/webaudio/resources/mixing-rules.js b/testing/web-platform/tests/webaudio/resources/mixing-rules.js
new file mode 100644
index 0000000000..e06a1468a3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/mixing-rules.js
@@ -0,0 +1,350 @@
+// Utilities for mixing rule testing.
+// http://webaudio.github.io/web-audio-api/#channel-up-mixing-and-down-mixing
+
+
+/**
+ * Create an n-channel buffer, with all sample data zero except for a shifted
+ * impulse. The impulse position depends on the channel index. For example, for
+ * a 4-channel buffer:
+ * channel 0: 1 0 0 0 0 0 0 0
+ * channel 1: 0 1 0 0 0 0 0 0
+ * channel 2: 0 0 1 0 0 0 0 0
+ * channel 3: 0 0 0 1 0 0 0 0
+ * @param {AudioContext} context Associated AudioContext.
+ * @param {Number} numberOfChannels Number of channels of test buffer.
+ * @param {Number} frameLength Buffer length in frames.
+ * @return {AudioBuffer}
+ */
+function createShiftedImpulseBuffer(context, numberOfChannels, frameLength) {
+ let shiftedImpulseBuffer =
+ context.createBuffer(numberOfChannels, frameLength, context.sampleRate);
+ for (let channel = 0; channel < numberOfChannels; ++channel) {
+ let data = shiftedImpulseBuffer.getChannelData(channel);
+ data[channel] = 1;
+ }
+
+ return shiftedImpulseBuffer;
+}
+
+/**
+ * Create a string that displays the content of AudioBuffer.
+ * @param {AudioBuffer} audioBuffer AudioBuffer object to stringify.
+ * @param {Number} frameLength Number of frames to be printed.
+ * @param {Number} frameOffset Starting frame position for printing.
+ * @return {String}
+ */
+function stringifyBuffer(audioBuffer, frameLength, frameOffset) {
+ frameOffset = (frameOffset || 0);
+
+ let stringifiedBuffer = '';
+ for (let channel = 0; channel < audioBuffer.numberOfChannels; ++channel) {
+ let channelData = audioBuffer.getChannelData(channel);
+ for (let i = 0; i < frameLength; ++i)
+ stringifiedBuffer += channelData[i + frameOffset] + ' ';
+ stringifiedBuffer += '\n';
+ }
+
+ return stringifiedBuffer;
+}
+
+/**
+ * Compute number of channels from the connection.
+ * http://webaudio.github.io/web-audio-api/#dfn-computednumberofchannels
+ * @param {String} connections A string specifies the connection. For
+ * example, the string "128" means 3
+ * connections, having 1, 2, and 8 channels
+ * respectively.
+ * @param {Number} channelCount Channel count.
+ * @param {String} channelCountMode Channel count mode.
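+ * @example
+ *   computeNumberOfChannels('128', 2, 'max');          // -> 8
+ *   computeNumberOfChannels('128', 2, 'clamped-max');  // -> 2
+ *   computeNumberOfChannels('128', 2, 'explicit');     // -> 2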
+ * @return {Number} Computed number of channels.
+ */
+function computeNumberOfChannels(connections, channelCount, channelCountMode) {
+ if (channelCountMode == 'explicit')
+ return channelCount;
+
+ // Must have at least one channel.
+ let computedNumberOfChannels = 1;
+
+ // Compute "computedNumberOfChannels" based on all the connections.
+ for (let i = 0; i < connections.length; ++i) {
+ let connectionNumberOfChannels = parseInt(connections[i]);
+ computedNumberOfChannels =
+ Math.max(computedNumberOfChannels, connectionNumberOfChannels);
+ }
+
+ if (channelCountMode == 'clamped-max')
+ computedNumberOfChannels = Math.min(computedNumberOfChannels, channelCount);
+
+ return computedNumberOfChannels;
+}
+
+/**
+ * Apply up/down-mixing (in-place summing) based on 'speaker' interpretation.
+ * @param {AudioBuffer} input Input audio buffer.
+ * @param {AudioBuffer} output Output audio buffer.
+ */
+function speakersSum(input, output) {
+ if (input.length != output.length) {
+    throw '[mixing-rules.js] speakersSum(): buffer lengths mismatch (input: ' +
+ input.length + ', output: ' + output.length + ')';
+ }
+
+ if (input.numberOfChannels === output.numberOfChannels) {
+ for (let channel = 0; channel < output.numberOfChannels; ++channel) {
+ let inputChannel = input.getChannelData(channel);
+ let outputChannel = output.getChannelData(channel);
+ for (let i = 0; i < outputChannel.length; i++)
+ outputChannel[i] += inputChannel[i];
+ }
+ } else if (input.numberOfChannels < output.numberOfChannels) {
+ processUpMix(input, output);
+ } else {
+ processDownMix(input, output);
+ }
+}
+
+/**
+ * In-place summing to |output| based on 'discrete' channel interpretation.
+ * @param {AudioBuffer} input Input audio buffer.
+ * @param {AudioBuffer} output Output audio buffer.
+ */
+function discreteSum(input, output) {
+ if (input.length != output.length) {
+    throw '[mixing-rules.js] discreteSum(): buffer lengths mismatch (input: ' +
+ input.length + ', output: ' + output.length + ')';
+ }
+
+ let numberOfChannels =
+      Math.min(input.numberOfChannels, output.numberOfChannels);
+
+ for (let channel = 0; channel < numberOfChannels; ++channel) {
+ let inputChannel = input.getChannelData(channel);
+ let outputChannel = output.getChannelData(channel);
+ for (let i = 0; i < outputChannel.length; i++)
+ outputChannel[i] += inputChannel[i];
+ }
+}
+
+/**
+ * Perform up-mix by in-place summing to |output| buffer.
+ * @param {AudioBuffer} input Input audio buffer.
+ * @param {AudioBuffer} output Output audio buffer.
+ */
+function processUpMix(input, output) {
+ let numberOfInputChannels = input.numberOfChannels;
+ let numberOfOutputChannels = output.numberOfChannels;
+ let i, length = output.length;
+
+ // Up-mixing: 1 -> 2, 1 -> 4
+ // output.L += input
+ // output.R += input
+ // output.SL += 0 (in the case of 1 -> 4)
+ // output.SR += 0 (in the case of 1 -> 4)
+ if ((numberOfInputChannels === 1 && numberOfOutputChannels === 2) ||
+ (numberOfInputChannels === 1 && numberOfOutputChannels === 4)) {
+ let inputChannel = input.getChannelData(0);
+ let outputChannel0 = output.getChannelData(0);
+ let outputChannel1 = output.getChannelData(1);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += inputChannel[i];
+ outputChannel1[i] += inputChannel[i];
+ }
+
+ return;
+ }
+
+ // Up-mixing: 1 -> 5.1
+ // output.L += 0
+ // output.R += 0
+ // output.C += input
+ // output.LFE += 0
+ // output.SL += 0
+ // output.SR += 0
+ if (numberOfInputChannels == 1 && numberOfOutputChannels == 6) {
+ let inputChannel = input.getChannelData(0);
+ let outputChannel2 = output.getChannelData(2);
+ for (i = 0; i < length; i++)
+ outputChannel2[i] += inputChannel[i];
+
+ return;
+ }
+
+ // Up-mixing: 2 -> 4, 2 -> 5.1
+ // output.L += input.L
+ // output.R += input.R
+ // output.C += 0 (in the case of 2 -> 5.1)
+ // output.LFE += 0 (in the case of 2 -> 5.1)
+ // output.SL += 0
+ // output.SR += 0
+ if ((numberOfInputChannels === 2 && numberOfOutputChannels === 4) ||
+ (numberOfInputChannels === 2 && numberOfOutputChannels === 6)) {
+ let inputChannel0 = input.getChannelData(0);
+ let inputChannel1 = input.getChannelData(1);
+ let outputChannel0 = output.getChannelData(0);
+ let outputChannel1 = output.getChannelData(1);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += inputChannel0[i];
+ outputChannel1[i] += inputChannel1[i];
+ }
+
+ return;
+ }
+
+ // Up-mixing: 4 -> 5.1
+ // output.L += input.L
+ // output.R += input.R
+ // output.C += 0
+ // output.LFE += 0
+ // output.SL += input.SL
+ // output.SR += input.SR
+ if (numberOfInputChannels === 4 && numberOfOutputChannels === 6) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.SL
+ let inputChannel3 = input.getChannelData(3); // input.SR
+ let outputChannel0 = output.getChannelData(0); // output.L
+ let outputChannel1 = output.getChannelData(1); // output.R
+ let outputChannel4 = output.getChannelData(4); // output.SL
+ let outputChannel5 = output.getChannelData(5); // output.SR
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += inputChannel0[i];
+ outputChannel1[i] += inputChannel1[i];
+ outputChannel4[i] += inputChannel2[i];
+ outputChannel5[i] += inputChannel3[i];
+ }
+
+ return;
+ }
+
+  // In all other cases, fall back to the discrete sum.
+ discreteSum(input, output);
+}
+
+/**
+ * Perform down-mix by in-place summing to |output| buffer.
+ * @param {AudioBuffer} input Input audio buffer.
+ * @param {AudioBuffer} output Output audio buffer.
+ */
+function processDownMix(input, output) {
+ let numberOfInputChannels = input.numberOfChannels;
+ let numberOfOutputChannels = output.numberOfChannels;
+ let i, length = output.length;
+
+ // Down-mixing: 2 -> 1
+ // output += 0.5 * (input.L + input.R)
+ if (numberOfInputChannels === 2 && numberOfOutputChannels === 1) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let outputChannel0 = output.getChannelData(0);
+ for (i = 0; i < length; i++)
+ outputChannel0[i] += 0.5 * (inputChannel0[i] + inputChannel1[i]);
+
+ return;
+ }
+
+ // Down-mixing: 4 -> 1
+ // output += 0.25 * (input.L + input.R + input.SL + input.SR)
+ if (numberOfInputChannels === 4 && numberOfOutputChannels === 1) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.SL
+ let inputChannel3 = input.getChannelData(3); // input.SR
+ let outputChannel0 = output.getChannelData(0);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += 0.25 *
+ (inputChannel0[i] + inputChannel1[i] + inputChannel2[i] +
+ inputChannel3[i]);
+ }
+
+ return;
+ }
+
+ // Down-mixing: 5.1 -> 1
+ // output += sqrt(1/2) * (input.L + input.R) + input.C
+ // + 0.5 * (input.SL + input.SR)
+ if (numberOfInputChannels === 6 && numberOfOutputChannels === 1) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.C
+ let inputChannel4 = input.getChannelData(4); // input.SL
+ let inputChannel5 = input.getChannelData(5); // input.SR
+ let outputChannel0 = output.getChannelData(0);
+ let scaleSqrtHalf = Math.sqrt(0.5);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] +=
+ scaleSqrtHalf * (inputChannel0[i] + inputChannel1[i]) +
+ inputChannel2[i] + 0.5 * (inputChannel4[i] + inputChannel5[i]);
+ }
+
+ return;
+ }
+
+ // Down-mixing: 4 -> 2
+ // output.L += 0.5 * (input.L + input.SL)
+ // output.R += 0.5 * (input.R + input.SR)
+ if (numberOfInputChannels == 4 && numberOfOutputChannels == 2) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.SL
+ let inputChannel3 = input.getChannelData(3); // input.SR
+ let outputChannel0 = output.getChannelData(0); // output.L
+ let outputChannel1 = output.getChannelData(1); // output.R
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += 0.5 * (inputChannel0[i] + inputChannel2[i]);
+ outputChannel1[i] += 0.5 * (inputChannel1[i] + inputChannel3[i]);
+ }
+
+ return;
+ }
+
+ // Down-mixing: 5.1 -> 2
+ // output.L += input.L + sqrt(1/2) * (input.C + input.SL)
+ // output.R += input.R + sqrt(1/2) * (input.C + input.SR)
+ if (numberOfInputChannels == 6 && numberOfOutputChannels == 2) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.C
+ let inputChannel4 = input.getChannelData(4); // input.SL
+ let inputChannel5 = input.getChannelData(5); // input.SR
+ let outputChannel0 = output.getChannelData(0); // output.L
+ let outputChannel1 = output.getChannelData(1); // output.R
+ let scaleSqrtHalf = Math.sqrt(0.5);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += inputChannel0[i] +
+ scaleSqrtHalf * (inputChannel2[i] + inputChannel4[i]);
+ outputChannel1[i] += inputChannel1[i] +
+ scaleSqrtHalf * (inputChannel2[i] + inputChannel5[i]);
+ }
+
+ return;
+ }
+
+ // Down-mixing: 5.1 -> 4
+ // output.L += input.L + sqrt(1/2) * input.C
+ // output.R += input.R + sqrt(1/2) * input.C
+ // output.SL += input.SL
+ // output.SR += input.SR
+ if (numberOfInputChannels === 6 && numberOfOutputChannels === 4) {
+ let inputChannel0 = input.getChannelData(0); // input.L
+ let inputChannel1 = input.getChannelData(1); // input.R
+ let inputChannel2 = input.getChannelData(2); // input.C
+ let inputChannel4 = input.getChannelData(4); // input.SL
+ let inputChannel5 = input.getChannelData(5); // input.SR
+ let outputChannel0 = output.getChannelData(0); // output.L
+ let outputChannel1 = output.getChannelData(1); // output.R
+ let outputChannel2 = output.getChannelData(2); // output.SL
+ let outputChannel3 = output.getChannelData(3); // output.SR
+ let scaleSqrtHalf = Math.sqrt(0.5);
+ for (i = 0; i < length; i++) {
+ outputChannel0[i] += inputChannel0[i] + scaleSqrtHalf * inputChannel2[i];
+ outputChannel1[i] += inputChannel1[i] + scaleSqrtHalf * inputChannel2[i];
+ outputChannel2[i] += inputChannel4[i];
+ outputChannel3[i] += inputChannel5[i];
+ }
+
+ return;
+ }
+
+  // In all other cases, fall back to the discrete sum.
+ discreteSum(input, output);
+}
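+
+// Usage sketch (assumes an AudioContext-like |context|; illustrative only):
+//   let stereo = context.createBuffer(2, 128, context.sampleRate);
+//   let surround = context.createBuffer(6, 128, context.sampleRate);
+//   speakersSum(stereo, surround);  // L/R summed into channels 0 and 1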
diff --git a/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js b/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js
new file mode 100644
index 0000000000..ad0631670d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/note-grain-on-testing.js
@@ -0,0 +1,165 @@
+// Use a power of two to eliminate round-off converting from frames to time.
+let sampleRate = 32768;
+
+// How many grains to play.
+let numberOfTests = 100;
+
+// Duration of each grain to be played. Make it a whole number of frames.
+let duration = Math.floor(0.01 * sampleRate) / sampleRate;
+
+// A little extra bit of silence between grain boundaries. Must be a whole
+// number of frames.
+let grainGap = Math.floor(0.005 * sampleRate) / sampleRate;
+
+// Time step between the start of each grain. We need to add a little
+// bit of silence so we can detect grain boundaries.
+let timeStep = duration + grainGap;
+
+// Step size for the offset into the source buffer for each successive
+// grain. Must be a whole number of frames.
+let grainOffsetStep = Math.floor(0.001 * sampleRate) / sampleRate;
+
+// How long to render to cover all of the grains.
+let renderTime = (numberOfTests + 1) * timeStep;
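+
+// With sampleRate = 32768 these constants quantize to whole frames, e.g.:
+//   duration        = floor(0.01  * 32768) / 32768 s = 327 frames
+//   grainGap        = floor(0.005 * 32768) / 32768 s = 163 frames
+//   grainOffsetStep = floor(0.001 * 32768) / 32768 s =  32 frames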
+
+let context;
+let renderedData;
+
+// Create a buffer containing the data that we want. The function f
+// returns the desired value at sample frame k.
+function createSignalBuffer(context, f) {
+ // Make sure the buffer has enough data for all of the possible
+ // grain offsets and durations. The additional 1 is for any
+ // round-off errors.
+ let signalLength =
+ Math.floor(1 + sampleRate * (numberOfTests * grainOffsetStep + duration));
+
+ let buffer = context.createBuffer(2, signalLength, sampleRate);
+ let data = buffer.getChannelData(0);
+
+ for (let k = 0; k < signalLength; ++k) {
+ data[k] = f(k);
+ }
+
+ return buffer;
+}
+
+// From the data array, find the start and end sample frame for each
+// grain. This depends on the data having 0's between grains and on
+// each grain being strictly non-zero.
+function findStartAndEndSamples(data) {
+ let nSamples = data.length;
+
+ let startTime = [];
+ let endTime = [];
+ let lookForStart = true;
+
+ // Look through the rendered data to find the start and stop
+ // times of each grain.
+ for (let k = 0; k < nSamples; ++k) {
+ if (lookForStart) {
+ // Find a non-zero point and record the start. We're not
+ // concerned with the value in this test, only that the
+ // grain started here.
+      if (data[k]) {
+ startTime.push(k);
+ lookForStart = false;
+ }
+ } else {
+ // Find a zero and record the end of the grain.
+      if (!data[k]) {
+ endTime.push(k);
+ lookForStart = true;
+ }
+ }
+ }
+
+ return {start: startTime, end: endTime};
+}
+
+function playGrain(context, source, time, offset, duration) {
+ let bufferSource = context.createBufferSource();
+
+ bufferSource.buffer = source;
+ bufferSource.connect(context.destination);
+ bufferSource.start(time, offset, duration);
+}
+
+// Play out all grains. Returns an object containing two arrays, one
+// for the start times and one for the grain offset times.
+function playAllGrains(context, source, numberOfNotes) {
+ let startTimes = new Array(numberOfNotes);
+ let offsets = new Array(numberOfNotes);
+
+ for (let k = 0; k < numberOfNotes; ++k) {
+ let timeOffset = k * timeStep;
+ let grainOffset = k * grainOffsetStep;
+
+ playGrain(context, source, timeOffset, grainOffset, duration);
+ startTimes[k] = timeOffset;
+ offsets[k] = grainOffset;
+ }
+
+ return {startTimes: startTimes, grainOffsetTimes: offsets};
+}
+
+// Verify that the start and end frames for each grain match our
+// expected start and end frames.
+function verifyStartAndEndFrames(startEndFrames, should) {
+ let startFrames = startEndFrames.start;
+ let endFrames = startEndFrames.end;
+
+ // Count of how many grains started at the incorrect time.
+ let errorCountStart = 0;
+
+ // Count of how many grains ended at the incorrect time.
+ let errorCountEnd = 0;
+
+ should(
+ startFrames.length == endFrames.length, 'Found all grain starts and ends')
+ .beTrue();
+
+ should(startFrames.length, 'Number of start frames').beEqualTo(numberOfTests);
+ should(endFrames.length, 'Number of end frames').beEqualTo(numberOfTests);
+
+ // Examine the start and stop times to see if they match our
+ // expectations.
+ for (let k = 0; k < startFrames.length; ++k) {
+ let expectedStart = timeToSampleFrame(k * timeStep, sampleRate);
+ // The end point is the duration.
+ let expectedEnd = expectedStart +
+ grainLengthInSampleFrames(k * grainOffsetStep, duration, sampleRate);
+
+ if (startFrames[k] != expectedStart)
+ ++errorCountStart;
+ if (endFrames[k] != expectedEnd)
+ ++errorCountEnd;
+
+ should([startFrames[k], endFrames[k]], 'Pulse ' + k + ' boundary')
+ .beEqualToArray([expectedStart, expectedEnd]);
+ }
+
+ // Check that all the grains started or ended at the correct time.
+ if (!errorCountStart) {
+ should(
+ startFrames.length, 'Number of grains that started at the correct time')
+ .beEqualTo(numberOfTests);
+ } else {
+ should(
+ errorCountStart,
+ 'Number of grains out of ' + numberOfTests +
+            ' that started at the wrong time')
+ .beEqualTo(0);
+ }
+
+ if (!errorCountEnd) {
+ should(endFrames.length, 'Number of grains that ended at the correct time')
+ .beEqualTo(numberOfTests);
+ } else {
+ should(
+ errorCountEnd,
+ 'Number of grains out of ' + numberOfTests +
+ ' that ended at the wrong time')
+ .beEqualTo(0);
+ }
+}
diff --git a/testing/web-platform/tests/webaudio/resources/panner-formulas.js b/testing/web-platform/tests/webaudio/resources/panner-formulas.js
new file mode 100644
index 0000000000..ae6f516668
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/panner-formulas.js
@@ -0,0 +1,190 @@
+// For the record, these distance formulas were taken from the OpenAL
+// spec
+// (http://connect.creativelabs.com/openal/Documentation/OpenAL%201.1%20Specification.pdf),
+// not the code. The Web Audio spec follows the OpenAL formulas.
+
+function linearDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ let dref = Math.min(panner.refDistance, panner.maxDistance);
+ let dmax = Math.max(panner.refDistance, panner.maxDistance);
+ distance = Math.max(Math.min(distance, dmax), dref);
+ let rolloff = Math.max(Math.min(panner.rolloffFactor, 1), 0);
+ if (dref === dmax)
+ return 1 - rolloff;
+
+ let gain = (1 - rolloff * (distance - dref) / (dmax - dref));
+
+ return gain;
+}
+
+function inverseDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ distance = Math.max(distance, panner.refDistance);
+ let rolloff = panner.rolloffFactor;
+ let gain = panner.refDistance /
+ (panner.refDistance +
+ rolloff * (Math.max(distance, panner.refDistance) - panner.refDistance));
+
+ return gain;
+}
+
+function exponentialDistance(panner, x, y, z) {
+ let distance = Math.sqrt(x * x + y * y + z * z);
+ distance = Math.max(distance, panner.refDistance);
+ let rolloff = panner.rolloffFactor;
+ let gain = Math.pow(distance / panner.refDistance, -rolloff);
+
+ return gain;
+}
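+
+// Worked example (hypothetical panner values, for illustration): with
+// refDistance = 1, maxDistance = 10, rolloffFactor = 1 and a source at
+// (3, 0, 4), i.e. distance 5:
+//   linear:      1 - 1 * (5 - 1) / (10 - 1) = 5 / 9 ~= 0.5556
+//   inverse:     1 / (1 + 1 * (5 - 1))      = 0.2
+//   exponential: (5 / 1)^(-1)               = 0.2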
+
+// Simple 3-D vector operations, with vectors represented as 3-element arrays.
+
+// x - y
+function vec3Sub(x, y) {
+ let z = new Float32Array(3);
+ z[0] = x[0] - y[0];
+ z[1] = x[1] - y[1];
+ z[2] = x[2] - y[2];
+
+ return z;
+}
+
+// x/|x|
+function vec3Normalize(x) {
+ let mag = Math.hypot(...x);
+ return x.map(function(c) {
+ return c / mag;
+ });
+}
+
+// x == 0?
+function vec3IsZero(x) {
+ return x[0] === 0 && x[1] === 0 && x[2] === 0;
+}
+
+// Vector cross product
+function vec3Cross(u, v) {
+ let cross = new Float32Array(3);
+ cross[0] = u[1] * v[2] - u[2] * v[1];
+ cross[1] = u[2] * v[0] - u[0] * v[2];
+ cross[2] = u[0] * v[1] - u[1] * v[0];
+ return cross;
+}
+
+// Dot product
+function vec3Dot(x, y) {
+ return x[0] * y[0] + x[1] * y[1] + x[2] * y[2];
+}
+
+// a*x, for scalar a
+function vec3Scale(a, x) {
+ return x.map(function(c) {
+ return a * c;
+ });
+}
+
+function calculateAzimuth(source, listener, listenerForward, listenerUp) {
+ let sourceListener = vec3Sub(source, listener);
+
+ if (vec3IsZero(sourceListener))
+ return 0;
+
+ sourceListener = vec3Normalize(sourceListener);
+
+ let listenerRight = vec3Normalize(vec3Cross(listenerForward, listenerUp));
+ let listenerForwardNorm = vec3Normalize(listenerForward);
+
+ let up = vec3Cross(listenerRight, listenerForwardNorm);
+ let upProjection = vec3Dot(sourceListener, up);
+
+ let projectedSource =
+ vec3Normalize(vec3Sub(sourceListener, vec3Scale(upProjection, up)));
+
+ let azimuth =
+ 180 / Math.PI * Math.acos(vec3Dot(projectedSource, listenerRight));
+
+ // Source in front or behind the listener
+ let frontBack = vec3Dot(projectedSource, listenerForwardNorm);
+ if (frontBack < 0)
+ azimuth = 360 - azimuth;
+
+ // Make azimuth relative to "front" and not "right" listener vector.
+ if (azimuth >= 0 && azimuth <= 270)
+ azimuth = 90 - azimuth;
+ else
+ azimuth = 450 - azimuth;
+
+ // We don't need elevation, so we're skipping that computation.
+ return azimuth;
+}
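+
+// Sanity check (illustrative values): a listener at the origin facing -z
+// with up +y, and a source at (1, 0, 0) directly to the right, gives
+//   calculateAzimuth([1, 0, 0], [0, 0, 0], [0, 0, -1], [0, 1, 0]) === 90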
+
+// Map our position angle to the azimuth angle (in degrees).
+//
+// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg.
+function angleToAzimuth(angle) {
+ return 90 - angle * 180 / Math.PI;
+}
+
+// The gain caused by the EQUALPOWER panning model
+function equalPowerGain(azimuth, numberOfChannels) {
+ let halfPi = Math.PI / 2;
+
+ if (azimuth < -90)
+ azimuth = -180 - azimuth;
+ else
+ azimuth = 180 - azimuth;
+
+ if (numberOfChannels == 1) {
+ let panPosition = (azimuth + 90) / 180;
+
+ let gainL = Math.cos(halfPi * panPosition);
+ let gainR = Math.sin(halfPi * panPosition);
+
+ return {left: gainL, right: gainR};
+ } else {
+ if (azimuth <= 0) {
+ let panPosition = (azimuth + 90) / 90;
+
+ let gainL = Math.cos(halfPi * panPosition);
+ let gainR = Math.sin(halfPi * panPosition);
+
+ return {left: gainL, right: gainR};
+ } else {
+ let panPosition = azimuth / 90;
+
+ let gainL = Math.cos(halfPi * panPosition);
+ let gainR = Math.sin(halfPi * panPosition);
+
+ return {left: gainL, right: gainR};
+ }
+ }
+}
+
+function applyPanner(azimuth, srcL, srcR, numberOfChannels) {
+ let length = srcL.length;
+ let outL = new Float32Array(length);
+ let outR = new Float32Array(length);
+
+ if (numberOfChannels == 1) {
+ for (let k = 0; k < length; ++k) {
+ let gains = equalPowerGain(azimuth[k], numberOfChannels);
+
+ outL[k] = srcL[k] * gains.left;
+ outR[k] = srcR[k] * gains.right;
+ }
+ } else {
+ for (let k = 0; k < length; ++k) {
+ let gains = equalPowerGain(azimuth[k], numberOfChannels);
+
+ if (azimuth[k] <= 0) {
+ outL[k] = srcL[k] + srcR[k] * gains.left;
+ outR[k] = srcR[k] * gains.right;
+ } else {
+ outL[k] = srcL[k] * gains.left;
+ outR[k] = srcR[k] + srcL[k] * gains.right;
+ }
+ }
+ }
+
+ return {left: outL, right: outR};
+}
diff --git a/testing/web-platform/tests/webaudio/resources/panner-model-testing.js b/testing/web-platform/tests/webaudio/resources/panner-model-testing.js
new file mode 100644
index 0000000000..4df3e17813
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/panner-model-testing.js
@@ -0,0 +1,184 @@
+// Use a power of two to eliminate round-off when converting frames to time and
+// vice versa.
+let sampleRate = 32768;
+
+let numberOfChannels = 1;
+
+// Time step when each panner node starts. Make sure it starts on a frame
+// boundary.
+let timeStep = Math.floor(0.001 * sampleRate) / sampleRate;
+
+// Length of the impulse signal.
+let pulseLengthFrames = Math.round(timeStep * sampleRate);
+
+// How many panner nodes to create for the test
+let nodesToCreate = 100;
+
+// Be sure we render long enough for all of our nodes.
+let renderLengthSeconds = timeStep * (nodesToCreate + 1);
+
+// These are global mostly for debugging.
+let context;
+let impulse;
+let bufferSource;
+let panner;
+let position;
+let time;
+
+let renderedBuffer;
+let renderedLeft;
+let renderedRight;
+
+function createGraph(context, nodeCount, positionSetter) {
+ bufferSource = new Array(nodeCount);
+ panner = new Array(nodeCount);
+ position = new Array(nodeCount);
+ time = new Array(nodeCount);
+ // Angle between panner locations. (nodeCount - 1 because we want
+  // to include both 0 and 180 deg.)
+ let angleStep = Math.PI / (nodeCount - 1);
+
+  if (numberOfChannels == 2) {
+    impulse = createStereoImpulseBuffer(context, pulseLengthFrames);
+  } else {
+    impulse = createImpulseBuffer(context, pulseLengthFrames);
+  }
+
+ for (let k = 0; k < nodeCount; ++k) {
+ bufferSource[k] = context.createBufferSource();
+ bufferSource[k].buffer = impulse;
+
+ panner[k] = context.createPanner();
+ panner[k].panningModel = 'equalpower';
+ panner[k].distanceModel = 'linear';
+
+ let angle = angleStep * k;
+ position[k] = {angle: angle, x: Math.cos(angle), z: Math.sin(angle)};
+ positionSetter(panner[k], position[k].x, 0, position[k].z);
+
+ bufferSource[k].connect(panner[k]);
+ panner[k].connect(context.destination);
+
+ // Start the source
+ time[k] = k * timeStep;
+ bufferSource[k].start(time[k]);
+ }
+}
+
+function createTestAndRun(
+ context, should, nodeCount, numberOfSourceChannels, positionSetter) {
+ numberOfChannels = numberOfSourceChannels;
+
+ createGraph(context, nodeCount, positionSetter);
+
+ return context.startRendering().then(buffer => checkResult(buffer, should));
+}
+
+// Map our position angle to the azimuth angle (in degrees).
+//
+// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg.
+function angleToAzimuth(angle) {
+ return 90 - angle * 180 / Math.PI;
+}
+
+// The gain caused by the EQUALPOWER panning model
+function equalPowerGain(angle) {
+ let azimuth = angleToAzimuth(angle);
+
+ if (numberOfChannels == 1) {
+ let panPosition = (azimuth + 90) / 180;
+
+ let gainL = Math.cos(0.5 * Math.PI * panPosition);
+ let gainR = Math.sin(0.5 * Math.PI * panPosition);
+
+ return {left: gainL, right: gainR};
+ } else {
+ if (azimuth <= 0) {
+ let panPosition = (azimuth + 90) / 90;
+
+ let gainL = 1 + Math.cos(0.5 * Math.PI * panPosition);
+ let gainR = Math.sin(0.5 * Math.PI * panPosition);
+
+ return {left: gainL, right: gainR};
+ } else {
+ let panPosition = azimuth / 90;
+
+ let gainL = Math.cos(0.5 * Math.PI * panPosition);
+ let gainR = 1 + Math.sin(0.5 * Math.PI * panPosition);
+
+ return {left: gainL, right: gainR};
+ }
+ }
+}
+
+function checkResult(renderedBuffer, should) {
+ renderedLeft = renderedBuffer.getChannelData(0);
+ renderedRight = renderedBuffer.getChannelData(1);
+
+ // The max error we allow between the rendered impulse and the
+ // expected value. This value is experimentally determined. Set
+ // to 0 to make the test fail to see what the actual error is.
+ let maxAllowedError = 1.1597e-6;
+
+ // Number of impulses found in the rendered result.
+ let impulseCount = 0;
+
+ // Max (relative) error and the index of the maxima for the left
+ // and right channels.
+ let maxErrorL = 0;
+ let maxErrorIndexL = 0;
+ let maxErrorR = 0;
+ let maxErrorIndexR = 0;
+
+ // Number of impulses that don't match our expected locations.
+ let timeCount = 0;
+
+  // Records of impulses that were not at the expected offsets.
+  let timeErrors = [];
+
+ for (let k = 0; k < renderedLeft.length; ++k) {
+ // We assume that the left and right channels start at the same instant.
+ if (renderedLeft[k] != 0 || renderedRight[k] != 0) {
+ // The expected gain for the left and right channels.
+ let pannerGain = equalPowerGain(position[impulseCount].angle);
+ let expectedL = pannerGain.left;
+ let expectedR = pannerGain.right;
+
+ // Absolute error in the gain.
+ let errorL = Math.abs(renderedLeft[k] - expectedL);
+ let errorR = Math.abs(renderedRight[k] - expectedR);
+
+      if (errorL > maxErrorL) {
+        maxErrorL = errorL;
+        maxErrorIndexL = impulseCount;
+      }
+      if (errorR > maxErrorR) {
+        maxErrorR = errorR;
+        maxErrorIndexR = impulseCount;
+      }
+
+ // Keep track of the impulses that didn't show up where we
+ // expected them to be.
+ let expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate);
+ if (k != expectedOffset) {
+ timeErrors[timeCount] = {actual: k, expected: expectedOffset};
+ ++timeCount;
+ }
+ ++impulseCount;
+ }
+ }
+
+ should(impulseCount, 'Number of impulses found').beEqualTo(nodesToCreate);
+
+ should(
+ timeErrors.map(x => x.actual),
+ 'Offsets of impulses at the wrong position')
+ .beEqualToArray(timeErrors.map(x => x.expected));
+
+ should(maxErrorL, 'Error in left channel gain values')
+ .beLessThanOrEqualTo(maxAllowedError);
+
+ should(maxErrorR, 'Error in right channel gain values')
+ .beLessThanOrEqualTo(maxAllowedError);
+}
diff --git a/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav b/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav
new file mode 100644
index 0000000000..f660c3c4b8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/sin_440Hz_-6dBFS_1s.wav
Binary files differ
diff --git a/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js b/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js
new file mode 100644
index 0000000000..0d2ea12f6d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/start-stop-exceptions.js
@@ -0,0 +1,45 @@
+// Test that exceptions are thrown for invalid values passed to start and
+// stop.
+function testStartStop(should, node, options) {
+  // Test non-finite values for start. These should all throw a TypeError.
+ const nonFiniteValues = [NaN, Infinity, -Infinity];
+
+ nonFiniteValues.forEach(time => {
+ should(() => {
+ node.start(time);
+ }, `start(${time})`)
+ .throw(TypeError);
+ });
+
+ should(() => {
+ node.stop();
+ }, 'Calling stop() before start()').throw(DOMException, 'InvalidStateError');
+
+ should(() => {
+ node.start(-1);
+ }, 'start(-1)').throw(RangeError);
+
+ if (options) {
+ options.forEach(test => {
+ should(() => {node.start(...test.args)},
+ 'start(' + test.args + ')').throw(test.errorType);
+ });
+ }
+
+ node.start();
+ should(() => {
+ node.start();
+ }, 'Calling start() twice').throw(DOMException, 'InvalidStateError');
+ should(() => {
+ node.stop(-1);
+ }, 'stop(-1)').throw(RangeError);
+
+ // Test non-finite stop times
+ nonFiniteValues.forEach(time => {
+ should(() => {
+ node.stop(time);
+ }, `stop(${time})`)
+ .throw(TypeError);
+ });
+}
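+
+// Usage sketch (assumes the audit.js |should| helper and an
+// AudioBufferSourceNode |src| are in scope; the extra case is illustrative):
+//   testStartStop(should, src,
+//       [{args: [0, -1], errorType: RangeError}]);  // negative offset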
+
diff --git a/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js b/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js
new file mode 100644
index 0000000000..6ea5eb6269
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/resources/stereopanner-testing.js
@@ -0,0 +1,205 @@
+let StereoPannerTest = (function() {
+
+ // Constants
+ let PI_OVER_TWO = Math.PI * 0.5;
+
+ // Use a power of two to eliminate any round-off when converting frames to
+ // time.
+ let gSampleRate = 32768;
+
+  // Time step when each panner node starts. Make sure this is on a frame
+  // boundary.
+ let gTimeStep = Math.floor(0.001 * gSampleRate) / gSampleRate;
+
+ // How many panner nodes to create for the test
+ let gNodesToCreate = 100;
+
+ // Total render length for all of our nodes.
+ let gRenderLength = gTimeStep * (gNodesToCreate + 1) + gSampleRate;
+
+ // Calculates channel gains based on equal power panning model.
+ // See: http://webaudio.github.io/web-audio-api/#panning-algorithm
+ function getChannelGain(pan, numberOfChannels) {
+ // The internal panning clips the pan value between -1, 1.
+ pan = Math.min(Math.max(pan, -1), 1);
+ let gainL, gainR;
+ // Consider number of channels and pan value's polarity.
+ if (numberOfChannels == 1) {
+ let panRadian = (pan * 0.5 + 0.5) * PI_OVER_TWO;
+ gainL = Math.cos(panRadian);
+ gainR = Math.sin(panRadian);
+ } else {
+ let panRadian = (pan <= 0 ? pan + 1 : pan) * PI_OVER_TWO;
+ if (pan <= 0) {
+ gainL = 1 + Math.cos(panRadian);
+ gainR = Math.sin(panRadian);
+ } else {
+ gainL = Math.cos(panRadian);
+ gainR = 1 + Math.sin(panRadian);
+ }
+ }
+ return {gainL: gainL, gainR: gainR};
+ }
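+
+  // For example (mono input, illustrative): getChannelGain(-1, 1) gives
+  // {gainL: 1, gainR: 0}, getChannelGain(0, 1) gives roughly
+  // {gainL: 0.7071, gainR: 0.7071}, and getChannelGain(1, 1) gives
+  // {gainL: ~0, gainR: 1}.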
+
+
+ /**
+ * Test implementation class.
+ * @param {Object} options Test options
+   * @param {String} options.prefix Prefix used in result messages
+   * @param {Number} options.numberOfInputChannels Number of input channels
+ */
+ function Test(should, options) {
+ // Primary test flag.
+ this.success = true;
+
+ this.should = should;
+ this.context = null;
+ this.prefix = options.prefix;
+ this.numberOfInputChannels = (options.numberOfInputChannels || 1);
+ switch (this.numberOfInputChannels) {
+ case 1:
+ this.description = 'Test for mono input';
+ break;
+ case 2:
+ this.description = 'Test for stereo input';
+ break;
+ }
+
+ // Onset time position of each impulse.
+ this.onsets = [];
+
+ // Pan position value of each impulse.
+ this.panPositions = [];
+
+    // Records of impulses that were not at the expected offsets.
+ this.errors = [];
+
+ // The index of the current impulse being verified.
+ this.impulseIndex = 0;
+
+ // The max error we allow between the rendered impulse and the
+ // expected value. This value is experimentally determined. Set
+ // to 0 to make the test fail to see what the actual error is.
+ this.maxAllowedError = 1.284318e-7;
+
+ // Max (absolute) error and the index of the maxima for the left
+ // and right channels.
+ this.maxErrorL = 0;
+ this.maxErrorR = 0;
+ this.maxErrorIndexL = 0;
+ this.maxErrorIndexR = 0;
+
+ // The maximum value to use for panner pan value. The value will range from
+ // -panLimit to +panLimit.
+ this.panLimit = 1.0625;
+ }
+
+
+ Test.prototype.init = function() {
+ this.context = new OfflineAudioContext(2, gRenderLength, gSampleRate);
+ };
+
+ // Prepare an audio graph for testing. Create multiple impulse generators and
+ // panner nodes, then play them sequentially while varying the pan position.
+ Test.prototype.prepare = function() {
+ let impulse;
+ let impulseLength = Math.round(gTimeStep * gSampleRate);
+ let sources = [];
+ let panners = [];
+
+    // Move the pan value for each panner in equal steps over
+    // [-panLimit, panLimit]. This checks that the internal pan value is
+    // clipped properly.
+ let panStep = (2 * this.panLimit) / (gNodesToCreate - 1);
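+    // E.g. with gNodesToCreate = 100 and panLimit = 1.0625, panStep =
+    // 2.125 / 99 ~= 0.0215, so the first and last few pan values fall
+    // outside [-1, 1] and exercise the clipping in getChannelGain().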
+
+ if (this.numberOfInputChannels === 1) {
+ impulse = createImpulseBuffer(this.context, impulseLength);
+ } else {
+ impulse = createStereoImpulseBuffer(this.context, impulseLength);
+ }
+
+ for (let i = 0; i < gNodesToCreate; i++) {
+ sources[i] = this.context.createBufferSource();
+ panners[i] = this.context.createStereoPanner();
+ sources[i].connect(panners[i]);
+ panners[i].connect(this.context.destination);
+ sources[i].buffer = impulse;
+ panners[i].pan.value = this.panPositions[i] = panStep * i - this.panLimit;
+
+ // Store the onset time position of impulse.
+ this.onsets[i] = gTimeStep * i;
+
+ sources[i].start(this.onsets[i]);
+ }
+ };
+
+
+ Test.prototype.verify = function() {
+ let chanL = this.renderedBufferL;
+ let chanR = this.renderedBufferR;
+ for (let i = 0; i < chanL.length; i++) {
+ // Left and right channels must start at the same instant.
+ if (chanL[i] !== 0 || chanR[i] !== 0) {
+ // Get amount of error between actual and expected gain.
+ let expected = getChannelGain(
+ this.panPositions[this.impulseIndex], this.numberOfInputChannels);
+ let errorL = Math.abs(chanL[i] - expected.gainL);
+ let errorR = Math.abs(chanR[i] - expected.gainR);
+
+ if (errorL > this.maxErrorL) {
+ this.maxErrorL = errorL;
+ this.maxErrorIndexL = this.impulseIndex;
+ }
+ if (errorR > this.maxErrorR) {
+ this.maxErrorR = errorR;
+ this.maxErrorIndexR = this.impulseIndex;
+ }
+
+ // Keep track of the impulses that didn't show up where we expected
+ // them to be.
+ let expectedOffset =
+ timeToSampleFrame(this.onsets[this.impulseIndex], gSampleRate);
+ if (i != expectedOffset) {
+ this.errors.push({actual: i, expected: expectedOffset});
+ }
+
+ this.impulseIndex++;
+ }
+ }
+ };
+
+
+ Test.prototype.showResult = function() {
+ this.should(this.impulseIndex, this.prefix + 'Number of impulses found')
+ .beEqualTo(gNodesToCreate);
+
+ this.should(
+ this.errors.length,
+        this.prefix + 'Number of impulses at the wrong offset')
+ .beEqualTo(0);
+
+ this.should(this.maxErrorL, this.prefix + 'Left channel error magnitude')
+ .beLessThanOrEqualTo(this.maxAllowedError);
+
+ this.should(this.maxErrorR, this.prefix + 'Right channel error magnitude')
+ .beLessThanOrEqualTo(this.maxAllowedError);
+ };
+
+ Test.prototype.run = function() {
+
+ this.init();
+ this.prepare();
+
+ return this.context.startRendering().then(renderedBuffer => {
+ this.renderedBufferL = renderedBuffer.getChannelData(0);
+ this.renderedBufferR = renderedBuffer.getChannelData(1);
+ this.verify();
+ this.showResult();
+ });
+ };
+
+ return {
+ create: function(should, options) {
+ return new Test(should, options);
+ }
+ };
+
+})();
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/processing-model/cycle-without-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/cycle-without-delay.html
new file mode 100644
index 0000000000..cab0f6ca8e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/cycle-without-delay.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html class="a">
+ <head>
+ <title>Cycles without DelayNode in audio node graph</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ function doTest() {
+ var off = new OfflineAudioContext(1, 512, 48000);
+ var osc = new OscillatorNode(off);
+ var fb = new GainNode(off);
+ // zero delay feedback loop
+ osc.connect(fb).connect(fb).connect(off.destination);
+ osc.start(0);
+ return off.startRendering().then((b) => {
+ return Promise.resolve(b.getChannelData(0));
+ });
+ }
+
+ promise_test(() => {
+ return doTest().then(samples => {
+ var silent = true;
+ for (var i = 0; i < samples.length; i++) {
+ if (samples[i] != 0.0) {
+ silent = false;
+ break;
+ }
+ }
+ assert_true(silent);
+ });
+ }, 'Test that cycles that don\'t contain a DelayNode are muted');
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/processing-model/delay-time-clamping.html b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/delay-time-clamping.html
new file mode 100644
index 0000000000..fa010df3cd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/delay-time-clamping.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html class="a">
+ <head>
+ <title>Delay time clamping in cycles</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ function doTest() {
+ let off = new OfflineAudioContext(1, 512, 48000);
+ let b = new AudioBuffer({sampleRate: off.sampleRate, length: 1});
+ b.getChannelData(0)[0] = 1;
+ let impulse = new AudioBufferSourceNode(off, {buffer: b});
+ impulse.start(0);
+ // This delayTime of 64 samples MUST be clamped to 128 samples when
+ // in a cycle.
+ let delay = new DelayNode(off, {delayTime: 64 / 48000});
+ let fb = new GainNode(off);
+ impulse.connect(fb).connect(delay).connect(fb).connect(off.destination);
+ return off.startRendering().then((b) => {
+ return Promise.resolve(b.getChannelData(0));
+ })
+ }
+
+ promise_test(() => {
+ return doTest().then(samples => {
+ for (var i = 0; i < samples.length; i++) {
+ if ((i % 128) != 0) {
+ assert_equals(
+ samples[i], 0.0,
+ 'Non-silent audio found in between delayed impulses');
+ } else {
+ assert_equals(
+ samples[i], 1.0,
+ 'Silent audio found instead of a delayed impulse');
+ }
+ }
+ });
+      }, 'Test that delayTime in a cycle is clamped to one rendering quantum');
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/processing-model/feedback-delay-time.html b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/feedback-delay-time.html
new file mode 100644
index 0000000000..96c2eb0658
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/processing-model/feedback-delay-time.html
@@ -0,0 +1,42 @@
+<!DOCTYPE html>
+<html class="a">
+ <head>
+ <title>Feedback cycle with delay in audio node graph</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ function doTest() {
+ var off = new OfflineAudioContext(1, 512, 48000);
+ var b = off.createBuffer(1, 1, 48000);
+ b.getChannelData(0)[0] = 1;
+ var impulse = new AudioBufferSourceNode(off, {buffer: b});
+ impulse.start(0);
+ var delay = new DelayNode(off, {delayTime: 128 / 48000});
+ var fb = new GainNode(off);
+ impulse.connect(fb).connect(delay).connect(fb).connect(off.destination);
+ return off.startRendering().then((b) => {
+ return Promise.resolve(b.getChannelData(0));
+ });
+ }
+
+ promise_test(() => {
+ return doTest().then(samples => {
+ for (var i = 0; i < samples.length; i++) {
+ if ((i % 128) != 0) {
+ assert_equals(
+ samples[i], 0.0,
+ 'Non-silent audio found in between delayed impulses');
+ } else {
+ assert_equals(
+ samples[i], 1.0,
+ 'Silent audio found instead of a delayed impulse');
+ }
+ }
+ });
+ }, 'Test that a DelayNode allows a feedback loop of a single rendering quantum');
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/ctor-analyser.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/ctor-analyser.html
new file mode 100644
index 0000000000..a9aa483151
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/ctor-analyser.html
@@ -0,0 +1,183 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: AnalyserNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'AnalyserNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'AnalyserNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'fftSize', value: 2048},
+ {name: 'frequencyBinCount', value: 1024},
+ {name: 'minDecibels', value: -100}, {name: 'maxDecibels', value: -30},
+ {name: 'smoothingTimeConstant', value: 0.8}
+ ]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'AnalyserNode');
+ task.done();
+ });
+
+ audit.define('constructor with options', (task, should) => {
+ let options = {
+ fftSize: 32,
+ maxDecibels: 1,
+ minDecibels: -13,
+ // Choose a value that can be represented the same as a float and as a
+ // double.
+ smoothingTimeConstant: 0.125
+ };
+
+ let node;
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node1 = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node instanceof AnalyserNode, 'node1 instanceof AnalyserNode')
+ .beEqualTo(true);
+ should(node.fftSize, 'node1.fftSize').beEqualTo(options.fftSize);
+ should(node.maxDecibels, 'node1.maxDecibels')
+ .beEqualTo(options.maxDecibels);
+ should(node.minDecibels, 'node1.minDecibels')
+ .beEqualTo(options.minDecibels);
+ should(node.smoothingTimeConstant, 'node1.smoothingTimeConstant')
+ .beEqualTo(options.smoothingTimeConstant);
+
+ task.done();
+ });
+
+ audit.define('construct invalid options', (task, should) => {
+ let node;
+
+ should(
+ () => {
+ node = new AnalyserNode(context, {fftSize: 33});
+ },
+ 'node = new AnalyserNode(c, { fftSize: 33 })')
+ .throw(DOMException, 'IndexSizeError');
+ should(
+ () => {
+ node = new AnalyserNode(context, {maxDecibels: -500});
+ },
+ 'node = new AnalyserNode(c, { maxDecibels: -500 })')
+ .throw(DOMException, 'IndexSizeError');
+ should(
+ () => {
+ node = new AnalyserNode(context, {minDecibels: -10});
+ },
+ 'node = new AnalyserNode(c, { minDecibels: -10 })')
+ .throw(DOMException, 'IndexSizeError');
+ should(
+ () => {
+ node = new AnalyserNode(context, {smoothingTimeConstant: 2});
+ },
+ 'node = new AnalyserNode(c, { smoothingTimeConstant: 2 })')
+ .throw(DOMException, 'IndexSizeError');
+ should(function() {
+ node = new AnalyserNode(context, {frequencyBinCount: 33});
+ }, 'node = new AnalyserNode(c, { frequencyBinCount: 33 })').notThrow();
+ should(node.frequencyBinCount, 'node.frequencyBinCount')
+ .beEqualTo(1024);
+
+ task.done();
+ });
+
+ audit.define('setting min/max', (task, should) => {
+ let node;
+
+          // Recall the default values of minDecibels and maxDecibels are
+          // -100 and -30, respectively. Setting both values in the
+          // constructor should not signal an error in any of the following
+          // cases.
+ let options = {minDecibels: -10, maxDecibels: 20};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ options = {maxDecibels: 20, minDecibels: -10};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ options = {minDecibels: -200, maxDecibels: -150};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ options = {maxDecibels: -150, minDecibels: -200};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+          // But these should throw because minDecibels > maxDecibels.
+ options = {maxDecibels: -150, minDecibels: -10};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ options = {minDecibels: -10, maxDecibels: -150};
+ should(
+ () => {
+ node = new AnalyserNode(context, options);
+ },
+ 'node = new AnalyserNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-basic.html
new file mode 100644
index 0000000000..e176d6111e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-basic.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ realtimeanalyser-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context = 0;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Basic AnalyserNode test', function(task, should) {
+ context = new AudioContext();
+ let analyser = context.createAnalyser();
+
+ should(analyser.numberOfInputs, 'Number of inputs for AnalyserNode')
+ .beEqualTo(1);
+
+ should(analyser.numberOfOutputs, 'Number of outputs for AnalyserNode')
+ .beEqualTo(1);
+
+ should(analyser.minDecibels, 'Default minDecibels value')
+ .beEqualTo(-100);
+
+ should(analyser.maxDecibels, 'Default maxDecibels value')
+ .beEqualTo(-30);
+
+ should(
+ analyser.smoothingTimeConstant,
+ 'Default smoothingTimeConstant value')
+ .beEqualTo(0.8);
+
+ let expectedValue = -50 - (1 / 3);
+ analyser.minDecibels = expectedValue;
+
+ should(analyser.minDecibels, 'node.minDecibels = ' + expectedValue)
+ .beEqualTo(expectedValue);
+
+ expectedValue = -40 - (1 / 3);
+ analyser.maxDecibels = expectedValue;
+
+ should(analyser.maxDecibels, 'node.maxDecibels = ' + expectedValue)
+ .beEqualTo(expectedValue);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-scaling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-scaling.html
new file mode 100644
index 0000000000..043bd5890a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-scaling.html
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ realtimeanalyser-fft-scaling.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <div id="description"></div>
+ <div id="console"></div>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+      // The number of analysers. We have one analyser for each of the
+      // possible FFT sizes from 2^5 to 2^15, for a total of 11.
+ let numberOfAnalysers = 11;
+ let sampleRate = 44100;
+ let nyquistFrequency = sampleRate / 2;
+
+ // Frequency of the sine wave test signal. Should be high enough so that
+      // we get at least one full cycle for the 32-point FFT. It should also
+      // be chosen so that the frequency falls exactly in one of the FFT bins
+      // for each of the possible FFT sizes.
+ let oscFrequency = nyquistFrequency / 16;
+
+ // The actual peak values from each analyser. Useful for examining the
+ // actual peak values.
+ let peakValue = new Array(numberOfAnalysers);
+
+ // For a 0dBFS sine wave, we would expect the FFT magnitude to be 0dB as
+      // well, but the analyser node applies a Blackman window (to smooth the
+ // estimate). This reduces the energy of the signal so the FFT peak is
+ // less than 0dB. The threshold value given here was determined
+ // experimentally.
+ //
+ // See https://code.google.com/p/chromium/issues/detail?id=341596.
+ let peakThreshold = [
+ -14.43, -13.56, -13.56, -13.56, -13.56, -13.56, -13.56, -13.56, -13.56,
+ -13.56, -13.56
+ ];
+
+ function checkResult(order, analyser, should) {
+ return function() {
+ let index = order - 5;
+ let fftSize = 1 << order;
+ let fftData = new Float32Array(fftSize);
+ analyser.getFloatFrequencyData(fftData);
+
+ // Compute the frequency bin that should contain the peak.
+ let expectedBin =
+ analyser.frequencyBinCount * (oscFrequency / nyquistFrequency);
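+        // E.g. (illustrative): with oscFrequency = nyquist / 16 this is
+        // frequencyBinCount / 16 = fftSize / 32, i.e. bin 1 for a 32-point
+        // FFT and bin 1024 for a 32768-point FFT.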
+
+ // Find the actual bin by finding the bin containing the peak.
+ let actualBin = 0;
+ peakValue[index] = -1000;
+          for (let k = 0; k < analyser.frequencyBinCount; ++k) {
+ if (fftData[k] > peakValue[index]) {
+ actualBin = k;
+ peakValue[index] = fftData[k];
+ }
+ }
+
+ should(actualBin, (1 << order) + '-point FFT peak position')
+ .beEqualTo(expectedBin);
+
+ should(
+ peakValue[index], (1 << order) + '-point FFT peak value in dBFS')
+ .beGreaterThanOrEqualTo(peakThreshold[index]);
+ }
+ }
+
+ audit.define(
+ {
+ label: 'FFT scaling tests',
+ description: 'Test Scaling of FFT in AnalyserNode'
+ },
+ async function(task, should) {
+ for (let k = 5; k <= 15; ++k)
+ await runTest(k, should);
+ task.done();
+ });
+
+ function runTest(order, should) {
+ let context = new OfflineAudioContext(1, 1 << order, sampleRate);
+ // Use a sine wave oscillator as the reference source signal.
+ let osc = context.createOscillator();
+ osc.type = 'sine';
+ osc.frequency.value = oscFrequency;
+ osc.connect(context.destination);
+
+ let analyser = context.createAnalyser();
+ // No smoothing to simplify the analysis of the result.
+ analyser.smoothingTimeConstant = 0;
+ analyser.fftSize = 1 << order;
+ osc.connect(analyser);
+
+ osc.start();
+ return context.startRendering().then(() => {
+ checkResult(order, analyser, should)();
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-sizing.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-sizing.html
new file mode 100644
index 0000000000..7ee6a2237e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/realtimeanalyser-fft-sizing.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ realtimeanalyser-fft-sizing.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ function doTest(fftSize, illegal, should) {
+ let c = new OfflineAudioContext(1, 1000, 44100);
+ let a = c.createAnalyser();
+ let message = 'Setting fftSize to ' + fftSize;
+ let tester = function() {
+ a.fftSize = fftSize;
+ };
+
+ if (illegal) {
+ should(tester, message).throw(DOMException, 'IndexSizeError');
+ } else {
+ should(tester, message).notThrow();
+ }
+ }
+
+ audit.define(
+ {
+ label: 'FFT size test',
+ description: 'Test that re-sizing the FFT arrays does not fail.'
+ },
+ function(task, should) {
+ doTest(-1, true, should);
+ doTest(0, true, should);
+ doTest(1, true, should);
+ for (let i = 2; i <= 0x20000; i *= 2) {
+ if (i >= 32 && i <= 32768)
+ doTest(i, false, should);
+ else
+ doTest(i, true, should);
+ doTest(i + 1, true, should);
+ }
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-gain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-gain.html
new file mode 100644
index 0000000000..dff51a74c5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-gain.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script>
+promise_test(function() {
+ // fftSize <= bufferSize so that the time domain data is full of input after
+ // processing the buffer.
+ const fftSize = 32;
+ const bufferSize = 128;
+
+ var context = new OfflineAudioContext(1, bufferSize, 48000);
+
+ var analyser1 = context.createAnalyser();
+ analyser1.fftSize = fftSize;
+ analyser1.connect(context.destination);
+ var analyser2 = context.createAnalyser();
+ analyser2.fftSize = fftSize;
+
+ var gain = context.createGain();
+ gain.gain.value = 2.0;
+ gain.connect(analyser1);
+ gain.connect(analyser2);
+
+ // Create a DC input to make getFloatTimeDomainData() output consistent at
+ // any time.
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0 / gain.gain.value;
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.loop = true;
+ source.connect(gain);
+ source.start();
+
+ return context.startRendering().then(function(buffer) {
+ assert_equals(buffer.getChannelData(0)[0], 1.0, "analyser1 output");
+
+ var data = new Float32Array(1);
+ analyser1.getFloatTimeDomainData(data);
+ assert_equals(data[0], 1.0, "analyser1 time domain data");
+ analyser2.getFloatTimeDomainData(data);
+ assert_equals(data[0], 1.0, "analyser2 time domain data");
+ });
+}, "Test effect of AnalyserNode on GainNode output");
+ </script>
+</head>
+<body>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-minimum.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-minimum.html
new file mode 100644
index 0000000000..ab0fe6b2d6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-minimum.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>Test AnalyserNode when the input is silent</title>
+ <meta name="timeout" content="long">
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script>
+ setup({ single_test: true });
+ var ac = new AudioContext();
+ var analyser = ac.createAnalyser();
+ var constant = ac.createConstantSource();
+ var sp = ac.createScriptProcessor(2048, 1, 1);
+
+ constant.offset.value = 0.0;
+
+ constant.connect(analyser).connect(ac.destination);
+
+ constant.connect(sp).connect(ac.destination);
+
+ var buf = new Float32Array(analyser.frequencyBinCount);
+ var iteration_count = 10;
+ sp.onaudioprocess = function() {
+ analyser.getFloatFrequencyData(buf);
+    var correct = true;
+    for (var i = 0; i < buf.length; i++) {
+      correct = correct && buf[i] == -Infinity;
+    }
+    assert_true(correct, "silent input produces -Infinity in decibel bins");
+ if (!iteration_count--) {
+ sp.onaudioprocess = null;
+ constant.stop();
+ ac.close();
+ done();
+ }
+ };
+
+ constant.start();
+ </script>
+</head>
+<body>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-output.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-output.html
new file mode 100644
index 0000000000..43d56b8990
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-output.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>AnalyserNode output</title>
+ <meta name="timeout" content="long">
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ <script>
+setup({ single_test: true });
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 1,
+ createGraph: function(context) {
+ var source = context.createBufferSource();
+
+ var analyser = context.createAnalyser();
+
+ source.buffer = this.buffer;
+
+ source.connect(analyser);
+
+ source.start(0);
+ return analyser;
+ },
+ createExpectedBuffers: function(context) {
+ this.buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ this.buffer.getChannelData(0)[i] = Math.sin(
+ 440 * 2 * Math.PI * i / context.sampleRate
+ );
+ }
+
+ return [this.buffer];
+ }
+};
+
+runTest("AnalyserNode output");
+ </script>
+</head>
+<body>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-scale.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-scale.html
new file mode 100644
index 0000000000..904b14bede
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analyser-scale.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>Test AnalyserNode when the input is scaled</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script>
+ setup({ single_test: true });
+
+ var context = new AudioContext();
+
+ var gain = context.createGain();
+ var analyser = context.createAnalyser();
+ var osc = context.createOscillator();
+
+ osc.connect(gain);
+ gain.connect(analyser);
+
+ osc.start();
+
+ var array = new Uint8Array(analyser.frequencyBinCount);
+
+ function getAnalyserData() {
+ gain.gain.setValueAtTime(currentGain, context.currentTime);
+ analyser.getByteTimeDomainData(array);
+      var max = 0;
+      for (var i = 0; i < array.length; i++) {
+        var deviation = Math.abs(array[i] - 128);
+        if (deviation > max) {
+          max = deviation;
+        }
+      }
+ if (max <= currentGain * 128) {
+ assert_true(true, "Analyser got scaled data for " + currentGain);
+ currentGain = tests.shift();
+ if (currentGain == undefined) {
+ done();
+ return;
+ }
+ }
+ requestAnimationFrame(getAnalyserData);
+ }
+
+ var tests = [1.0, 0.5, 0.0];
+ var currentGain = tests.shift();
+ requestAnimationFrame(getAnalyserData);
+ </script>
+</head>
+<body>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analysernode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analysernode.html
new file mode 100644
index 0000000000..e8325388d1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/test-analysernode.html
@@ -0,0 +1,237 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script>
+ function testNode() {
+ var context = new AudioContext();
+ var buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ buffer.getChannelData(0)[i] = Math.sin(
+ 440 * 2 * Math.PI * i / context.sampleRate
+ );
+ }
+
+ var destination = context.destination;
+
+ var source = context.createBufferSource();
+
+ var analyser = context.createAnalyser();
+
+ source.buffer = buffer;
+
+ source.connect(analyser);
+ analyser.connect(destination);
+
+ assert_equals(
+ analyser.channelCount,
+ 2,
+ "analyser node has 2 input channels by default"
+ );
+ assert_equals(
+ analyser.channelCountMode,
+ "max",
+ "Correct channelCountMode for the analyser node"
+ );
+ assert_equals(
+ analyser.channelInterpretation,
+ "speakers",
+ "Correct channelCountInterpretation for the analyser node"
+ );
+
+ assert_equals(
+ analyser.fftSize,
+ 2048,
+ "Correct default value for fftSize"
+ );
+ assert_equals(
+ analyser.frequencyBinCount,
+ 1024,
+ "Correct default value for frequencyBinCount"
+ );
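+ // fftSize must be a power of two in [32, 32768]; any other value throws
+ // an IndexSizeError.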
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 0;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 1;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 8;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 100;
+ }); // non-power of two
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 2049;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 4097;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 8193;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 16385;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 32769;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.fftSize = 65536;
+ });
+ analyser.fftSize = 1024;
+ assert_equals(
+ analyser.frequencyBinCount,
+ 512,
+ "Correct new value for frequencyBinCount"
+ );
+
+ assert_equals(
+ analyser.minDecibels,
+ -100,
+ "Correct default value for minDecibels"
+ );
+ assert_equals(
+ analyser.maxDecibels,
+ -30,
+ "Correct default value for maxDecibels"
+ );
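+ // minDecibels must stay strictly below maxDecibels; violating that
+ // ordering from either side throws an IndexSizeError.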
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.minDecibels = -30;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.minDecibels = -29;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.maxDecibels = -100;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.maxDecibels = -101;
+ });
+
+ assert_true(
+ Math.abs(analyser.smoothingTimeConstant - 0.8) < 0.001,
+ "Correct default value for smoothingTimeConstant"
+ );
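+ // smoothingTimeConstant must lie in [0, 1]; out-of-range values throw,
+ // while the endpoints themselves are allowed (set below).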
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.smoothingTimeConstant = -0.1;
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser.smoothingTimeConstant = 1.1;
+ });
+ analyser.smoothingTimeConstant = 0;
+ analyser.smoothingTimeConstant = 1;
+ }
+
+ function testConstructor() {
+ var context = new AudioContext();
+
+ var analyser = new AnalyserNode(context);
+ assert_equals(
+ analyser.channelCount,
+ 2,
+ "analyser node has 2 input channels by default"
+ );
+ assert_equals(
+ analyser.channelCountMode,
+ "max",
+ "Correct channelCountMode for the analyser node"
+ );
+ assert_equals(
+ analyser.channelInterpretation,
+ "speakers",
+ "Correct channelCountInterpretation for the analyser node"
+ );
+
+ assert_equals(
+ analyser.fftSize,
+ 2048,
+ "Correct default value for fftSize"
+ );
+ assert_equals(
+ analyser.frequencyBinCount,
+ 1024,
+ "Correct default value for frequencyBinCount"
+ );
+ assert_equals(
+ analyser.minDecibels,
+ -100,
+ "Correct default value for minDecibels"
+ );
+ assert_equals(
+ analyser.maxDecibels,
+ -30,
+ "Correct default value for maxDecibels"
+ );
+ assert_true(
+ Math.abs(analyser.smoothingTimeConstant - 0.8) < 0.001,
+ "Correct default value for smoothingTimeConstant"
+ );
+
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 0 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 1 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 8 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 100 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 2049 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 4097 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 8193 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 16385 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 32769 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { fftSize: 65536 });
+ });
+ analyser = new AnalyserNode(context, { fftSize: 1024 });
+ assert_equals(
+ analyser.frequencyBinCount,
+ 512,
+ "Correct new value for frequencyBinCount"
+ );
+
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { minDecibels: -30 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { minDecibels: -29 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { maxDecibels: -100 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { maxDecibels: -101 });
+ });
+
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: -0.1 });
+ });
+ assert_throws_dom("INDEX_SIZE_ERR", function() {
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: 1.1 });
+ });
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: 0 });
+ analyser = new AnalyserNode(context, { smoothingTimeConstant: 1 });
+ }
+ test(testNode, "Test AnalyserNode API");
+ test(testConstructor, "Test AnalyserNode's ctor API");
+ </script>
+</head>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/acquire-the-content.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/acquire-the-content.html
new file mode 100644
index 0000000000..659a69c866
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/acquire-the-content.html
@@ -0,0 +1,85 @@
+<!doctype html>
+<meta charset="utf-8">
+<title>Test for AudioBuffer's "acquire the content" operation</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const SAMPLERATE = 8000;
+const LENGTH = 128;
+
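+// "Acquire the content" detaches the buffer's backing ArrayBuffers, so the
+// later writes of 0.5 through the stale channelData view are no-ops, and a
+// fresh getChannelData() still reads the 1.0 values captured at acquisition.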
+var tests = {
+ "AudioBufferSourceNode setter set with non-null buffer": function(oac) {
+ var buf = oac.createBuffer(1, LENGTH, SAMPLERATE);
+ var bs = new AudioBufferSourceNode(oac);
+ var channelData = buf.getChannelData(0);
+ for (var i = 0; i < channelData.length; i++) {
+ channelData[i] = 1.0;
+ }
+ bs.buffer = buf;
+ bs.start(); // This acquires the content since buf is not null
+ for (var i = 0; i < channelData.length; i++) {
+ channelData[i] = 0.5;
+ }
+ allSamplesAtOne(buf, "reading back");
+ bs.connect(oac.destination);
+ return oac.startRendering();
+ },
+ "AudioBufferSourceNode buffer setter set with null" : (oac) => {
+ var buf = oac.createBuffer(1, LENGTH, SAMPLERATE);
+ var bs = new AudioBufferSourceNode(oac);
+ var channelData = buf.getChannelData(0);
+ for (var i = 0; i < channelData.length; i++) {
+ channelData[i] = 1.0;
+ }
+ bs.buffer = null;
+ bs.start(); // This does not acquire the content
+ bs.buffer = buf; // This does
+ for (var i = 0; i < channelData.length; i++) {
+ channelData[i] = 0.5;
+ }
+ allSamplesAtOne(buf, "reading back");
+ bs.connect(oac.destination);
+ return oac.startRendering();
+ },
+ "ConvolverNode": (oac) => {
+ var buf = oac.createBuffer(1, LENGTH, SAMPLERATE);
+ var impulse = oac.createBuffer(1, 1, SAMPLERATE);
+ var bs = new AudioBufferSourceNode(oac);
+ var convolver = new ConvolverNode(oac, {disableNormalization: true});
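+ // Convolving with a unit impulse is the identity, so the rendered output
+ // should reproduce the acquired all-ones input, up to small numerical
+ // error.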
+
+ impulse.getChannelData(0)[0] = 1.0; // unit impulse function
+ convolver.buffer = impulse; // This does acquire the content
+ impulse.getChannelData(0)[0] = 0.5;
+
+ var channelData = buf.getChannelData(0);
+ for (var i = 0; i < channelData.length; i++) {
+ channelData[i] = 1.0;
+ }
+ bs.buffer = buf;
+ bs.start();
+
+ bs.connect(convolver).connect(oac.destination);
+ return oac.startRendering();
+ }
+};
+
+function allSamplesAtOne(audiobuffer, location) {
+ var buf = audiobuffer.getChannelData(0);
+ for (var i = 0; i < buf.length; i++) {
+ // The convolver can introduce a slight numerical error.
+ if (Math.abs(buf[i] - 1.0) > 0.0001) {
+ assert_true(false, `Invalid value at index ${i}, expected close to 1.0, found ${buf[i]} when ${location}`)
+ return Promise.reject();
+ }
+ }
+ assert_true(true, `Buffer unmodified when ${location}.`);
+ return Promise.resolve();
+}
+
+for (const test of Object.keys(tests)) {
+ promise_test(async function(t) {
+ var buf = await tests[test](new OfflineAudioContext(1, LENGTH, SAMPLERATE));
+ return allSamplesAtOne(buf, "rendering");
+ }, test);
+}
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html
new file mode 100644
index 0000000000..c0cd49d325
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html
@@ -0,0 +1,330 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Basic Functionality of AudioBuffer.copyFromChannel and
+ AudioBuffer.copyToChannel
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Define utility routines.
+
+ // Initialize the AudioBuffer |buffer| with a ramp signal on each channel.
+ // The ramp starts at channel number + 1.
+ function initializeAudioBufferRamp(buffer) {
+ for (let c = 0; c < buffer.numberOfChannels; ++c) {
+ let d = buffer.getChannelData(c);
+ for (let k = 0; k < d.length; ++k) {
+ d[k] = k + c + 1;
+ }
+ }
+ }
+
+ // Create a Float32Array of length |length| and initialize the array to
+ // -1.
+ function createInitializedF32Array(length) {
+ let x = new Float32Array(length);
+ for (let k = 0; k < length; ++k) {
+ x[k] = -1;
+ }
+ return x;
+ }
+
+ // Create a Float32Array of length |length| that is initialized to be a
+ // ramp starting at 1.
+ function createFloat32RampArray(length) {
+ let x = new Float32Array(length);
+ for (let k = 0; k < x.length; ++k) {
+ x[k] = k + 1;
+ }
+
+ return x;
+ }
+
+ // Test that the array |x| is a ramp starting at value |start| of length
+ // |length|, starting at |startIndex| in the array. |startIndex| is
+ // optional and defaults to 0. Any other values must be -1.
+ function shouldBeRamp(
+ should, testName, x, startValue, length, startIndex) {
+ let k;
+ let startingIndex = startIndex || 0;
+ let expected = Array(x.length);
+
+ // Fill the expected array with the correct results.
+
+ // The initial part (if any) must be -1.
+ for (k = 0; k < startingIndex; ++k) {
+ expected[k] = -1;
+ }
+
+ // The second part should be a ramp starting with |startValue|
+ for (; k < startingIndex + length; ++k) {
+ expected[k] = startValue + k - startingIndex;
+ }
+
+ // The last part (if any) should be -1.
+ for (; k < x.length; ++k) {
+ expected[k] = -1;
+ }
+
+ should(x, testName, {numberOfArrayLog: 32}).beEqualToArray(expected);
+ }
+
+ let audit = Audit.createTaskRunner();
+
+ let context = new AudioContext();
+ // Temp array for testing exceptions for copyToChannel/copyFromChannel.
+ // The length is arbitrary.
+ let x = new Float32Array(8);
+
+ // Number of frames in the AudioBuffer for testing. This is pretty
+ // arbitrary so choose a fairly small value.
+ let bufferLength = 16;
+
+ // Number of channels in the AudioBuffer. Also arbitrary, but it should
+ // be greater than 1 for test coverage.
+ let numberOfChannels = 3;
+
+ // AudioBuffer that will be used for testing copyFrom and copyTo.
+ let buffer = context.createBuffer(
+ numberOfChannels, bufferLength, context.sampleRate);
+
+ let initialValues = Array(numberOfChannels);
+
+ // Initialize things
+ audit.define('initialize', (task, should) => {
+ // Initialize to -1.
+ initialValues.fill(-1);
+ should(initialValues, 'Initialized values').beConstantValueOf(-1)
+ task.done();
+ });
+
+ // Test that expected exceptions are signaled for copyFrom.
+ audit.define('copyFrom-exceptions', (task, should) => {
+ should(
+ AudioBuffer.prototype.copyFromChannel,
+ 'AudioBuffer.prototype.copyFromChannel')
+ .exist();
+
+ should(
+ () => {
+ buffer = context.createBuffer(
+ numberOfChannels, bufferLength, context.sampleRate);
+ },
+ '0: buffer = context.createBuffer(' + numberOfChannels + ', ' +
+ bufferLength + ', context.sampleRate)')
+ .notThrow();
+ should(() => {
+ buffer.copyFromChannel(null, 0);
+ }, '1: buffer.copyFromChannel(null, 0)').throw(TypeError);
+ should(() => {
+ buffer.copyFromChannel(context, 0);
+ }, '2: buffer.copyFromChannel(context, 0)').throw(TypeError);
+ should(() => {
+ buffer.copyFromChannel(x, -1);
+ }, '3: buffer.copyFromChannel(x, -1)').throw(DOMException, 'IndexSizeError');
+ should(
+ () => {
+ buffer.copyFromChannel(x, numberOfChannels);
+ },
+ '4: buffer.copyFromChannel(x, ' + numberOfChannels + ')')
+ .throw(DOMException, 'IndexSizeError');
+ should(() => {
+ buffer.copyFromChannel(x, 0, -1);
+ }, '5: buffer.copyFromChannel(x, 0, -1)').notThrow();
+ should(
+ () => {
+ buffer.copyFromChannel(x, 0, bufferLength);
+ },
+ '6: buffer.copyFromChannel(x, 0, ' + bufferLength + ')')
+ .notThrow();
+
+ should(() => {
+ buffer.copyFromChannel(x, 3);
+ }, '7: buffer.copyFromChannel(x, 3)').throw(DOMException, 'IndexSizeError');
+
+ // See https://github.com/whatwg/html/issues/5380 for why not `new SharedArrayBuffer()`
+ // WebAssembly.Memory's size is in multiples of 64 KiB
+ const shared_buffer = new Float32Array(new WebAssembly.Memory({ shared:true, initial:1, maximum:1 }).buffer);
+ should(
+ () => {
+ buffer.copyFromChannel(shared_buffer, 0);
+ },
+ '8: buffer.copyFromChannel(SharedArrayBuffer view, 0)')
+ .throw(TypeError);
+
+ should(
+ () => {
+ buffer.copyFromChannel(shared_buffer, 0, 0);
+ },
+ '9: buffer.copyFromChannel(SharedArrayBuffer view, 0, 0)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // Test that expected exceptions are signaled for copyTo.
+ audit.define('copyTo-exceptions', (task, should) => {
+ should(
+ AudioBuffer.prototype.copyToChannel,
+ 'AudioBuffer.prototype.copyToChannel')
+ .exist();
+ should(() => {
+ buffer.copyToChannel(null, 0);
+ }, '0: buffer.copyToChannel(null, 0)').throw(TypeError);
+ should(() => {
+ buffer.copyToChannel(context, 0);
+ }, '1: buffer.copyToChannel(context, 0)').throw(TypeError);
+ should(() => {
+ buffer.copyToChannel(x, -1);
+ }, '2: buffer.copyToChannel(x, -1)').throw(DOMException, 'IndexSizeError');
+ should(
+ () => {
+ buffer.copyToChannel(x, numberOfChannels);
+ },
+ '3: buffer.copyToChannel(x, ' + numberOfChannels + ')')
+ .throw(DOMException, 'IndexSizeError');
+ should(() => {
+ buffer.copyToChannel(x, 0, -1);
+ }, '4: buffer.copyToChannel(x, 0, -1)').notThrow();
+ should(
+ () => {
+ buffer.copyToChannel(x, 0, bufferLength);
+ },
+ '5: buffer.copyToChannel(x, 0, ' + bufferLength + ')')
+ .notThrow();
+
+ should(() => {
+ buffer.copyToChannel(x, 3);
+ }, '6: buffer.copyToChannel(x, 3)').throw(DOMException, 'IndexSizeError');
+
+ // See https://github.com/whatwg/html/issues/5380 for why not `new SharedArrayBuffer()`
+ // WebAssembly.Memory's size is in multiples of 64 KiB
+ const shared_buffer = new Float32Array(new WebAssembly.Memory({ shared:true, initial:1, maximum:1 }).buffer);
+ should(
+ () => {
+ buffer.copyToChannel(shared_buffer, 0);
+ },
+ '7: buffer.copyToChannel(SharedArrayBuffer view, 0)')
+ .throw(TypeError);
+
+ should(
+ () => {
+ buffer.copyToChannel(shared_buffer, 0, 0);
+ },
+ '8: buffer.copyToChannel(SharedArrayBuffer view, 0, 0)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // Test copyFromChannel
+ audit.define('copyFrom-validate', (task, should) => {
+ // Initialize the AudioBuffer to a ramp for testing copyFrom.
+ initializeAudioBufferRamp(buffer);
+
+ // Test copyFrom operation with a short destination array, filling the
+ // destination completely.
+ for (let c = 0; c < numberOfChannels; ++c) {
+ let dst8 = createInitializedF32Array(8);
+ buffer.copyFromChannel(dst8, c);
+ shouldBeRamp(
+ should, 'buffer.copyFromChannel(dst8, ' + c + ')', dst8, c + 1, 8)
+ }
+
+ // Test copyFrom operation with a short destination array using a
+ // non-zero start index that still fills the destination completely.
+ for (let c = 0; c < numberOfChannels; ++c) {
+ let dst8 = createInitializedF32Array(8);
+ buffer.copyFromChannel(dst8, c, 1);
+ shouldBeRamp(
+ should, 'buffer.copyFromChannel(dst8, ' + c + ', 1)', dst8, c + 2,
+ 8)
+ }
+
+ // Test copyFrom operation with a short destination array using a
+ // non-zero start index that does not fill the destination completely.
+ // The extra elements should be unchanged.
+ for (let c = 0; c < numberOfChannels; ++c) {
+ let dst8 = createInitializedF32Array(8);
+ let startInChannel = bufferLength - 5;
+ buffer.copyFromChannel(dst8, c, startInChannel);
+ shouldBeRamp(
+ should,
+ 'buffer.copyFromChannel(dst8, ' + c + ', ' + startInChannel + ')',
+ dst8, c + 1 + startInChannel, bufferLength - startInChannel);
+ }
+
+ // Copy operation with the destination longer than the buffer, leaving
+ // the trailing elements of the destination untouched.
+ for (let c = 0; c < numberOfChannels; ++c) {
+ let dst26 = createInitializedF32Array(bufferLength + 10);
+ buffer.copyFromChannel(dst26, c);
+ shouldBeRamp(
+ should, 'buffer.copyFromChannel(dst26, ' + c + ')', dst26, c + 1,
+ bufferLength);
+ }
+
+ task.done();
+ });
+
+ // Test copyTo
+ audit.define('copyTo-validate', (task, should) => {
+ // Create a source consisting of a ramp starting at 1, longer than the
+ // AudioBuffer
+ let src = createFloat32RampArray(bufferLength + 10);
+
+ // Test copyTo with AudioBuffer shorter than Float32Array. The
+ // AudioBuffer should be completely filled with the Float32Array.
+ should(
+ () => {
+ buffer =
+ createConstantBuffer(context, bufferLength, initialValues);
+ },
+ 'buffer = createConstantBuffer(context, ' + bufferLength + ', [' +
+ initialValues + '])')
+ .notThrow();
+
+ for (let c = 0; c < numberOfChannels; ++c) {
+ buffer.copyToChannel(src, c);
+ shouldBeRamp(
+ should, 'buffer.copyToChannel(src, ' + c + ')',
+ buffer.getChannelData(c), 1, bufferLength);
+ }
+
+ // Test copyTo with AudioBuffer longer than the Float32Array. The tail
+ // of the AudioBuffer should be unchanged.
+ buffer = createConstantBuffer(context, bufferLength, initialValues);
+ let src10 = createFloat32RampArray(10);
+ for (let c = 0; c < numberOfChannels; ++c) {
+ buffer.copyToChannel(src10, c);
+ shouldBeRamp(
+ should, 'buffer.copyToChannel(src10, ' + c + ')',
+ buffer.getChannelData(c), 1, 10);
+ }
+
+ // Test copyTo with non-default startInChannel. Part of the AudioBuffer
+ // should be filled, with the beginning and end sections untouched.
+ buffer = createConstantBuffer(context, bufferLength, initialValues);
+ for (let c = 0; c < numberOfChannels; ++c) {
+ let startInChannel = 5;
+ buffer.copyToChannel(src10, c, startInChannel);
+
+ shouldBeRamp(
+ should,
+ 'buffer.copyToChannel(src10, ' + c + ', ' + startInChannel + ')',
+ buffer.getChannelData(c), 1, src10.length, startInChannel);
+ }
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-getChannelData.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-getChannelData.html
new file mode 100644
index 0000000000..612a91cf4e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-getChannelData.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioBuffer.getChannelData() Returns the Same Object
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let renderDuration = 0.5;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('buffer-eq', (task, should) => {
+ // Verify that successive calls to getChannelData return the same
+ // buffer.
+ let context = new AudioContext();
+ let channelCount = 2;
+ let frameLength = 1000;
+ let buffer =
+ context.createBuffer(channelCount, frameLength, context.sampleRate);
+
+ for (let c = 0; c < channelCount; ++c) {
+ let a = buffer.getChannelData(c);
+ let b = buffer.getChannelData(c);
+
+ let message = 'buffer.getChannelData(' + c + ')';
+ should(a === b, message + ' === ' + message).beEqualTo(true);
+ }
+
+ task.done();
+ });
+
+ audit.define('buffer-not-eq', (task, should) => {
+ let context = new AudioContext();
+ let channelCount = 2;
+ let frameLength = 1000;
+ let buffer1 =
+ context.createBuffer(channelCount, frameLength, context.sampleRate);
+ let buffer2 =
+ context.createBuffer(channelCount, frameLength, context.sampleRate);
+
+ for (let c = 0; c < channelCount; ++c) {
+ let a = buffer1.getChannelData(c);
+ let b = buffer2.getChannelData(c);
+
+ let message = 'getChannelData(' + c + ')';
+ should(a === b, 'buffer1.' + message + ' === buffer2.' + message)
+ .beEqualTo(false);
+ }
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-reuse.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-reuse.html
new file mode 100644
index 0000000000..dabe323cbe
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-reuse.html
@@ -0,0 +1,36 @@
+<!doctype html>
+<meta charset="utf-8">
+<title>AudioBuffer can be reused between AudioBufferSourceNodes</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+function render_audio_context() {
+ let sampleRate = 44100;
+ let context = new OfflineAudioContext(
+ 2, sampleRate * 0.1, sampleRate);
+ let buf = context.createBuffer(1, 0.1 * sampleRate, context.sampleRate);
+ let data = buf.getChannelData(0);
+ data[0] = 0.5;
+ data[1] = 0.25;
+ let b1 = context.createBufferSource();
+ b1.buffer = buf;
+ b1.start();
+ let b2 = context.createBufferSource();
+ b2.buffer = buf;
+ b2.start();
+ let merger = context.createChannelMerger(2);
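+ // Route each source to its own output channel so the two playbacks of the
+ // shared buffer can be verified independently.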
+ b1.connect(merger, 0, 0);
+ b2.connect(merger, 0, 1);
+ merger.connect(context.destination);
+ return context.startRendering();
+}
+promise_test(function() {
+ return render_audio_context()
+ .then(function(buffer) {
+ assert_equals(buffer.getChannelData(0)[0], 0.5);
+ assert_equals(buffer.getChannelData(1)[0], 0.5);
+ assert_equals(buffer.getChannelData(0)[1], 0.25);
+ assert_equals(buffer.getChannelData(1)[1], 0.25);
+ });
+}, "AudioBuffer can be reused between AudioBufferSourceNodes");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer.html
new file mode 100644
index 0000000000..a2c4581c4e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiobuffer.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 2;
+ let numberOfChannels = 4;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Basic tests for AudioBuffer', function(task, should) {
+ let context = new AudioContext();
+ let buffer = context.createBuffer(
+ numberOfChannels, sampleRate * lengthInSeconds, sampleRate);
+
+ // Just for printing out a message describing what "buffer" is in the
+ // following tests.
+ should(
+ true,
+ 'buffer = context.createBuffer(' + numberOfChannels + ', ' +
+ (sampleRate * lengthInSeconds) + ', ' + sampleRate + ')')
+ .beTrue();
+
+ should(buffer.sampleRate, 'buffer.sampleRate').beEqualTo(sampleRate);
+
+ should(buffer.length, 'buffer.length')
+ .beEqualTo(sampleRate * lengthInSeconds);
+
+ should(buffer.duration, 'buffer.duration').beEqualTo(lengthInSeconds);
+
+ should(buffer.numberOfChannels, 'buffer.numberOfChannels')
+ .beEqualTo(numberOfChannels);
+
+ for (let index = 0; index < buffer.numberOfChannels; ++index) {
+ should(
+ buffer.getChannelData(index) instanceof window.Float32Array,
+ 'buffer.getChannelData(' + index +
+ ') instanceof window.Float32Array')
+ .beTrue();
+ }
+
+ should(
+ function() {
+ buffer.getChannelData(buffer.numberOfChannels);
+ },
+ 'buffer.getChannelData(' + buffer.numberOfChannels + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ let buffer2 = context.createBuffer(1, 1000, 24576);
+ let expectedDuration = 1000 / 24576;
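+ // duration is defined as length / sampleRate; computing the same
+ // double-precision division here makes the equality check exact.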
+
+ should(
+ buffer2.duration, 'context.createBuffer(1, 1000, 24576).duration')
+ .beEqualTo(expectedDuration);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyFromChannel-bufferOffset-1.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyFromChannel-bufferOffset-1.html
new file mode 100644
index 0000000000..564317f7de
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyFromChannel-bufferOffset-1.html
@@ -0,0 +1,11 @@
+<html>
+<head>
+ <title>Test large bufferOffset in copyFromChannel()</title>
+</head>
+<script>
+ const a = new AudioBuffer({length: 0x51986, sampleRate: 44100});
+ const b = new Float32Array(0x10);
+ a.getChannelData(0); // to avoid zero data optimization
+ a.copyFromChannel(b, 0, 0x1523c7cc);
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyToChannel-bufferOffset-1.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyToChannel-bufferOffset-1.html
new file mode 100644
index 0000000000..999925a983
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/crashtests/copyToChannel-bufferOffset-1.html
@@ -0,0 +1,10 @@
+<html>
+<head>
+ <title>Test large bufferOffset in copyToChannel()</title>
+</head>
+<script>
+ const a = new AudioBuffer({length: 0x10, sampleRate: 44100});
+ const b = new Float32Array(0x51986);
+ a.copyToChannel(b, 0, 0x40004000);
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html
new file mode 100644
index 0000000000..fbe6e42e31
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html
@@ -0,0 +1,236 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: AudioBuffer
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ should(() => {
+ new AudioBuffer();
+ }, 'new AudioBuffer()').throw(TypeError);
+ should(() => {
+ new AudioBuffer(1);
+ }, 'new AudioBuffer(1)').throw(TypeError);
+ should(() => {
+ new AudioBuffer(Date, 42);
+ }, 'new AudioBuffer(Date, 42)').throw(TypeError);
+
+ task.done();
+ });
+
+ audit.define('required options', (task, should) => {
+ let buffer;
+
+ // The length and sampleRate attributes are required; all others are
+ // optional.
+ should(() => {
+ new AudioBuffer({});
+ }, 'buffer = new AudioBuffer({})').throw(TypeError);
+
+ should(() => {
+ new AudioBuffer({length: 1});
+ }, 'buffer = new AudioBuffer({length: 1})').throw(TypeError);
+
+ should(() => {
+ new AudioBuffer({sampleRate: 48000});
+ }, 'buffer = new AudioBuffer({sampleRate: 48000})').throw(TypeError);
+
+ should(() => {
+ buffer = new AudioBuffer({numberOfChannels: 1});
+ }, 'buffer = new AudioBuffer({numberOfChannels: 1})').throw(TypeError);
+
+ // Length and sampleRate are required, but others are optional.
+ should(
+ () => {
+ buffer =
+ new AudioBuffer({length: 21, sampleRate: context.sampleRate});
+ },
+ 'buffer0 = new AudioBuffer({length: 21, sampleRate: ' +
+ context.sampleRate + '})')
+ .notThrow();
+ // Verify the buffer has the correct values.
+ should(buffer.numberOfChannels, 'buffer0.numberOfChannels')
+ .beEqualTo(1);
+ should(buffer.length, 'buffer0.length').beEqualTo(21);
+ should(buffer.sampleRate, 'buffer0.sampleRate')
+ .beEqualTo(context.sampleRate);
+
+ should(
+ () => {
+ buffer = new AudioBuffer(
+ {numberOfChannels: 3, length: 1, sampleRate: 48000});
+ },
+ 'buffer1 = new AudioBuffer(' +
+ '{numberOfChannels: 3, length: 1, sampleRate: 48000})')
+ .notThrow();
+ // Verify the buffer has the correct values.
+ should(buffer.numberOfChannels, 'buffer1.numberOfChannels')
+ .beEqualTo(3);
+ should(buffer.length, 'buffer1.length').beEqualTo(1);
+ should(buffer.sampleRate, 'buffer1.sampleRate').beEqualTo(48000);
+
+ task.done();
+ });
+
+ audit.define('invalid option values', (task, should) => {
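+ // numberOfChannels, length, and sampleRate must all be positive and
+ // within the implementation's supported limits; each violation below is
+ // required to produce a NotSupportedError.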
+ let options = {numberOfChannels: 0, length: 1, sampleRate: 16000};
+ should(
+ () => {
+ let buffer = new AudioBuffer(options);
+ },
+ 'new AudioBuffer(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ options = {numberOfChannels: 99, length: 0, sampleRate: 16000};
+ should(
+ () => {
+ let buffer = new AudioBuffer(options);
+ },
+ 'new AudioBuffer(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ options = {numberOfChannels: 1, length: 0, sampleRate: 16000};
+ should(
+ () => {
+ let buffer = new AudioBuffer(options);
+ },
+ 'new AudioBuffer(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ options = {numberOfChannels: 1, length: 1, sampleRate: 100};
+ should(
+ () => {
+ let buffer = new AudioBuffer(options);
+ },
+ 'new AudioBuffer(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let buffer;
+
+ let options = {numberOfChannels: 5, length: 17, sampleRate: 16000};
+ should(
+ () => {
+ buffer = new AudioBuffer(options);
+ },
+ 'buffer = new AudioBuffer(' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(buffer.numberOfChannels, 'buffer.numberOfChannels')
+ .beEqualTo(options.numberOfChannels);
+ should(buffer.length, 'buffer.length').beEqualTo(options.length);
+ should(buffer.sampleRate, 'buffer.sampleRate').beEqualTo(16000);
+
+ task.done();
+ });
+
+ audit.define('valid constructor', (task, should) => {
+ let buffer;
+
+ let options = {numberOfChannels: 3, length: 42, sampleRate: 54321};
+
+ let message = 'new AudioBuffer(' + JSON.stringify(options) + ')';
+ should(() => {
+ buffer = new AudioBuffer(options);
+ }, message).notThrow();
+
+ should(buffer.numberOfChannels, 'buffer.numberOfChannels')
+ .beEqualTo(options.numberOfChannels);
+
+ should(buffer.length, 'buffer.length').beEqualTo(options.length);
+
+ should(buffer.sampleRate, 'buffer.sampleRate')
+ .beEqualTo(options.sampleRate);
+
+ // Verify that we actually got the right number of channels
+ for (let k = 0; k < options.numberOfChannels; ++k) {
+ let data;
+ let message = 'buffer.getChannelData(' + k + ')';
+ should(() => {
+ data = buffer.getChannelData(k);
+ }, message).notThrow();
+
+ should(data.length, message + ' length').beEqualTo(options.length);
+ }
+
+ should(
+ () => {
+ buffer.getChannelData(options.numberOfChannels);
+ },
+ 'buffer.getChannelData(' + options.numberOfChannels + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ task.done();
+ });
+
+ audit.define('multiple contexts', (task, should) => {
+ // Test that an AudioBuffer can be used for different contexts.
+ let buffer =
+ new AudioBuffer({length: 128, sampleRate: context.sampleRate});
+
+ // Don't use getChannelData here because we want to be able to use
+ // |data| to compare the final results of playing out this buffer. (If
+ // we did, |data| gets detached when the sources play.)
+ let data = new Float32Array(buffer.length);
+ for (let k = 0; k < data.length; ++k)
+ data[k] = 1 + k;
+ buffer.copyToChannel(data, 0);
+
+ let c1 = new OfflineAudioContext(1, 128, context.sampleRate);
+ let c2 = new OfflineAudioContext(1, 128, context.sampleRate);
+
+ let s1 = new AudioBufferSourceNode(c1, {buffer: buffer});
+ let s2 = new AudioBufferSourceNode(c2, {buffer: buffer});
+
+ s1.connect(c1.destination);
+ s2.connect(c2.destination);
+
+ s1.start();
+ s2.start();
+
+ Promise
+ .all([c1.startRendering(), c2.startRendering()])
+ .then(resultBuffers => {
+ let c1ResultValue = should(resultBuffers[0].getChannelData(0), 'c1 result')
+ .beEqualToArray(data);
+ let c2ResultValue = should(resultBuffers[1].getChannelData(0), 'c2 result')
+ .beEqualToArray(data);
+ should(
+ c1ResultValue && c2ResultValue,
+ 'AudioBuffer shared between two different contexts')
+ .message('correctly', 'incorrectly');
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html
new file mode 100644
index 0000000000..0fa3089a34
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html
@@ -0,0 +1,100 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Active Processing for AudioBufferSourceNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate; we only need a few render quanta to see if
+ // things are working.
+ let sampleRate = 8000;
+ let renderLength = 10 * RENDER_QUANTUM_FRAMES;
+
+ // Offline context used for the tests.
+ let context;
+
+ // Number of channels for the AudioBufferSource. Fairly arbitrary, but
+ // should be more than 2.
+ let numberOfChannels = 7;
+
+ // Number of frames in the AudioBuffer. Fairly arbitrary, but should
+ // probably be more than one render quantum and significantly less than
+ // |renderLength|.
+ let bufferFrames = 131;
+
+ let filePath =
+ '../the-audioworklet-interface/processors/input-count-processor.js';
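+ // input-count-processor.js (shared with the AudioWorklet tests) is
+ // assumed to register a 'counter' processor that writes its current input
+ // channel count into every output frame; the verification task below
+ // reads those counts back from the rendered buffer.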
+
+ audit.define('Setup graph', (task, should) => {
+ context =
+ new OfflineAudioContext(numberOfChannels, renderLength, sampleRate);
+
+ should(
+ context.audioWorklet.addModule(filePath).then(() => {
+ let buffer = new AudioBuffer({
+ numberOfChannels: numberOfChannels,
+ length: bufferFrames,
+ sampleRate: context.sampleRate
+ });
+
+ let src = new AudioBufferSourceNode(context, {buffer: buffer});
+ let counter = new AudioWorkletNode(context, 'counter');
+
+ src.connect(counter).connect(context.destination);
+ src.start();
+ }),
+ 'AudioWorklet and graph construction')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('verify count change', (task, should) => {
+ context.startRendering()
+ .then(renderedBuffer => {
+ let output = renderedBuffer.getChannelData(0);
+
+ // Find the first time the number of channels changes to 1.
+ let countChangeIndex = output.findIndex(x => x == 1);
+
+ // Verify that the count did change. If it didn't, there's a bug in
+ // the implementation, or it takes longer than the render length to
+ // change. For the latter case, increase the render length, but it
+ // can't be arbitrarily large. The change needs to happen at some
+ // reasonable time after the source stops.
+ should(countChangeIndex >= 0, 'Number of channels changed')
+ .beTrue();
+ should(
+ countChangeIndex, 'Index where input channel count changed')
+ .beLessThanOrEqualTo(renderLength);
+
+ // Verify the number of channels at the beginning matches the
+ // number of channels in the AudioBuffer.
+ should(
+ output.slice(0, countChangeIndex),
+ `Number of channels in input[0:${countChangeIndex - 1}]`)
+ .beConstantValueOf(numberOfChannels);
+
+ // Verify that after the source has stopped, the number of
+ // channels is 1.
+ should(
+ output.slice(countChangeIndex),
+ `Number of channels in input[${countChangeIndex}:]`)
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html
new file mode 100644
index 0000000000..6ce7eb0c10
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-basic.html
@@ -0,0 +1,37 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Basic Test of AudioBufferSourceNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/start-stop-exceptions.js"></script>
+ </head>
+ <script id="layout-test-code">
+ let sampleRate = 44100;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('start/stop exceptions', (task, should) => {
+ // We're not going to render anything, so make it simple
+ let context = new OfflineAudioContext(1, 1, sampleRate);
+ let node = new AudioBufferSourceNode(context);
+
+ testStartStop(should, node, [
+ {args: [0, -1], errorType: RangeError},
+ {args: [0, 0, -1], errorType: RangeError}
+ ]);
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ <body>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html
new file mode 100644
index 0000000000..f3f16c4c64
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-channels.html
@@ -0,0 +1,97 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiobuffersource-channels.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let context;
+ let source;
+
+ audit.define(
+ {
+ label: 'validate .buffer',
+ description:
+ 'Validation of AudioBuffer in .buffer attribute setter'
+ },
+ function(task, should) {
+ context = new AudioContext();
+ source = context.createBufferSource();
+
+ // Make sure we can't set to something which isn't an AudioBuffer.
+ should(function() {
+ source.buffer = 57;
+ }, 'source.buffer = 57').throw(TypeError);
+
+ // It's ok to set the buffer to null.
+ should(function() {
+ source.buffer = null;
+ }, 'source.buffer = null').notThrow();
+
+ // Set the buffer to a valid AudioBuffer
+ let buffer =
+ new AudioBuffer({length: 128, sampleRate: context.sampleRate});
+
+ should(function() {
+ source.buffer = buffer;
+ }, 'source.buffer = buffer').notThrow();
+
+ // The buffer has been set; we can't set it again.
+ should(function() {
+ source.buffer =
+ new AudioBuffer({length: 128, sampleRate: context.sampleRate})
+ }, 'source.buffer = new buffer').throw(DOMException, 'InvalidStateError');
+
+ // The buffer has been set; it's ok to set it to null.
+ should(function() {
+ source.buffer = null;
+ }, 'source.buffer = null again').notThrow();
+
+ // The buffer was already set (and set to null). Can't set it
+ // again.
+ should(function() {
+ source.buffer = buffer;
+ }, 'source.buffer = buffer again').throw(DOMException, 'InvalidStateError');
+
+ // But setting to null is ok.
+ should(function() {
+ source.buffer = null;
+ }, 'source.buffer = null after setting to null').notThrow();
+
+ // Check that mono buffer can be set.
+ should(function() {
+ let monoBuffer =
+ context.createBuffer(1, 1024, context.sampleRate);
+ let testSource = context.createBufferSource();
+ testSource.buffer = monoBuffer;
+ }, 'Setting source with mono buffer').notThrow();
+
+ // Check that stereo buffer can be set.
+ should(function() {
+ let stereoBuffer =
+ context.createBuffer(2, 1024, context.sampleRate);
+ let testSource = context.createBufferSource();
+ testSource.buffer = stereoBuffer;
+ }, 'Setting source with stereo buffer').notThrow();
+
+ // Check buffers with more than two channels.
+ for (let i = 3; i < 10; ++i) {
+ should(function() {
+ let buffer = context.createBuffer(i, 1024, context.sampleRate);
+ let testSource = context.createBufferSource();
+ testSource.buffer = buffer;
+ }, 'Setting source with ' + i + ' channels buffer').notThrow();
+ }
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html
new file mode 100644
index 0000000000..abb8983cc0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-duration-loop.html
@@ -0,0 +1,52 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioBufferSourceNode With Looping And Duration
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ audit.define('loop with duration', (task, should) => {
+ // Create the context
+ let context = new OfflineAudioContext(1, 4096, 48000);
+
+ // Create the sample buffer and fill the second half with 1
+ let buffer = context.createBuffer(1, 2048, context.sampleRate);
+ for (let i = 1024; i < 2048; i++) {
+ buffer.getChannelData(0)[i] = 1;
+ }
+
+ // Create the source and set its value
+ let source = context.createBufferSource();
+ source.loop = true;
+ source.loopStart = 1024 / context.sampleRate;
+ source.loopEnd = 2048 / context.sampleRate;
+ source.buffer = buffer;
+ source.connect(context.destination);
+ source.start(0, 1024 / context.sampleRate, 2048 / context.sampleRate);
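+ // start(when, offset, duration): begin reading at the loop start and
+ // play for 2048 frames' worth of time. Looping keeps the read position
+ // inside the all-ones half, and the duration limit then stops the
+ // source, leaving the remaining 2048 frames silent.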
+ // Expectations
+ let expected = new Float32Array(4096);
+ for (let i = 0; i < 2048; i++) {
+ expected[i] = 1;
+ }
+ // Render it!
+ context.startRendering()
+ .then(function(audioBuffer) {
+ should(
+ audioBuffer.getChannelData(0), 'audioBuffer.getChannelData')
+ .beEqualToArray(expected);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html
new file mode 100644
index 0000000000..b9922f61ef
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-ended.html
@@ -0,0 +1,40 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiobuffersource-ended.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audiobuffersource-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context;
+ let source;
+
+ audit.define(
+ 'AudioBufferSourceNode calls its onended EventListener',
+ function(task, should) {
+ let sampleRate = 44100.0;
+ let numberOfFrames = 32;
+ context = new OfflineAudioContext(1, numberOfFrames, sampleRate);
+ source = context.createBufferSource();
+ source.buffer = createTestBuffer(context, numberOfFrames);
+ source.connect(context.destination);
+ source.onended = function() {
+ should(true, 'source.onended called').beTrue();
+ task.done();
+ };
+ source.start(0);
+ context.startRendering();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html
new file mode 100644
index 0000000000..f554304a21
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-grain.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Start Grain with Delayed Buffer Setting
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let context;
+ let source;
+ let buffer;
+ let renderedData;
+
+ let sampleRate = 44100;
+
+ let testDurationSec = 1;
+ let testDurationSamples = testDurationSec * sampleRate;
+ let startTime = 0.9 * testDurationSec;
+
+ audit.define(
+ 'Test setting the source buffer after starting the grain',
+ function(task, should) {
+ context =
+ new OfflineAudioContext(1, testDurationSamples, sampleRate);
+
+ buffer = createConstantBuffer(context, testDurationSamples, 1);
+ source = context.createBufferSource();
+ source.connect(context.destination);
+
+ // Start the source BEFORE we set the buffer. The grain offset and
+ // duration aren't important, as long as we specify some offset.
+ source.start(startTime, .1);
+ source.buffer = buffer;
+
+ // Render it!
+ context.startRendering()
+ .then(function(buffer) {
+ checkResult(buffer, should);
+ })
+ .then(task.done.bind(task));
+ });
+
+ function checkResult(buffer, should) {
+ let success = false;
+
+ renderedData = buffer.getChannelData(0);
+
+ // Check that the rendered data is not all zeroes. Any non-zero data
+ // means the test passed.
+ for (let k = 0; k < renderedData.length; ++k) {
+ if (renderedData[k]) {
+ success = true;
+ break;
+ }
+ }
+
+ should(success, 'Buffer was played').beTrue();
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html
new file mode 100644
index 0000000000..4e0de21e96
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-multi-channels.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<!--
+Test that AudioBufferSourceNode supports 5.1-channel buffers.
+-->
+<html>
+ <head>
+ <title>
+ audiobuffersource-multi-channels.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/mix-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let context;
+ let expectedAudio;
+
+ audit.define('initialize', (task, should) => {
+ // Create offline audio context
+ let sampleRate = 44100.0;
+ should(() => {
+ context = new OfflineAudioContext(
+ 6, sampleRate * toneLengthSeconds, sampleRate);
+ }, 'Creating context for testing').notThrow();
+ should(
+ Audit
+ .loadFileFromUrl('resources/audiobuffersource-multi-channels-expected.wav')
+ .then(arrayBuffer => {
+ context.decodeAudioData(arrayBuffer).then(audioBuffer => {
+ expectedAudio = audioBuffer;
+ task.done();
+ }).catch(error => {
+ assert_unreached("Could not decode audio data due to " + error.message);
+ })
+ })
+ , 'Fetching expected audio').beResolved();
+ });
+
+ audit.define(
+ {label: 'test', description: 'AudioBufferSource with 5.1 buffer'},
+ (task, should) => {
+ let toneBuffer =
+ createToneBuffer(context, 440, toneLengthSeconds, 6);
+
+ let source = context.createBufferSource();
+ source.buffer = toneBuffer;
+
+ source.connect(context.destination);
+ source.start(0);
+
+ context.startRendering()
+ .then(renderedAudio => {
+ // Compute a threshold based on the maximum error, |maxUlp|,
+ // in ULP. This is experimentally determined. Assuming the
+ // reference file is a 16-bit wav file, its sample values span
+ // roughly +/- 32768.
+ let maxUlp = 1;
+ let threshold = maxUlp / 32768;
+ for (let k = 0; k < renderedAudio.numberOfChannels; ++k) {
+ should(
+ renderedAudio.getChannelData(k),
+ 'Rendered audio for channel ' + k)
+ .beCloseToArray(
+ expectedAudio.getChannelData(k),
+ {absoluteThreshold: threshold});
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html
new file mode 100644
index 0000000000..b5b1ec0c3d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-null.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test ABSN Outputs Silence if buffer is null
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ audit.define('ABSN with null buffer', (task, should) => {
+ // Create test context. Length and sampleRate are pretty arbitrary, but
+ // we don't need either to be very large.
+ const context = new OfflineAudioContext(
+ {numberOfChannels: 1, length: 1024, sampleRate: 8192});
+
+ // Just create a constant buffer for testing. Anything will do as long
+ // as the buffer contents are not identically zero.
+ const audioBuffer =
+ new AudioBuffer({length: 10, sampleRate: context.sampleRate});
+ const audioBufferSourceNode = new AudioBufferSourceNode(context);
+
+ audioBuffer.getChannelData(0).fill(1);
+
+ // These two tests are mostly for the informational messages to show
+ // what's happening. They should never fail!
+ should(() => {
+ audioBufferSourceNode.buffer = audioBuffer;
+ }, 'Setting ABSN.buffer to AudioBuffer').notThrow();
+
+ // This is the important part. Setting the buffer to null after setting
+ // it to something else should cause the source to produce silence.
+ should(() => {
+ audioBufferSourceNode.buffer = null;
+ }, 'Setting ABSN.buffer = null').notThrow();
+
+ audioBufferSourceNode.start(0);
+ audioBufferSourceNode.connect(context.destination);
+
+ context.startRendering()
+ .then(buffer => {
+ // Since the buffer is null, the output of the source should be
+ // silence.
+ should(buffer.getChannelData(0), 'ABSN output')
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html
new file mode 100644
index 0000000000..af1454a5a9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-one-sample-loop.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioBufferSourceNode With Looping a Single-Sample Buffer
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100;
+ let testDurationSamples = 1000;
+
+ audit.define('one-sample-loop', function(task, should) {
+ // Create the offline context for the test.
+ let context =
+ new OfflineAudioContext(1, testDurationSamples, sampleRate);
+
+ // Create the single sample buffer
+ let buffer = createConstantBuffer(context, 1, 1);
+
+ // Create the source and connect it to the destination
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+ source.loop = true;
+ source.connect(context.destination);
+ source.start();
+
+ // Render it!
+ context.startRendering()
+ .then(function(audioBuffer) {
+ should(audioBuffer.getChannelData(0), 'Rendered data')
+ .beConstantValueOf(1);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
new file mode 100644
index 0000000000..5624054e32
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-playbackrate-zero.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiobuffersource-playbackrate-zero.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+      // The sample rate should be a power of two (and hence a multiple of the
+      // 128-frame render quantum) so that the AudioParam change lands exactly
+      // at the start of a rendering quantum. (playbackRate is k-rate.) 8192
+      // is the lowest such rate within the valid sample rate range.
+ let sampleRate = 8192;
+
+ // The render duration in seconds, and the length in samples.
+ let renderDuration = 1.0;
+ let renderLength = renderDuration * sampleRate;
+
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+ let audit = Audit.createTaskRunner();
+
+
+ // Task: Render the actual buffer and compare with the reference.
+ audit.define('synthesize-verify', (task, should) => {
+ let ramp = context.createBufferSource();
+ let rampBuffer = createLinearRampBuffer(context, renderLength);
+ ramp.buffer = rampBuffer;
+
+ ramp.connect(context.destination);
+ ramp.start();
+
+        // Leave the playbackRate at 1 for the first half, then change it
+        // to zero at exactly the half-way point. The zero playback rate
+        // should hold the sample value at that buffer index. (sample-and-hold)
+ ramp.playbackRate.setValueAtTime(1.0, 0.0);
+ ramp.playbackRate.setValueAtTime(0.0, renderDuration / 2);
+
+ context.startRendering()
+ .then(function(renderedBuffer) {
+ let data = renderedBuffer.getChannelData(0);
+ let rampData = rampBuffer.getChannelData(0);
+ let half = rampData.length / 2;
+ let passed = true;
+ let i;
+
+ for (i = 1; i < rampData.length; i++) {
+ if (i < half) {
+ // Before the half position, the actual should match with the
+ // original ramp data.
+ if (data[i] !== rampData[i]) {
+ passed = false;
+ break;
+ }
+ } else {
+ // From the half position, the actual value should not change.
+ if (data[i] !== rampData[half]) {
+ passed = false;
+ break;
+ }
+ }
+ }
+
+ should(passed, 'The zero playbackRate')
+ .message(
+ 'held the sample value correctly',
+ 'should hold the sample value. ' +
+ 'Expected ' + rampData[half] + ' but got ' + data[i] +
+ ' at the index ' + i);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('subsample start with playback rate 0', (task, should) => {
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+ let rampBuffer = new AudioBuffer(
+ {length: renderLength, sampleRate: context.sampleRate});
+ let data = new Float32Array(renderLength);
+ let startValue = 5;
+ for (let k = 0; k < data.length; ++k) {
+ data[k] = k + startValue;
+ }
+ rampBuffer.copyToChannel(data, 0);
+
+ let src = new AudioBufferSourceNode(
+ context, {buffer: rampBuffer, playbackRate: 0});
+
+ src.connect(context.destination);
+
+ // Purposely start the source between frame boundaries
+ let startFrame = 27.3;
+ src.start(startFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let actualStartFrame = Math.ceil(startFrame);
+ let audio = audioBuffer.getChannelData(0);
+
+ should(
+ audio.slice(0, actualStartFrame),
+ `output[0:${actualStartFrame - 1}]`)
+ .beConstantValueOf(0);
+ should(
+ audio.slice(actualStartFrame), `output[${actualStartFrame}:]`)
+ .beConstantValueOf(startValue);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
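To make the sample-and-hold expectation in the first task concrete, here is a sketch of the reference signal it implies, assuming the ramp helper fills the buffer with a normalized ramp i / (length - 1) (the actual createLinearRampBuffer helper may scale differently):

    function expectedSampleAndHold(length) {
      const half = length / 2;
      const out = new Float32Array(length);
      for (let i = 0; i < length; ++i) {
        // Follow the ramp while playbackRate is 1, then hold the value
        // at the half-way index once playbackRate drops to 0.
        out[i] = (i < half ? i : half) / (length - 1);
      }
      return out;
    }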
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html
new file mode 100644
index 0000000000..19331954b0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiobuffersource-start.html
@@ -0,0 +1,174 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiobuffersource-start.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audiobuffersource-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // The following test cases assume an AudioBuffer of length 8 whose PCM
+ // data is a linear ramp, 0, 1, 2, 3,...
+
+ let tests = [
+
+ {
+ description:
+ 'start(when): implicitly play whole buffer from beginning to end',
+ offsetFrame: 'none',
+ durationFrames: 'none',
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 0): play whole buffer from beginning to end explicitly giving offset of 0',
+ offsetFrame: 0,
+ durationFrames: 'none',
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 0, 8_frames): play whole buffer from beginning to end explicitly giving offset of 0 and duration of 8 frames',
+ offsetFrame: 0,
+ durationFrames: 8,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 4_frames): play with explicit non-zero offset',
+ offsetFrame: 4,
+ durationFrames: 'none',
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 4_frames, 4_frames): play with explicit non-zero offset and duration',
+ offsetFrame: 4,
+ durationFrames: 4,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 7_frames): play with explicit non-zero offset near end of buffer',
+ offsetFrame: 7,
+ durationFrames: 1,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 8_frames): play with explicit offset at end of buffer',
+ offsetFrame: 8,
+ durationFrames: 0,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ {
+ description:
+ 'start(when, 9_frames): play with explicit offset past end of buffer',
+ offsetFrame: 8,
+ durationFrames: 0,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+ // When the duration exceeds the buffer, just play to the end of the
+ // buffer. (This is different from the case when we're looping, which is
+ // tested in loop-comprehensive.)
+ {
+ description:
+ 'start(when, 0, 15_frames): play with whole buffer, with long duration (clipped)',
+ offsetFrame: 0,
+ durationFrames: 15,
+ renderFrames: 16,
+ playbackRate: 1,
+ expected: [0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0]
+ },
+
+      // Enable this test when the AudioBufferSourceNode hack is fixed:
+      // https://bugs.webkit.org/show_bug.cgi?id=77224
+      // {
+      //   description: 'start(when, 3_frames, 3_frames): play a middle
+      //       section with explicit offset and duration',
+      //   offsetFrame: 3,
+      //   durationFrames: 3,
+      //   renderFrames: 16,
+      //   playbackRate: 1,
+      //   expected: [4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
+      // },
+
+ ];
+
+ let sampleRate = 44100;
+ let buffer;
+ let bufferFrameLength = 8;
+ let testSpacingFrames = 32;
+ let testSpacingSeconds = testSpacingFrames / sampleRate;
+ let totalRenderLengthFrames = tests.length * testSpacingFrames;
+
+ function runLoopTest(context, testNumber, test) {
+ let source = context.createBufferSource();
+
+ source.buffer = buffer;
+ source.playbackRate.value = test.playbackRate;
+
+ source.connect(context.destination);
+
+ // Render each test one after the other, spaced apart by
+ // testSpacingSeconds.
+ let startTime = testNumber * testSpacingSeconds;
+
+ if (test.offsetFrame == 'none' && test.durationFrames == 'none') {
+ source.start(startTime);
+ } else if (test.durationFrames == 'none') {
+ let offset = test.offsetFrame / context.sampleRate;
+ source.start(startTime, offset);
+ } else {
+ let offset = test.offsetFrame / context.sampleRate;
+ let duration = test.durationFrames / context.sampleRate;
+ source.start(startTime, offset, duration);
+ }
+ }
+
+ audit.define(
+ 'Tests AudioBufferSourceNode start()', function(task, should) {
+ // Create offline audio context.
+ let context =
+ new OfflineAudioContext(1, totalRenderLengthFrames, sampleRate);
+ buffer = createTestBuffer(context, bufferFrameLength);
+
+ for (let i = 0; i < tests.length; ++i)
+ runLoopTest(context, i, tests[i]);
+
+ context.startRendering().then(function(audioBuffer) {
+ checkAllTests(audioBuffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
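The expected arrays in the test table above can be derived mechanically from (offsetFrame, durationFrames) for the non-looping case. A hedged sketch of that derivation, assuming the 8-frame ramp buffer 0..7 used by the test (the function name is illustrative):

    function expectedOutput(offsetFrame, durationFrames, renderFrames) {
      const bufferLength = 8;
      const offset = offsetFrame === 'none' ? 0 : offsetFrame;
      // Durations past the end of the buffer are clipped to the buffer.
      const frames = durationFrames === 'none'
          ? bufferLength - offset
          : Math.min(durationFrames, bufferLength - offset);
      const out = new Array(renderFrames).fill(0);
      for (let i = 0; i < frames; ++i) {
        out[i] = offset + i;
      }
      return out;
    }
    // e.g. expectedOutput(4, 'none', 16) -> [4, 5, 6, 7, 0, 0, 0, ...]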
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html
new file mode 100644
index 0000000000..20ef4a1c63
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-onended.html
@@ -0,0 +1,101 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Onended Event Listener
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 44100;
+ let renderLengthSeconds = 1;
+ let renderLengthFrames = renderLengthSeconds * sampleRate;
+
+ // Length of the source buffer. Anything less than the render length is
+ // fine.
+ let sourceBufferLengthFrames = renderLengthFrames / 8;
+ // When to stop the oscillator. Anything less than the render time is
+ // fine.
+ let stopTime = renderLengthSeconds / 8;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('absn-set-onended', (task, should) => {
+ // Test that the onended event for an AudioBufferSourceNode is fired
+ // when it is set directly.
+ let context =
+ new OfflineAudioContext(1, renderLengthFrames, sampleRate);
+ let buffer = context.createBuffer(
+ 1, sourceBufferLengthFrames, context.sampleRate);
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(context.destination);
+ source.onended = function(e) {
+ should(
+ true, 'AudioBufferSource.onended called when ended set directly')
+ .beEqualTo(true);
+ };
+ source.start();
+ context.startRendering().then(() => task.done());
+ });
+
+ audit.define('absn-add-listener', (task, should) => {
+ // Test that the onended event for an AudioBufferSourceNode is fired
+ // when addEventListener is used to set the handler.
+ let context =
+ new OfflineAudioContext(1, renderLengthFrames, sampleRate);
+ let buffer = context.createBuffer(
+ 1, sourceBufferLengthFrames, context.sampleRate);
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(context.destination);
+ source.addEventListener('ended', function(e) {
+ should(
+ true,
+ 'AudioBufferSource.onended called when using addEventListener')
+ .beEqualTo(true);
+ });
+ source.start();
+ context.startRendering().then(() => task.done());
+ });
+
+ audit.define('osc-set-onended', (task, should) => {
+ // Test that the onended event for an OscillatorNode is fired when it is
+ // set directly.
+ let context =
+ new OfflineAudioContext(1, renderLengthFrames, sampleRate);
+ let source = context.createOscillator();
+ source.connect(context.destination);
+ source.onended = function(e) {
+ should(true, 'Oscillator.onended called when ended set directly')
+ .beEqualTo(true);
+ };
+ source.start();
+ source.stop(stopTime);
+ context.startRendering().then(() => task.done());
+ });
+
+ audit.define('osc-add-listener', (task, should) => {
+ // Test that the onended event for an OscillatorNode is fired when
+ // addEventListener is used to set the handler.
+ let context =
+ new OfflineAudioContext(1, renderLengthFrames, sampleRate);
+ let source = context.createOscillator();
+ source.connect(context.destination);
+ source.addEventListener('ended', function(e) {
+ should(true, 'Oscillator.onended called when using addEventListener')
+ .beEqualTo(true);
+ });
+ source.start();
+ source.stop(stopTime);
+ context.startRendering().then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
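The four tasks above register the handler in two styles; a Promise-based variant of the same registration (a sketch using only standard DOM APIs, not the audit.js harness) can make such tests easier to sequence:

    function ended(node) {
      // Resolves the first time the node fires its 'ended' event.
      return new Promise(resolve =>
          node.addEventListener('ended', resolve, {once: true}));
    }
    // Usage: await Promise.all([ended(source), context.startRendering()]);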
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html
new file mode 100644
index 0000000000..3ac9c05938
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html
@@ -0,0 +1,74 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Scheduled Sources with Huge Time Limits
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let renderFrames = 1000;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('buffersource: huge stop time', (task, should) => {
+ // We only need to generate a small number of frames for this test.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let src = context.createBufferSource();
+
+ // Constant source of amplitude 1, looping.
+ src.buffer = createConstantBuffer(context, 1, 1);
+ src.loop = true;
+
+ // Create the graph and go!
+ let endTime = 1e300;
+ src.connect(context.destination);
+ src.start();
+ src.stop(endTime);
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ let result = resultBuffer.getChannelData(0);
+ should(
+ result, 'Output from AudioBufferSource.stop(' + endTime + ')')
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+
+ audit.define('oscillator: huge stop time', (task, should) => {
+ // We only need to generate a small number of frames for this test.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let src = context.createOscillator();
+
+ // Create the graph and go!
+ let endTime = 1e300;
+ src.connect(context.destination);
+ src.start();
+ src.stop(endTime);
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ let result = resultBuffer.getChannelData(0);
+ // The buffer should not be empty. Just find the max and verify
+ // that it's not zero.
+ let max = Math.max.apply(null, result);
+ should(
+ max, 'Peak amplitude from oscillator.stop(' + endTime + ')')
+ .beGreaterThan(0);
+ })
+ .then(() => task.done());
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html
new file mode 100644
index 0000000000..c181ceb8e0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/buffer-resampling.html
@@ -0,0 +1,101 @@
+<!doctype html>
+<html>
+ <head>
+    <title>Test Extrapolation at end of AudioBuffer in an AudioBufferSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ const sampleRate = 48000;
+
+ // For testing we only need a few render quanta.
+      const renderSamples = 512;
+
+ // Sample rate for our buffers. This is the lowest sample rate that is
+ // required to be supported.
+ const bufferRate = 8000;
+
+ // Number of samples in each AudioBuffer; this is fairly arbitrary but
+ // should be less than a render quantum.
+ const bufferLength = 30;
+
+ // Frequency of the sine wave for testing.
+ const frequency = 440;
+
+ audit.define(
+ {
+ label: 'interpolate',
+ description: 'Interpolation of AudioBuffers to context sample rate'
+ },
+ (task, should) => {
+ // The first channel is for the interpolated signal, and the second
+ // channel is for the reference signal from an oscillator.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ length: renderSamples,
+ sampleRate: sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+                context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Create a set of AudioBuffers which are samples from a pure sine
+ // wave with frequency |frequency|.
+ const nBuffers = Math.floor(context.length / bufferLength);
+ const omega = 2 * Math.PI * frequency / bufferRate;
+
+ let frameNumber = 0;
+ let startTime = 0;
+
+ for (let k = 0; k < nBuffers; ++k) {
+ let buffer = new AudioBuffer(
+ {length: bufferLength, sampleRate: bufferRate});
+ let data = buffer.getChannelData(0);
+ for (let n = 0; n < bufferLength; ++n) {
+ data[n] = Math.sin(omega * frameNumber);
+ ++frameNumber;
+ }
+ // Create a source using this buffer and start it at the end of
+ // the previous buffer.
+ let src = new AudioBufferSourceNode(context, {buffer: buffer});
+
+ src.connect(merger, 0, 0);
+ src.start(startTime);
+ startTime += buffer.duration;
+ }
+
+ // Create the reference sine signal using an oscillator.
+ let osc = new OscillatorNode(
+ context, {type: 'sine', frequency: frequency});
+ osc.connect(merger, 0, 1);
+ osc.start(0);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let actual = audioBuffer.getChannelData(0);
+ let expected = audioBuffer.getChannelData(1);
+
+ should(actual, 'Interpolated sine wave')
+ .beCloseToArray(expected, {absoluteThreshold: 9.0348e-2});
+
+ // Compute SNR between them.
+ let snr = 10 * Math.log10(computeSNR(actual, expected));
+
+ should(snr, `SNR (${snr.toPrecision(4)} dB)`)
+ .beGreaterThanOrEqualTo(37.17);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
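The computeSNR helper used above is defined in /webaudio/resources/audit-util.js. A hedged reconstruction of its core (illustrative; the shipped helper may differ in detail): it returns the ratio of signal power to error power, which callers convert to dB with 10 * Math.log10(...):

    function computeSNRSketch(actual, expected) {
      let signalPower = 0;
      let noisePower = 0;
      for (let i = 0; i < actual.length; ++i) {
        signalPower += expected[i] * expected[i];
        const diff = actual[i] - expected[i];
        noisePower += diff * diff;
      }
      return signalPower / noisePower;
    }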
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html
new file mode 100644
index 0000000000..c1c3203451
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/ctor-audiobuffersource.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: AudioBufferSource
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'AudioBufferSourceNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node =
+ testDefaultConstructor(should, 'AudioBufferSourceNode', context, {
+ prefix: prefix,
+ numberOfInputs: 0,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'buffer', value: null},
+ {name: 'detune', value: 0},
+ {name: 'loop', value: false},
+ {name: 'loopEnd', value: 0.0},
+ {name: 'loopStart', value: 0.0},
+ {name: 'playbackRate', value: 1.0},
+ ]);
+
+ task.done();
+ });
+
+ audit.define('nullable buffer', (task, should) => {
+ let node;
+ let options = {buffer: null};
+
+ should(
+ () => {
+ node = new AudioBufferSourceNode(context, options);
+ },
+ 'node1 = new AudioBufferSourceNode(c, ' + JSON.stringify(options))
+ .notThrow();
+
+ should(node.buffer, 'node1.buffer').beEqualTo(null);
+
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+ let buffer = context.createBuffer(2, 1000, context.sampleRate);
+
+ let options = {
+ buffer: buffer,
+ detune: .5,
+ loop: true,
+ loopEnd: (buffer.length / 2) / context.sampleRate,
+ loopStart: 5 / context.sampleRate,
+ playbackRate: .75
+ };
+
+ let message = 'node = new AudioBufferSourceNode(c, ' +
+ JSON.stringify(options) + ')';
+
+ should(() => {
+ node = new AudioBufferSourceNode(context, options);
+ }, message).notThrow();
+
+ // Use the factory method to create an equivalent node and compare the
+ // results from the constructor against this node.
+ let factoryNode = context.createBufferSource();
+ factoryNode.buffer = options.buffer;
+ factoryNode.detune.value = options.detune;
+ factoryNode.loop = options.loop;
+ factoryNode.loopEnd = options.loopEnd;
+ factoryNode.loopStart = options.loopStart;
+ factoryNode.playbackRate.value = options.playbackRate;
+
+ should(node.buffer === buffer, 'node2.buffer === buffer')
+ .beEqualTo(true);
+ should(node.detune.value, 'node2.detune.value')
+ .beEqualTo(factoryNode.detune.value);
+ should(node.loop, 'node2.loop').beEqualTo(factoryNode.loop);
+ should(node.loopEnd, 'node2.loopEnd').beEqualTo(factoryNode.loopEnd);
+ should(node.loopStart, 'node2.loopStart')
+ .beEqualTo(factoryNode.loopStart);
+ should(node.playbackRate.value, 'node2.playbackRate.value')
+ .beEqualTo(factoryNode.playbackRate.value);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html
new file mode 100644
index 0000000000..37c4462add
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-play.html
@@ -0,0 +1,121 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ note-grain-on-play.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/note-grain-on-testing.js"></script>
+ </head>
+ <body>
+ <div id="description"></div>
+ <div id="console"></div>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // To test noteGrainOn, a single ramp signal is created.
+ // Various sections of the ramp are rendered by noteGrainOn() at
+ // different times, and we verify that the actual output
+ // consists of the correct section of the ramp at the correct
+ // time.
+
+ let linearRampBuffer;
+
+ // Array of the grain offset used for each ramp played.
+ let grainOffsetTime = [];
+
+ // Verify the received signal is a ramp from the correct section
+ // of our ramp signal.
+ function verifyGrain(renderedData, startFrame, endFrame, grainIndex) {
+ let grainOffsetFrame =
+ timeToSampleFrame(grainOffsetTime[grainIndex], sampleRate);
+ let grainFrameLength = endFrame - startFrame;
+ let ramp = linearRampBuffer.getChannelData(0);
+ let isCorrect = true;
+
+ let expected;
+ let actual;
+ let frame;
+
+ for (let k = 0; k < grainFrameLength; ++k) {
+ if (renderedData[startFrame + k] != ramp[grainOffsetFrame + k]) {
+ expected = ramp[grainOffsetFrame + k];
+ actual = renderedData[startFrame + k];
+ frame = startFrame + k;
+ isCorrect = false;
+ break;
+ }
+ }
+ return {
+ verified: isCorrect,
+ expected: expected,
+ actual: actual,
+ frame: frame
+ };
+ }
+
+ function checkResult(buffer, should) {
+ renderedData = buffer.getChannelData(0);
+ let nSamples = renderedData.length;
+
+      // Number of grains found to contain incorrect data.
+ let invalidGrainDataCount = 0;
+
+ let startEndFrames = findStartAndEndSamples(renderedData);
+
+      // Verify the start and stop times. Not strictly needed for this
+      // test, but it's useful context if the ramp data appears to be
+      // incorrect.
+ verifyStartAndEndFrames(startEndFrames, should);
+
+ // Loop through each of the rendered grains and check that
+ // each grain contains our expected ramp.
+ for (let k = 0; k < startEndFrames.start.length; ++k) {
+ // Verify that the rendered data matches the expected
+ // section of our ramp signal.
+        let result = verifyGrain(
+            renderedData, startEndFrames.start[k], startEndFrames.end[k], k);
+        should(result.verified, 'Pulse ' + k + ' contained the expected data')
+            .beTrue();
+        if (!result.verified)
+          ++invalidGrainDataCount;
+      }
+ should(
+ invalidGrainDataCount,
+ 'Number of grains that did not contain the expected data')
+ .beEqualTo(0);
+ }
+
+ audit.define(
+ {
+ label: 'note-grain-on-play',
+ description: 'Test noteGrainOn offset rendering'
+ },
+ function(task, should) {
+ // Create offline audio context.
+ context =
+ new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
+
+ // Create a linear ramp for testing noteGrainOn.
+ linearRampBuffer = createSignalBuffer(context, function(k) {
+              // Want the ramp to start with 1, not 0.
+ return k + 1;
+ });
+
+ let grainInfo =
+ playAllGrains(context, linearRampBuffer, numberOfTests);
+
+ grainOffsetTime = grainInfo.grainOffsetTimes;
+
+ context.startRendering().then(function(audioBuffer) {
+ checkResult(audioBuffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
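The timeToSampleFrame helper used in verifyGrain above comes from /webaudio/resources/note-grain-on-testing.js. A hedged sketch of the conversion it performs (illustrative; the real helper may round differently):

    function timeToSampleFrameSketch(time, sampleRate) {
      // Convert a grain offset in seconds to the nearest sample frame.
      return Math.round(time * sampleRate);
    }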
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html
new file mode 100644
index 0000000000..0db297b42c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/note-grain-on-timing.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ note-grain-on-timing.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/note-grain-on-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let squarePulseBuffer;
+
+ function checkResult(buffer, should) {
+ renderedData = buffer.getChannelData(0);
+ let nSamples = renderedData.length;
+ let startEndFrames = findStartAndEndSamples(renderedData);
+
+ verifyStartAndEndFrames(startEndFrames, should);
+ }
+
+ audit.define('Test timing of noteGrainOn', function(task, should) {
+ // Create offline audio context.
+ context =
+ new OfflineAudioContext(2, sampleRate * renderTime, sampleRate);
+
+ squarePulseBuffer = createSignalBuffer(context, function(k) {
+          return 1;
+ });
+
+ playAllGrains(context, squarePulseBuffer, numberOfTests);
+
+ context.startRendering().then(function(audioBuffer) {
+ checkResult(audioBuffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav
new file mode 100644
index 0000000000..ab9d5fe5a9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/resources/audiobuffersource-multi-channels-expected.wav
Binary files differ
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html
new file mode 100644
index 0000000000..5fafd024ee
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sample-accurate-scheduling.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html>
+<!--
+Tests that we are able to schedule a series of notes to play back with sample accuracy.
+We use an impulse so we can tell exactly where the rendering is happening.
+-->
+<html>
+ <head>
+ <title>
+ sample-accurate-scheduling.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/buffer-loader.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 4;
+
+ let context = 0;
+ let bufferLoader = 0;
+ let impulse;
+
+ // See if we can render at exactly these sample offsets.
+ let sampleOffsets = [0, 3, 512, 517, 1000, 1005, 20000, 21234, 37590];
+
+ function createImpulse() {
+ // An impulse has a value of 1 at time 0, and is otherwise 0.
+ impulse = context.createBuffer(2, 512, sampleRate);
+ let sampleDataL = impulse.getChannelData(0);
+ let sampleDataR = impulse.getChannelData(1);
+ sampleDataL[0] = 1.0;
+ sampleDataR[0] = 1.0;
+ }
+
+ function playNote(time) {
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = impulse;
+ bufferSource.connect(context.destination);
+ bufferSource.start(time);
+ }
+
+ function checkSampleAccuracy(buffer, should) {
+ let bufferDataL = buffer.getChannelData(0);
+ let bufferDataR = buffer.getChannelData(1);
+
+ let impulseCount = 0;
+ let badOffsetCount = 0;
+
+ // Left and right channels must be the same.
+ should(bufferDataL, 'Content of left and right channels match and')
+ .beEqualToArray(bufferDataR);
+
+ // Go through every sample and make sure it's 0, except at positions in
+ // sampleOffsets.
+ for (let i = 0; i < buffer.length; ++i) {
+ if (bufferDataL[i] != 0) {
+ // Make sure this index is in sampleOffsets
+ let found = false;
+ for (let j = 0; j < sampleOffsets.length; ++j) {
+ if (sampleOffsets[j] == i) {
+ found = true;
+ break;
+ }
+ }
+ ++impulseCount;
+ should(found, 'Non-zero sample found at sample offset ' + i)
+ .beTrue();
+ if (!found) {
+ ++badOffsetCount;
+ }
+ }
+ }
+
+ should(impulseCount, 'Number of impulses found')
+ .beEqualTo(sampleOffsets.length);
+
+ if (impulseCount == sampleOffsets.length) {
+ should(badOffsetCount, 'bad offset').beEqualTo(0);
+ }
+ }
+
+ audit.define(
+ {label: 'test', description: 'Test sample-accurate scheduling'},
+ function(task, should) {
+
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * lengthInSeconds, sampleRate);
+ createImpulse();
+
+ for (let i = 0; i < sampleOffsets.length; ++i) {
+ let timeInSeconds = sampleOffsets[i] / sampleRate;
+ playNote(timeInSeconds);
+ }
+
+ context.startRendering().then(function(buffer) {
+ checkSampleAccuracy(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
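A compact alternative to the per-sample scan in checkSampleAccuracy above (a sketch using only standard typed-array methods): collect the indices of all non-zero samples and compare them against the scheduled offsets.

    function nonZeroOffsets(data) {
      const offsets = [];
      data.forEach((value, index) => {
        if (value !== 0) offsets.push(index);
      });
      return offsets;
    }
    // When scheduling is sample-accurate, nonZeroOffsets(bufferDataL)
    // equals sampleOffsets element for element.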
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html
new file mode 100644
index 0000000000..3700bfa8ce
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html
@@ -0,0 +1,133 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Sub-Sample Accurate Stitching of ABSNs
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'buffer-stitching-1',
+ description: 'Subsample buffer stitching, same rates'
+ },
+ (task, should) => {
+ const sampleRate = 44100;
+ const bufferRate = 44100;
+ const bufferLength = 30;
+
+          // Experimentally determined thresholds. DO NOT relax these values
+          // too far just to make the tests pass.
+ const errorThreshold = 9.0957e-5;
+ const snrThreshold = 85.580;
+
+ // Informative message
+ should(sampleRate, 'Test 1: context.sampleRate')
+ .beEqualTo(sampleRate);
+ testBufferStitching(sampleRate, bufferRate, bufferLength)
+ .then(resultBuffer => {
+ const actual = resultBuffer.getChannelData(0);
+ const expected = resultBuffer.getChannelData(1);
+ should(
+ actual,
+ `Stitched sine-wave buffers at sample rate ${bufferRate}`)
+ .beCloseToArray(
+ expected, {absoluteThreshold: errorThreshold});
+ const SNR = 10 * Math.log10(computeSNR(actual, expected));
+ should(SNR, `SNR (${SNR} dB)`)
+ .beGreaterThanOrEqualTo(snrThreshold);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'buffer-stitching-2',
+ description: 'Subsample buffer stitching, different rates'
+ },
+ (task, should) => {
+ const sampleRate = 44100;
+ const bufferRate = 43800;
+ const bufferLength = 30;
+
+          // Experimentally determined thresholds. DO NOT relax these values
+          // too far just to make the tests pass.
+ const errorThreshold = 3.8986e-3;
+ const snrThreshold = 65.737;
+
+ // Informative message
+ should(sampleRate, 'Test 2: context.sampleRate')
+ .beEqualTo(sampleRate);
+ testBufferStitching(sampleRate, bufferRate, bufferLength)
+ .then(resultBuffer => {
+ const actual = resultBuffer.getChannelData(0);
+ const expected = resultBuffer.getChannelData(1);
+ should(
+ actual,
+ `Stitched sine-wave buffers at sample rate ${bufferRate}`)
+ .beCloseToArray(
+ expected, {absoluteThreshold: errorThreshold});
+ const SNR = 10 * Math.log10(computeSNR(actual, expected));
+ should(SNR, `SNR (${SNR} dB)`)
+ .beGreaterThanOrEqualTo(snrThreshold);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Create graph to test stitching of consecutive ABSNs. The context rate
+ // is |sampleRate|, and the buffers have a fixed length of |bufferLength|
+ // and rate of |bufferRate|. The |bufferRate| should not be too different
+ // from |sampleRate| because of interpolation of the buffer to the context
+ // rate.
+ function testBufferStitching(sampleRate, bufferRate, bufferLength) {
+ // The context for testing. Channel 0 contains the output from
+ // stitching all the buffers together, and channel 1 contains the
+ // expected output.
+ const context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate});
+
+ const merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+ // The reference is a sine wave at 440 Hz.
+ const ref = new OscillatorNode(context, {frequency: 440, type: 'sine'});
+ ref.connect(merger, 0, 1);
+ ref.start();
+
+ // The test signal is a bunch of short AudioBufferSources containing
+ // bits of a sine wave.
+ let waveSignal = new Float32Array(context.length);
+ const omega = 2 * Math.PI / bufferRate * ref.frequency.value;
+ for (let k = 0; k < context.length; ++k) {
+ waveSignal[k] = Math.sin(omega * k);
+ }
+
+ // Slice the sine wave into many little buffers to be assigned to ABSNs
+ // that are started at the appropriate times to produce a final sine
+ // wave.
+ for (let k = 0; k < context.length; k += bufferLength) {
+ const buffer =
+ new AudioBuffer({length: bufferLength, sampleRate: bufferRate});
+ buffer.copyToChannel(waveSignal.slice(k, k + bufferLength), 0);
+
+ const src = new AudioBufferSourceNode(context, {buffer: buffer});
+ src.connect(merger, 0, 0);
+ src.start(k / bufferRate);
+ }
+
+ return context.startRendering();
+ }
+ </script>
+ </body>
+</html>
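A worked example of the scheduling arithmetic in testBufferStitching above: with bufferLength = 30 and bufferRate = 43800, each slice spans 30 / 43800 s, so slice k starts exactly where slice k - 1 ends, even though that boundary is not an integer number of 44100 Hz context frames, which is precisely what makes the stitching sub-sample accurate:

    const bufferRate = 43800;
    const bufferLength = 30;
    for (let k = 0; k < 3 * bufferLength; k += bufferLength) {
      console.log(`slice at buffer frame ${k} starts at ${k / bufferRate} s`);
    }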
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html
new file mode 100644
index 0000000000..8c627f90f2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-scheduling.html
@@ -0,0 +1,423 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Sub-Sample Accurate Scheduling for ABSN
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ // Power of two so there's no roundoff converting from integer frames to
+ // time.
+ let sampleRate = 32768;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('sub-sample accurate start', (task, should) => {
+        // There are two channels, one for each source. We only need to
+        // render a few quanta for this test.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+        // Use a simple linear ramp for the sources, with integer steps
+        // starting at 1, to make it easy to verify that they have a
+        // sub-sample accurate start.  The ramp starts at 1 so we can easily
+        // tell when the source starts.
+ let rampBuffer = new AudioBuffer(
+ {length: context.length, sampleRate: context.sampleRate});
+ let r = rampBuffer.getChannelData(0);
+ for (let k = 0; k < r.length; ++k) {
+ r[k] = k + 1;
+ }
+
+ const src0 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+ const src1 = new AudioBufferSourceNode(context, {buffer: rampBuffer});
+
+ // Frame where sources should start. This is pretty arbitrary, but one
+ // should be close to an integer and the other should be close to the
+ // next integer. We do this to catch the case where rounding of the
+ // start frame is being done. Rounding is incorrect.
+ const startFrame = 33;
+ const startFrame0 = startFrame + 0.1;
+ const startFrame1 = startFrame + 0.9;
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+
+ src0.start(startFrame0 / context.sampleRate);
+ src1.start(startFrame1 / context.sampleRate);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ const output0 = audioBuffer.getChannelData(0);
+ const output1 = audioBuffer.getChannelData(1);
+
+ // Compute the expected output by interpolating the ramp buffer of
+ // the sources if they started at the given frame.
+ const ramp = rampBuffer.getChannelData(0);
+ const expected0 = interpolateRamp(ramp, startFrame0);
+ const expected1 = interpolateRamp(ramp, startFrame1);
+
+ // Verify output0 has the correct values
+
+ // For information only
+ should(startFrame0, 'src0 start frame').beEqualTo(startFrame0);
+
+ // Output must be zero before the source start frame, and it must
+ // be interpolated correctly after the start frame. The
+              // absoluteThreshold below is currently set for Chrome, which
+              // does linear interpolation.  This needs to be updated
+              // eventually if other browsers do not use linear interpolation.
+ should(
+ output0.slice(0, startFrame + 1), `output0[0:${startFrame}]`)
+ .beConstantValueOf(0);
+ should(
+ output0.slice(startFrame + 1, expected0.length),
+ `output0[${startFrame + 1}:${expected0.length - 1}]`)
+ .beCloseToArray(
+ expected0.slice(startFrame + 1), {absoluteThreshold: 0});
+
+ // Verify output1 has the correct values. Same approach as for
+ // output0.
+ should(startFrame1, 'src1 start frame').beEqualTo(startFrame1);
+
+ should(
+ output1.slice(0, startFrame + 1), `output1[0:${startFrame}]`)
+ .beConstantValueOf(0);
+ should(
+ output1.slice(startFrame + 1, expected1.length),
+ `output1[${startFrame + 1}:${expected1.length - 1}]`)
+ .beCloseToArray(
+ expected1.slice(startFrame + 1), {absoluteThreshold: 0});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('sub-sample accurate stop', (task, should) => {
+        // There are three channels, one for each source. We only need to
+        // render a few quanta for this test.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 3, length: 128, sampleRate: sampleRate});
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+ let buffer = new AudioBuffer(
+ {length: context.length, sampleRate: context.sampleRate});
+ buffer.getChannelData(0).fill(1);
+
+ const src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+ const src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+ const src2 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+        // Frame where sources should stop. This is pretty arbitrary, but one
+ // should be an integer, one should be close to an integer and the other
+ // should be close to the next integer. This is to catch the case where
+ // rounding is used for the end frame. Rounding is incorrect.
+ const endFrame = 33;
+ const endFrame1 = endFrame + 0.1;
+ const endFrame2 = endFrame + 0.9;
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+ src2.connect(merger, 0, 2);
+
+ src0.start(0);
+ src1.start(0);
+ src2.start(0);
+ src0.stop(endFrame / context.sampleRate);
+ src1.stop(endFrame1 / context.sampleRate);
+ src2.stop(endFrame2 / context.sampleRate);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let actual2 = audioBuffer.getChannelData(2);
+
+ // Just verify that we stopped at the right time.
+
+ // This is case where the end frame is an integer. Since the first
+ // output ends on an exact frame, the output must be zero at that
+ // frame number. We print the end frame for information only; it
+ // makes interpretation of the rest easier.
+ should(endFrame - 1, 'src0 end frame')
+ .beEqualTo(endFrame - 1);
+ should(actual0[endFrame - 1], `output0[${endFrame - 1}]`)
+ .notBeEqualTo(0);
+ should(actual0.slice(endFrame),
+ `output0[${endFrame}:]`)
+ .beConstantValueOf(0);
+
+ // The case where the end frame is just a little above an integer.
+ // The output must not be zero just before the end and must be zero
+ // after.
+ should(endFrame1, 'src1 end frame')
+ .beEqualTo(endFrame1);
+ should(actual1[endFrame], `output1[${endFrame}]`)
+ .notBeEqualTo(0);
+ should(actual1.slice(endFrame + 1),
+ `output1[${endFrame + 1}:]`)
+ .beConstantValueOf(0);
+
+ // The case where the end frame is just a little below an integer.
+ // The output must not be zero just before the end and must be zero
+ // after.
+ should(endFrame2, 'src2 end frame')
+ .beEqualTo(endFrame2);
+ should(actual2[endFrame], `output2[${endFrame}]`)
+ .notBeEqualTo(0);
+ should(actual2.slice(endFrame + 1),
+ `output2[${endFrame + 1}:]`)
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('sub-sample-grain', (task, should) => {
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: 128, sampleRate: sampleRate});
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+        // The source can be a simple constant for this test.
+ let buffer = new AudioBuffer(
+ {length: context.length, sampleRate: context.sampleRate});
+ buffer.getChannelData(0).fill(1);
+
+ let src0 = new AudioBufferSourceNode(context, {buffer: buffer});
+ let src1 = new AudioBufferSourceNode(context, {buffer: buffer});
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+
+ // Start a short grain.
+ const src0StartGrain = 3.1;
+ const src0EndGrain = 37.2;
+ src0.start(
+ src0StartGrain / context.sampleRate, 0,
+ (src0EndGrain - src0StartGrain) / context.sampleRate);
+
+ const src1StartGrain = 5.8;
+ const src1EndGrain = 43.9;
+ src1.start(
+ src1StartGrain / context.sampleRate, 0,
+ (src1EndGrain - src1StartGrain) / context.sampleRate);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let output0 = audioBuffer.getChannelData(0);
+ let output1 = audioBuffer.getChannelData(1);
+
+ let expected = new Float32Array(context.length);
+
+ // Compute the expected output for output0 and verify the actual
+ // output matches.
+ expected.fill(1);
+ for (let k = 0; k <= Math.floor(src0StartGrain); ++k) {
+ expected[k] = 0;
+ }
+ for (let k = Math.ceil(src0EndGrain); k < expected.length; ++k) {
+ expected[k] = 0;
+ }
+
+ verifyGrain(should, output0, {
+ startGrain: src0StartGrain,
+ endGrain: src0EndGrain,
+ sourceName: 'src0',
+ outputName: 'output0'
+ });
+
+ verifyGrain(should, output1, {
+ startGrain: src1StartGrain,
+ endGrain: src1EndGrain,
+ sourceName: 'src1',
+ outputName: 'output1'
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ 'sub-sample accurate start with playbackRate', (task, should) => {
+          // There are two channels, one for each source. We only need to
+          // render a few quanta for this test.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: 8192, sampleRate: sampleRate});
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+          // Use a simple linear ramp for the sources, with integer steps
+          // starting at 1, to make it easy to verify that they have a
+          // sub-sample accurate start.  The ramp starts at 1 so we can
+          // easily tell when the source starts.
+ let buffer = new AudioBuffer(
+ {length: context.length, sampleRate: context.sampleRate});
+ let r = buffer.getChannelData(0);
+ for (let k = 0; k < r.length; ++k) {
+ r[k] = k + 1;
+ }
+
+ // Two sources with different playback rates
+ const src0 = new AudioBufferSourceNode(
+ context, {buffer: buffer, playbackRate: .25});
+ const src1 = new AudioBufferSourceNode(
+ context, {buffer: buffer, playbackRate: 4});
+
+ // Frame where sources start. Pretty arbitrary but should not be an
+ // integer.
+ const startFrame = 17.8;
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+
+ src0.start(startFrame / context.sampleRate);
+ src1.start(startFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ const output0 = audioBuffer.getChannelData(0);
+ const output1 = audioBuffer.getChannelData(1);
+
+ const frameBefore = Math.floor(startFrame);
+ const frameAfter = frameBefore + 1;
+
+ // Informative message so we know what the following output
+ // indices really mean.
+ should(startFrame, 'Source start frame')
+ .beEqualTo(startFrame);
+
+ // Verify the output
+
+              // With a startFrame of 17.8, the first output is at frame 18,
+              // but the actual start is at 17.8, so we would interpolate the
+              // output 0.2 of the way between 17.8 and 18, giving 1.2 for
+              // our ramp. But the playback rate is 0.25, so the source is
+              // really only 1/4 as far along: the output is 0.2 * 0.25 of
+              // the way between 1 and 2, or 1.05.
+
+ const ramp0 = buffer.getChannelData(0)[0];
+ const ramp1 = buffer.getChannelData(0)[1];
+
+ const src0Output = ramp0 +
+ (ramp1 - ramp0) * (frameAfter - startFrame) *
+ src0.playbackRate.value;
+
+ let playbackMessage =
+ `With playbackRate ${src0.playbackRate.value}:`;
+
+ should(
+ output0[frameBefore],
+ `${playbackMessage} output0[${frameBefore}]`)
+ .beEqualTo(0);
+ should(
+ output0[frameAfter],
+ `${playbackMessage} output0[${frameAfter}]`)
+ .beCloseTo(src0Output, {threshold: 4.542e-8});
+
+ const src1Output = ramp0 +
+ (ramp1 - ramp0) * (frameAfter - startFrame) *
+ src1.playbackRate.value;
+
+ playbackMessage =
+ `With playbackRate ${src1.playbackRate.value}:`;
+
+ should(
+ output1[frameBefore],
+ `${playbackMessage} output1[${frameBefore}]`)
+ .beEqualTo(0);
+ should(
+ output1[frameAfter],
+ `${playbackMessage} output1[${frameAfter}]`)
+ .beCloseTo(src1Output, {threshold: 4.542e-8});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Given an input ramp in |rampBuffer|, interpolate the signal assuming
+ // this ramp is used for an ABSN that starts at frame |startFrame|, which
+ // is not necessarily an integer. For simplicity we just use linear
+ // interpolation here. The interpolation is not part of the spec but
+ // this should be pretty close to whatever interpolation is being done.
+ function interpolateRamp(rampBuffer, startFrame) {
+ // |start| is the last zero sample before the ABSN actually starts.
+ const start = Math.floor(startFrame);
+ // One less than the rampBuffer because we can't linearly interpolate
+ // the last frame.
+ let result = new Float32Array(rampBuffer.length - 1);
+
+ for (let k = 0; k <= start; ++k) {
+ result[k] = 0;
+ }
+
+ // Now start linear interpolation.
+ let frame = startFrame;
+ let index = 1;
+ for (let k = start + 1; k < result.length; ++k) {
+ let s0 = rampBuffer[index];
+ let s1 = rampBuffer[index - 1];
+ let delta = frame - k;
+ let s = s1 - delta * (s0 - s1);
+ result[k] = s;
+ ++frame;
+ ++index;
+ }
+
+ return result;
+ }
+
+ function verifyGrain(should, output, options) {
+ let {startGrain, endGrain, sourceName, outputName} = options;
+ let expected = new Float32Array(output.length);
+ // Compute the expected output for output and verify the actual
+ // output matches.
+ expected.fill(1);
+ for (let k = 0; k <= Math.floor(startGrain); ++k) {
+ expected[k] = 0;
+ }
+ for (let k = Math.ceil(endGrain); k < expected.length; ++k) {
+ expected[k] = 0;
+ }
+
+ should(startGrain, `${sourceName} grain start`).beEqualTo(startGrain);
+ should(endGrain - startGrain, `${sourceName} grain duration`)
+ .beEqualTo(endGrain - startGrain);
+ should(endGrain, `${sourceName} grain end`).beEqualTo(endGrain);
+ should(output, outputName).beEqualToArray(expected);
+ should(
+ output[Math.floor(startGrain)],
+ `${outputName}[${Math.floor(startGrain)}]`)
+ .beEqualTo(0);
+ should(
+ output[1 + Math.floor(startGrain)],
+ `${outputName}[${1 + Math.floor(startGrain)}]`)
+ .notBeEqualTo(0);
+ should(
+ output[Math.floor(endGrain)],
+ `${outputName}[${Math.floor(endGrain)}]`)
+ .notBeEqualTo(0);
+ should(
+ output[1 + Math.floor(endGrain)],
+ `${outputName}[${1 + Math.floor(endGrain)}]`)
+ .beEqualTo(0);
+ }
+ </script>
+ </body>
+</html>
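A worked check of the interpolation comment in the playbackRate task above (startFrame 17.8, ramp samples 1 and 2, playbackRate 0.25; the values here restate the test's own numbers):

    const ramp0 = 1, ramp1 = 2;
    const startFrame = 17.8, frameAfter = 18, playbackRate = 0.25;
    const firstOutput =
        ramp0 + (ramp1 - ramp0) * (frameAfter - startFrame) * playbackRate;
    // 0.2 of a frame at 1/4 speed: 1 + 1 * 0.2 * 0.25 = 1.05
    console.assert(Math.abs(firstOutput - 1.05) < 1e-12);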
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html
new file mode 100644
index 0000000000..a83fa1dbe6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-detached-execution-context.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Testing behavior of AudioContext after execution context is detached
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ audit.define('decoding-on-detached-iframe', (task, should) => {
+ const iframe =
+ document.createElementNS("http://www.w3.org/1999/xhtml", "iframe");
+ document.body.appendChild(iframe);
+ let context = new iframe.contentWindow.AudioContext();
+ document.body.removeChild(iframe);
+
+ should(context.decodeAudioData(new ArrayBuffer(1)),
+ 'decodeAudioData() upon a detached iframe')
+ .beRejectedWith('InvalidStateError')
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html
new file mode 100644
index 0000000000..5889faf7cc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp-cross-realm.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Testing AudioContext.getOutputTimestamp() method (cross-realm)
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ audit.define("getoutputtimestamp-cross-realm", function(task, should) {
+ const mainContext = new AudioContext();
+ return task.timeout(() => {
+ const iframe = document.createElement("iframe");
+ document.body.append(iframe);
+ const iframeContext = new iframe.contentWindow.AudioContext();
+
+ should(mainContext.getOutputTimestamp().performanceTime, "mainContext's performanceTime")
+ .beGreaterThan(iframeContext.getOutputTimestamp().performanceTime, "iframeContext's performanceTime");
+ should(iframeContext.getOutputTimestamp.call(mainContext).performanceTime, "mainContext's performanceTime (via iframeContext's method)")
+ .beCloseTo(mainContext.getOutputTimestamp().performanceTime, "mainContext's performanceTime", { threshold: 0.01 });
+ }, 1000);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html
new file mode 100644
index 0000000000..952f38b1ed
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-getoutputtimestamp.html
@@ -0,0 +1,33 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Testing AudioContext.getOutputTimestamp() method
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('getoutputtimestamp-initial-values', function(task, should) {
+ let context = new AudioContext;
+ let timestamp = context.getOutputTimestamp();
+
+ should(timestamp.contextTime, 'timestamp.contextTime').exist();
+ should(timestamp.performanceTime, 'timestamp.performanceTime').exist();
+
+ should(timestamp.contextTime, 'timestamp.contextTime')
+ .beGreaterThanOrEqualTo(0);
+ should(timestamp.performanceTime, 'timestamp.performanceTime')
+ .beGreaterThanOrEqualTo(0);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html
new file mode 100644
index 0000000000..e4f6001eda
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-not-fully-active.html
@@ -0,0 +1,94 @@
+<!doctype html>
+<title>Test AudioContext construction when document is not fully active</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script src="/common/get-host-info.sub.js"></script>
+<body></body>
+<script>
+const dir = location.pathname.replace(/\/[^\/]*$/, '/');
+const helper = dir + 'resources/not-fully-active-helper.sub.html?childsrc=';
+const remote_helper = get_host_info().HTTP_NOTSAMESITE_ORIGIN + helper;
+const blank_url = get_host_info().ORIGIN + '/common/blank.html';
+
+const load_content = (frame, src) => {
+ if (src == undefined) {
+ frame.srcdoc = '<html></html>';
+ } else {
+ frame.src = src;
+ }
+ return new Promise(resolve => frame.onload = () => resolve(frame));
+};
+const append_iframe = (src) => {
+ const frame = document.createElement('iframe');
+ document.body.appendChild(frame);
+ return load_content(frame, src);
+};
+const remote_op = (win, op) => {
+ win.postMessage(op, '*');
+ return new Promise(resolve => window.onmessage = e => {
+ if (e.data == 'DONE ' + op) resolve();
+ });
+};
+const test_constructor_throws = async (win, deactivate) => {
+ const {AudioContext, DOMException} = win;
+ await deactivate();
+ assert_throws_dom("InvalidStateError", DOMException,
+ () => new AudioContext());
+};
+
+promise_test(async () => {
+ const frame = await append_iframe();
+ return test_constructor_throws(frame.contentWindow, () => frame.remove());
+}, "removed frame");
+promise_test(async () => {
+ const frame = await append_iframe();
+ return test_constructor_throws(frame.contentWindow,
+ () => load_content(frame));
+}, "navigated frame");
+promise_test(async () => {
+ const frame = await append_iframe(helper + blank_url);
+ const inner = frame.contentWindow.frames[0];
+ return test_constructor_throws(inner, () => frame.remove());
+}, "frame in removed frame");
+promise_test(async () => {
+ const frame = await append_iframe(helper + blank_url);
+ const inner = frame.contentWindow.frames[0];
+ return test_constructor_throws(inner, () => load_content(frame));
+}, "frame in navigated frame");
+promise_test(async () => {
+ const frame = await append_iframe(remote_helper + blank_url);
+ const inner = frame.contentWindow.frames[0];
+ return test_constructor_throws(inner, () => frame.remove());
+}, "frame in removed remote-site frame");
+promise_test(async () => {
+ const frame = await append_iframe(remote_helper + blank_url);
+ const inner = frame.contentWindow.frames[0];
+ return test_constructor_throws(inner, () => load_content(frame));
+}, "frame in navigated remote-site frame");
+promise_test(async () => {
+ const outer = (await append_iframe(remote_helper + blank_url)).contentWindow;
+ const inner = outer.frames[0];
+ return test_constructor_throws(inner,
+ () => remote_op(outer, 'REMOVE FRAME'));
+}, "removed frame in remote-site frame");
+promise_test(async () => {
+ const outer = (await append_iframe(remote_helper + blank_url)).contentWindow;
+ const inner = outer.frames[0];
+ return test_constructor_throws(inner,
+ () => remote_op(outer, 'NAVIGATE FRAME'));
+}, "navigated frame in remote-site frame");
+promise_test(async () => {
+ const url = remote_helper + helper + blank_url;
+ const outer = (await append_iframe(url)).contentWindow;
+ const inner = outer.frames[0].frames[0];
+ return test_constructor_throws(inner,
+ () => remote_op(outer, 'REMOVE FRAME'));
+}, "frame in removed remote-site frame in remote-site frame");
+promise_test(async () => {
+ const url = remote_helper + helper + blank_url;
+ const outer = (await append_iframe(url)).contentWindow;
+ const inner = outer.frames[0].frames[0];
+ return test_constructor_throws(inner,
+ () => remote_op(outer, 'NAVIGATE FRAME'));
+}, "frame in navigated remote-site frame in remote-site frame");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html
new file mode 100644
index 0000000000..2dedd6cd36
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-constructor.https.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html>
+<head>
+<title>Test AudioContext constructor with sinkId options</title>
+</head>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+"use strict";
+
+let outputDeviceList = null;
+let firstDeviceId = null;
+
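+// getUserMedia() is called first so the page has audio permission; without
+// it, enumerateDevices() may not expose real output device IDs and labels.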
+navigator.mediaDevices.getUserMedia({audio: true}).then(() => {
+ navigator.mediaDevices.enumerateDevices().then((deviceList) => {
+ outputDeviceList =
+ deviceList.filter(({kind}) => kind === 'audiooutput');
+ assert_greater_than(outputDeviceList.length, 1,
+ 'the system must have more than 1 device.');
+ firstDeviceId = outputDeviceList[1].deviceId;
+
+ // Run async tests concurrently.
+ async_test(t => testDefaultSinkId(t),
+ 'Setting sinkId to the empty string at construction should ' +
+ 'succeed.');
+ async_test(t => testValidSinkId(t),
+ 'Setting sinkId with a valid device identifier at ' +
+ 'construction should succeed.');
+ async_test(t => testAudioSinkOptions(t),
+ 'Setting sinkId with an AudioSinkOptions at construction ' +
+ 'should succeed.');
+ async_test(t => testExceptions(t),
+ 'Invalid sinkId arguments should throw an appropriate ' +
+               'exception.');
+ });
+});
+
+// 1.2.1. AudioContext constructor
+// https://webaudio.github.io/web-audio-api/#AudioContext-constructors
+
+// Step 10.1.1. If sinkId is equal to [[sink ID]], abort these substeps.
+const testDefaultSinkId = (t) => {
+  // The initial `sinkId` is the empty string, so constructing with an empty
+  // string exercises the same-value check in step 10.1.1.
+ const audioContext = new AudioContext({sinkId: ''});
+ audioContext.addEventListener('statechange', () => {
+ t.step(() => {
+ assert_equals(audioContext.sinkId, '');
+ assert_equals(audioContext.state, 'running');
+ });
+ audioContext.close();
+ t.done();
+ }, {once: true});
+};
+
+// Steps 10.1.2 and 10.1.3: see the "Validating sinkId" tests below.
+
+// Step 10.1.4. If sinkId is a type of DOMString, set [[sink ID]] to sinkId and
+// abort these substeps.
+const testValidSinkId = (t) => {
+ const audioContext = new AudioContext({sinkId: firstDeviceId});
+ audioContext.addEventListener('statechange', () => {
+ t.step(() => {
+ assert_true(audioContext.sinkId === firstDeviceId,
+ 'the context sinkId should match the given sinkId.');
+ });
+ audioContext.close();
+ t.done();
+ }, {once: true});
+ t.step_timeout(t.unreached_func('onstatechange not fired or assert failed'),
+ 100);
+};
+
+// Step 10.1.5. If sinkId is a type of AudioSinkOptions, set [[sink ID]] to a
+// new instance of AudioSinkInfo created with the value of type of sinkId.
+const testAudioSinkOptions = (t) => {
+ const audioContext = new AudioContext({sinkId: {type: 'none'}});
+ // The only signal we can use for the sinkId change after construction is
+  // the `statechange` event.
+ audioContext.addEventListener('statechange', () => {
+ t.step(() => {
+ assert_equals(typeof audioContext.sinkId, 'object');
+ assert_equals(audioContext.sinkId.type, 'none');
+ });
+ audioContext.close();
+ t.done();
+ }, {once: true});
+ t.step_timeout(t.unreached_func('onstatechange not fired or assert failed'),
+ 100);
+};
+
+// 1.2.4. Validating sinkId
+// https://webaudio.github.io/web-audio-api/#validating-sink-identifier
+
+// Step 3. If document is not allowed to use the feature identified by
+// "speaker-selection", return a new DOMException whose name is
+// "NotAllowedError".
+// TODO: Due to the lack of implementation, this step is not tested.
+
+const testExceptions = (t) => {
+ t.step(() => {
+ // The wrong AudioSinkOption.type should cause a TypeError.
+ assert_throws_js(TypeError, () => {
+ const audioContext = new AudioContext({sinkId: {type: 'something_else'}});
+ audioContext.close();
+ }, 'An invalid AudioSinkOptions.type value should throw a TypeError ' +
+ 'exception.');
+ });
+
+ t.step(() => {
+ // Step 4. If sinkIdArg is a type of DOMString but it is not equal to the
+ // empty string or it does not match any audio output device identified by
+ // the result that would be provided by enumerateDevices(), return a new
+ // DOMException whose name is "NotFoundError".
+ assert_throws_dom('NotFoundError', () => {
+ const audioContext = new AudioContext({sinkId: 'some_random_device_id'});
+ audioContext.close();
+ }, 'An invalid device identifier should throw a NotFoundError exception.');
+ });
+ t.done();
+};
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html
new file mode 100644
index 0000000000..c4fbe41e92
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-setsinkid.https.html
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<head>
+<title>Test AudioContext.setSinkId() method</title>
+</head>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+"use strict";
+
+const audioContext = new AudioContext();
+let outputDeviceList = null;
+let firstDeviceId = null;
+
+// Setup: Get permission via getUserMedia() and a list of audio output devices.
+promise_setup(async t => {
+ await navigator.mediaDevices.getUserMedia({ audio: true });
+ const deviceList = await navigator.mediaDevices.enumerateDevices();
+ outputDeviceList =
+ deviceList.filter(({kind}) => kind === 'audiooutput');
+ assert_greater_than(outputDeviceList.length, 1,
+ 'the system must have more than 1 device.');
+ firstDeviceId = outputDeviceList[1].deviceId;
+}, 'Get permission via getUserMedia() and a list of audio output devices.');
+
+
+// 1.2.3. AudioContext.setSinkId() method
+// https://webaudio.github.io/web-audio-api/#dom-audiocontext-setsinkid-domstring-or-audiosinkoptions-sinkid
+
+promise_test(async t => {
+ t.step(() => {
+ // The default value of `sinkId` is the empty string.
+ assert_equals(audioContext.sinkId, '');
+ });
+ t.done();
+}, 'The initial value of sinkId should be the empty string.');
+
+promise_test(async t => {
+ // Change to the first non-default device in the list.
+ await audioContext.setSinkId(firstDeviceId);
+ t.step(() => {
+ // If both `sinkId` and [[sink ID]] are a type of DOMString, and they are
+ // equal to each other, resolve the promise immediately.
+ assert_equals(typeof audioContext.sinkId, 'string');
+ assert_equals(audioContext.sinkId, firstDeviceId);
+ });
+ return audioContext.setSinkId(firstDeviceId);
+}, 'setSinkId() with the same sink ID should resolve immediately.');
+
+promise_test(async t => {
+ // If sinkId is a type of AudioSinkOptions and [[sink ID]] is a type of
+ // AudioSinkInfo, and type in sinkId and type in [[sink ID]] are equal,
+ // resolve the promise immediately.
+ await audioContext.setSinkId({type: 'none'});
+ t.step(() => {
+ assert_equals(typeof audioContext.sinkId, 'object');
+ assert_equals(audioContext.sinkId.type, 'none');
+ });
+ return audioContext.setSinkId({type: 'none'});
+}, 'setSinkId() with the same AudioSinkOptions.type value should resolve ' +
+ 'immediately.');
+
+// 1.2.4. Validating sinkId
+// https://webaudio.github.io/web-audio-api/#validating-sink-identifier
+
+// Step 3. If document is not allowed to use the feature identified by
+// "speaker-selection", return a new DOMException whose name is
+// "NotAllowedError".
+// TODO: Due to the lack of implementation, this step is not tested.
+
+// The wrong AudioSinkOption.type should cause a TypeError.
+promise_test(t =>
+ promise_rejects_js(t, TypeError,
+ audioContext.setSinkId({type: 'something_else'})),
+ 'setSinkId() should fail with TypeError on an invalid ' +
+ 'AudioSinkOptions.type value.');
+
+// Step 4. If sinkId is a type of DOMString but it is not equal to the empty
+// string or it does not match any audio output device identified by the result
+// that would be provided by enumerateDevices(), return a new DOMException whose
+// name is "NotFoundError".
+promise_test(t =>
+ promise_rejects_dom(t, 'NotFoundError',
+ audioContext.setSinkId('some_random_device_id')),
+ 'setSinkId() should fail with NotFoundError on an invalid device ' +
+ 'identifier.');
+
+// setSinkId invoked from closed AudioContext should throw InvalidStateError
+// DOMException.
+promise_test(async t => {
+ await audioContext.close();
+ t.step(() => {
+ assert_equals(audioContext.state, 'closed');
+ });
+  await promise_rejects_dom(t, 'InvalidStateError',
+    audioContext.setSinkId('some_random_device_id'));
+}, 'setSinkId() should fail with InvalidStateError when called on a ' +
+   'closed AudioContext');
+
+// setSinkId invoked from detached document should throw InvalidStateError
+// DOMException.
+promise_test(async t => {
+ const iframe = document.createElementNS("http://www.w3.org/1999/xhtml", "iframe");
+ document.body.appendChild(iframe);
+ let iframeAudioContext = new iframe.contentWindow.AudioContext();
+ document.body.removeChild(iframe);
+  await promise_rejects_dom(t, 'InvalidStateError',
+    iframeAudioContext.setSinkId('some_random_device_id'));
+}, 'setSinkId() should fail with InvalidStateError when called from a ' +
+   'detached document');
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
new file mode 100644
index 0000000000..c22f69c18d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-sinkid-state-change.https.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html>
+<head>
+<title>Test AudioContext.setSinkId() state change</title>
+</head>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+"use strict";
+
+const audioContext = new AudioContext();
+let outputDeviceList = null;
+let firstDeviceId = null;
+
+// Setup: Get permission via getUserMedia() and a list of audio output devices.
+promise_setup(async t => {
+ await navigator.mediaDevices.getUserMedia({ audio: true });
+ const deviceList = await navigator.mediaDevices.enumerateDevices();
+ outputDeviceList =
+ deviceList.filter(({kind}) => kind === 'audiooutput');
+ assert_greater_than(outputDeviceList.length, 1,
+ 'the system must have more than 1 device.');
+ firstDeviceId = outputDeviceList[1].deviceId;
+}, 'Get permission via getUserMedia() and a list of audio output devices.');
+
+// Test the sink change starting from a suspended context.
+promise_test(async t => {
+ let events = [];
+ await audioContext.suspend();
+
+ // Step 6. Set wasRunning to false if the [[rendering thread state]] on the
+ // AudioContext is "suspended".
+ assert_equals(audioContext.state, 'suspended');
+
+ // Step 11.5. Fire an event named sinkchange at the associated AudioContext.
+ audioContext.onsinkchange = t.step_func(() => {
+ events.push('sinkchange');
+ assert_equals(audioContext.sinkId, firstDeviceId);
+ });
+
+ await audioContext.setSinkId(firstDeviceId);
+ assert_equals(events[0], 'sinkchange');
+ t.done();
+}, 'Calling setSinkId() on a suspended AudioContext should fire only sink ' +
+ 'change events.');
+
+// Test the sink change starting from a running context.
+promise_test(async t => {
+ let events = [];
+ await audioContext.resume();
+
+ // Step 9. If wasRunning is true:
+ assert_equals(audioContext.state, 'running');
+
+  // Step 9.2.1 suspends the context and step 12.2 resumes it; each
+  // transition fires a statechange event at the associated AudioContext.
+  // Use a single handler for both transitions, since assigning
+  // audioContext.onstatechange twice in a row would leave only the second
+  // handler attached.
+  audioContext.onstatechange = t.step_func(() => {
+    events.push('statechange:' + audioContext.state);
+  });
+
+  // Step 11.5. Fire an event named sinkchange at the associated AudioContext.
+  audioContext.onsinkchange = t.step_func(() => {
+    events.push('sinkchange');
+    assert_equals(audioContext.sinkId, firstDeviceId);
+  });
+
+ await audioContext.setSinkId(firstDeviceId);
+ assert_equals(events.length, 3);
+ assert_equals(events[0], 'statechange:suspended');
+ assert_equals(events[1], 'sinkchange');
+ assert_equals(events[2], 'statechange:running');
+ t.done();
+}, 'Calling setSinkId() on a running AudioContext should fire both state ' +
+ 'and sink change events.');
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html
new file mode 100644
index 0000000000..192317dda2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume-close.html
@@ -0,0 +1,406 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script type="module">
+"use strict";
+
+function tryToCreateNodeOnClosedContext(ctx) {
+ assert_equals(ctx.state, "closed", "The context is in closed state");
+
+ [
+ { name: "createBufferSource" },
+ {
+ name: "createMediaStreamDestination",
+ onOfflineAudioContext: false,
+ },
+ { name: "createScriptProcessor" },
+ { name: "createStereoPanner" },
+ { name: "createAnalyser" },
+ { name: "createGain" },
+ { name: "createDelay" },
+ { name: "createBiquadFilter" },
+ { name: "createWaveShaper" },
+ { name: "createPanner" },
+ { name: "createConvolver" },
+ { name: "createChannelSplitter" },
+ { name: "createChannelMerger" },
+ { name: "createDynamicsCompressor" },
+ { name: "createOscillator" },
+ {
+ name: "createMediaElementSource",
+ args: [new Audio()],
+ onOfflineAudioContext: false,
+ },
+ {
+ name: "createMediaStreamSource",
+ args: [new AudioContext().createMediaStreamDestination().stream],
+ onOfflineAudioContext: false,
+ },
+ ].forEach(function (e) {
+ if (
+ e.onOfflineAudioContext == false &&
+ ctx instanceof OfflineAudioContext
+ ) {
+ return;
+ }
+
+ try {
+ ctx[e.name].apply(ctx, e.args);
+ } catch (err) {
+ assert_true(false, "unexpected exception thrown for " + e.name);
+ }
+ });
+}
+
+function loadFile(url) {
+ return new Promise((resolve) => {
+ var xhr = new XMLHttpRequest();
+ xhr.open("GET", url, true);
+ xhr.responseType = "arraybuffer";
+ xhr.onload = function () {
+ resolve(xhr.response);
+ };
+ xhr.send();
+ });
+}
+
+// createBuffer, createPeriodicWave and decodeAudioData should work on a context
+// that has `state` == "closed"
+async function tryLegalOperationsOnClosedContext(ctx) {
+ assert_equals(ctx.state, "closed", "The context is in closed state");
+
+ [
+ { name: "createBuffer", args: [1, 44100, 44100] },
+ {
+ name: "createPeriodicWave",
+ args: [new Float32Array(10), new Float32Array(10)],
+ },
+ ].forEach(function (e) {
+ try {
+ ctx[e.name].apply(ctx, e.args);
+ } catch (err) {
+      assert_true(false, "unexpected exception thrown for " + e.name);
+ }
+ });
+ var buf = await loadFile("/webaudio/resources/sin_440Hz_-6dBFS_1s.wav");
+ return ctx
+ .decodeAudioData(buf)
+ .then(function (decodedBuf) {
+ assert_true(
+ true,
+ "decodeAudioData on a closed context should work, it did."
+ );
+ })
+ .catch(function (e) {
+ assert_true(
+ false,
+ "decodeAudioData on a closed context should work, it did not"
+ );
+ });
+}
+
+// Test that MediaStreams that are the output of a suspended AudioContext
+// produce silence:
+// ac1 produces a sine fed to a MediaStreamAudioDestinationNode;
+// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and checks that
+// there is silence while ac1 is suspended.
+async function testMultiContextOutput() {
+ var ac1 = new AudioContext(),
+ ac2 = new AudioContext();
+
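+  // Wait for ac1's first statechange, i.e. for it to reach "running", before
+  // suspending it.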
+ await new Promise((resolve) => (ac1.onstatechange = resolve));
+
+ ac1.onstatechange = null;
+ await ac1.suspend();
+ assert_equals(ac1.state, "suspended", "ac1 is suspended");
+ var osc1 = ac1.createOscillator(),
+ mediaStreamDestination1 = ac1.createMediaStreamDestination();
+
+ var mediaStreamAudioSourceNode2 = ac2.createMediaStreamSource(
+ mediaStreamDestination1.stream
+ ),
+ sp2 = ac2.createScriptProcessor(),
+ silentBuffersInARow = 0;
+
+ osc1.connect(mediaStreamDestination1);
+ mediaStreamAudioSourceNode2.connect(sp2);
+ osc1.start();
+
+ let e = await new Promise((resolve) => (sp2.onaudioprocess = resolve));
+
+ while (true) {
+ let e = await new Promise(
+ (resolve) => (sp2.onaudioprocess = resolve)
+ );
+ var input = e.inputBuffer.getChannelData(0);
+ var silent = true;
+ for (var i = 0; i < input.length; i++) {
+ if (input[i] != 0.0) {
+ silent = false;
+ }
+ }
+
+ if (silent) {
+ silentBuffersInARow++;
+ if (silentBuffersInARow == 10) {
+ assert_true(
+ true,
+ "MediaStreams produce silence when their input is blocked."
+ );
+ break;
+ }
+ } else {
+ assert_equals(
+ silentBuffersInARow,
+ 0,
+ "No non silent buffer inbetween silent buffers."
+ );
+ }
+ }
+
+ sp2.onaudioprocess = null;
+ ac1.close();
+ ac2.close();
+}
+
+// Test that there is no buffering between contexts when connecting a running
+// AudioContext to a suspended AudioContext. Gecko's ScriptProcessorNode does
+// some buffering internally, so we ensure this by using a very low frequency
+// sine and observing that the phase has advanced by a big enough margin.
+async function testMultiContextInput() {
+ var ac1 = new AudioContext(),
+ ac2 = new AudioContext();
+
+ await new Promise((resolve) => (ac1.onstatechange = resolve));
+ ac1.onstatechange = null;
+
+ var osc1 = ac1.createOscillator(),
+ mediaStreamDestination1 = ac1.createMediaStreamDestination(),
+ sp1 = ac1.createScriptProcessor();
+
+ var mediaStreamAudioSourceNode2 = ac2.createMediaStreamSource(
+ mediaStreamDestination1.stream
+ ),
+ sp2 = ac2.createScriptProcessor(),
+ eventReceived = 0;
+
+ osc1.frequency.value = 0.0001;
+ osc1.connect(mediaStreamDestination1);
+ osc1.connect(sp1);
+ mediaStreamAudioSourceNode2.connect(sp2);
+ osc1.start();
+
+ var e = await new Promise((resolve) => (sp2.onaudioprocess = resolve));
+ var inputBuffer1 = e.inputBuffer.getChannelData(0);
+ sp2.value = inputBuffer1[inputBuffer1.length - 1];
+ await ac2.suspend();
+ await ac2.resume();
+
+ while (true) {
+ var e = await new Promise(
+ (resolve) => (sp2.onaudioprocess = resolve)
+ );
+ var inputBuffer = e.inputBuffer.getChannelData(0);
+ if (eventReceived++ == 3) {
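+      // A sine's argument advances by 2*PI*frequency/sampleRate per frame,
+      // and sin(x) ~= x for tiny x, so after 3 callbacks of (an assumed)
+      // 2048-frame ScriptProcessor buffers the output should have moved by
+      // at least this increment if no extra buffering occurred.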
+ var delta = Math.abs(inputBuffer[1] - sp2.value),
+ theoreticalIncrement =
+ (2048 * 3 * Math.PI * 2 * osc1.frequency.value) /
+ ac1.sampleRate;
+ assert_true(
+ delta >= theoreticalIncrement,
+ "Buffering did not occur when the context was suspended (delta:" +
+ delta +
+ " increment: " +
+ theoreticalIncrement +
+ ")"
+ );
+ break;
+ }
+ }
+ ac1.close();
+ ac2.close();
+ sp1.onaudioprocess = null;
+ sp2.onaudioprocess = null;
+}
+
+// Take an AudioContext, make sure it switches to running when the audio starts
+// flowing, and then, call suspend, resume and close on it, tracking its state.
+async function testAudioContext() {
+ var ac = new AudioContext();
+ assert_equals(
+ ac.state,
+ "suspended",
+ "AudioContext should start in suspended state."
+ );
+ var stateTracker = {
+ previous: ac.state,
+ // no promise for the initial suspended -> running
+ initial: { handler: false },
+ suspend: { promise: false, handler: false },
+ resume: { promise: false, handler: false },
+ close: { promise: false, handler: false },
+ };
+
+ await new Promise((resolve) => (ac.onstatechange = resolve));
+
+ assert_true(
+ stateTracker.previous == "suspended" && ac.state == "running",
+ 'AudioContext should switch to "running" when the audio hardware is' +
+ " ready."
+ );
+
+ stateTracker.previous = ac.state;
+ stateTracker.initial.handler = true;
+
+ let promise_statechange_suspend = new Promise((resolve) => {
+ ac.onstatechange = resolve;
+ }).then(() => {
+ stateTracker.suspend.handler = true;
+ });
+ await ac.suspend();
+ assert_true(
+ !stateTracker.suspend.handler,
+ "Promise should be resolved before the callback."
+ );
+ assert_equals(
+ ac.state,
+ "suspended",
+ 'AudioContext should switch to "suspended" when the audio stream is ' +
+ "suspended."
+ );
+ await promise_statechange_suspend;
+ stateTracker.previous = ac.state;
+
+ let promise_statechange_resume = new Promise((resolve) => {
+ ac.onstatechange = resolve;
+ }).then(() => {
+ stateTracker.resume.handler = true;
+ });
+ await ac.resume();
+ assert_true(
+ !stateTracker.resume.handler,
+ "Promise should be resolved before the callback."
+ );
+ assert_equals(
+ ac.state,
+ "running",
+ 'AudioContext should switch to "running" when the audio stream is ' +
+ "resumed."
+ );
+ await promise_statechange_resume;
+ stateTracker.previous = ac.state;
+
+ let promise_statechange_close = new Promise((resolve) => {
+ ac.onstatechange = resolve;
+ }).then(() => {
+ stateTracker.close.handler = true;
+ });
+ await ac.close();
+ assert_true(
+ !stateTracker.close.handler,
+ "Promise should be resolved before the callback."
+ );
+ assert_equals(
+ ac.state,
+ "closed",
+ 'AudioContext should switch to "closed" when the audio stream is ' +
+ "closed."
+ );
+ await promise_statechange_close;
+ stateTracker.previous = ac.state;
+
+ tryToCreateNodeOnClosedContext(ac);
+  await tryLegalOperationsOnClosedContext(ac);
+}
+
+async function testOfflineAudioContext() {
+ var o = new OfflineAudioContext(1, 44100, 44100);
+ assert_equals(
+ o.state,
+ "suspended",
+ "OfflineAudioContext should start in suspended state."
+ );
+
+ var previousState = o.state,
+ finishedRendering = false;
+
+ o.startRendering().then(function (buffer) {
+ finishedRendering = true;
+ });
+
+ await new Promise((resolve) => (o.onstatechange = resolve));
+
+ assert_true(
+ previousState == "suspended" && o.state == "running",
+ "onstatechanged" +
+ "handler is called on state changed, and the new state is running"
+ );
+ previousState = o.state;
+ await new Promise((resolve) => (o.onstatechange = resolve));
+ assert_true(
+ previousState == "running" && o.state == "closed",
+ "onstatechanged handler is called when rendering finishes, " +
+ "and the new state is closed"
+ );
+ assert_true(
+ finishedRendering,
+ "The Promise that is resolved when the rendering is " +
+ "done should be resolved earlier than the state change."
+ );
+ previousState = o.state;
+ function afterRenderingFinished() {
+ assert_true(
+ false,
+ "There should be no transition out of the closed state."
+ );
+ }
+ o.onstatechange = afterRenderingFinished;
+
+ tryToCreateNodeOnClosedContext(o);
+  await tryLegalOperationsOnClosedContext(o);
+}
+
+async function testSuspendResumeEventLoop() {
+ var ac = new AudioContext();
+ var source = ac.createBufferSource();
+ source.buffer = ac.createBuffer(1, 44100, 44100);
+ await new Promise((resolve) => (ac.onstatechange = resolve));
+ ac.onstatechange = null;
+ assert_true(ac.state == "running", "initial state is running");
+ await ac.suspend();
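+  // start() is called while suspended; the "ended" event can only fire once
+  // the context actually resumes and the 1-second buffer plays out.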
+ source.start();
+ ac.resume();
+ await new Promise((resolve) => (source.onended = resolve));
+ assert_true(true, "The AudioContext did resume");
+}
+
+function testResumeInStateChangeForResumeCallback() {
+ return new Promise((resolve) => {
+ var ac = new AudioContext();
+ ac.onstatechange = function () {
+ ac.resume().then(() => {
+ assert_true(true, "resume promise resolved as expected.");
+ resolve();
+ });
+ };
+ });
+}
+
+var tests = [
+ testOfflineAudioContext,
+ testMultiContextOutput,
+ testMultiContextInput,
+ testSuspendResumeEventLoop,
+ testResumeInStateChangeForResumeCallback,
+ testAudioContext,
+];
+
+tests.forEach(function (f) {
+ promise_test(f, f.name);
+});
+ </script>
+ </head>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html
new file mode 100644
index 0000000000..ff3daebf39
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html
@@ -0,0 +1,145 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioContext.suspend() and AudioContext.resume()
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let offlineContext;
+ let osc;
+ let p1;
+ let p2;
+ let p3;
+
+ let sampleRate = 44100;
+ let durationInSeconds = 1;
+
+ let audit = Audit.createTaskRunner();
+
+ // Task: test suspend().
+ audit.define(
+ {
+ label: 'test-suspend',
+ description: 'Test suspend() for offline context'
+ },
+ function(task, should) {
+          // Test suspend/resume. Ideally this test would use an online
+          // AudioContext, but content shell doesn't really have a working
+          // online AudioContext. Hence, use an OfflineAudioContext. Not all
+ // possible scenarios can be easily checked with an offline context
+ // instead of an online context.
+
+ // Create an audio context with an oscillator.
+ should(
+ () => {
+ offlineContext = new OfflineAudioContext(
+ 1, durationInSeconds * sampleRate, sampleRate);
+ },
+ 'offlineContext = new OfflineAudioContext(1, ' +
+ (durationInSeconds * sampleRate) + ', ' + sampleRate + ')')
+ .notThrow();
+ osc = offlineContext.createOscillator();
+ osc.connect(offlineContext.destination);
+
+ // Verify the state.
+ should(offlineContext.state, 'offlineContext.state')
+ .beEqualTo('suspended');
+
+ // Multiple calls to suspend() should not be a problem. But we can't
+ // test that on an offline context. Thus, check that suspend() on
+ // an OfflineAudioContext rejects the promise.
+ should(
+ () => p1 = offlineContext.suspend(),
+ 'p1 = offlineContext.suspend()')
+ .notThrow();
+ should(p1 instanceof Promise, 'p1 instanceof Promise').beTrue();
+
+ should(p1, 'p1').beRejected().then(task.done.bind(task));
+ });
+
+
+ // Task: test resume().
+ audit.define(
+ {
+ label: 'test-resume',
+ description: 'Test resume() for offline context'
+ },
+ function(task, should) {
+ // Multiple calls to resume should not be a problem. But we can't
+ // test that on an offline context. Thus, check that resume() on an
+ // OfflineAudioContext rejects the promise.
+ should(
+ () => p2 = offlineContext.resume(),
+ 'p2 = offlineContext.resume()')
+ .notThrow();
+ should(p2 instanceof Promise, 'p2 instanceof Promise').beTrue();
+
+ // Resume doesn't actually resume an offline context
+ should(offlineContext.state, 'After resume, offlineContext.state')
+ .beEqualTo('suspended');
+ should(p2, 'p2').beRejected().then(task.done.bind(task));
+ });
+
+ // Task: test the state after context closed.
+ audit.define(
+ {
+ label: 'test-after-close',
+ description: 'Test state after context closed'
+ },
+ function(task, should) {
+ // Render the offline context.
+ osc.start();
+
+          // Test suspend/resume via the returned promises. We don't care
+ // about the actual result of the offline rendering.
+ should(
+ () => p3 = offlineContext.startRendering(),
+ 'p3 = offlineContext.startRendering()')
+ .notThrow();
+
+ p3.then(() => {
+ should(offlineContext.state, 'After close, offlineContext.state')
+ .beEqualTo('closed');
+
+ // suspend() should be rejected on a closed context.
+ should(offlineContext.suspend(), 'offlineContext.suspend()')
+ .beRejected()
+ .then(() => {
+ // resume() should be rejected on closed context.
+ should(offlineContext.resume(), 'offlineContext.resume()')
+ .beRejected()
+ .then(task.done.bind(task));
+ })
+ });
+ });
+
+ audit.define(
+ {
+ label: 'resume-running-context',
+ description: 'Test resuming a running context'
+ },
+ (task, should) => {
+ let context;
+ should(() => context = new AudioContext(), 'Create online context')
+ .notThrow();
+
+ should(context.state, 'context.state').beEqualTo('suspended');
+          should(context.resume(), 'context.resume()')
+ .beResolved()
+ .then(() => {
+ should(context.state, 'context.state after resume')
+ .beEqualTo('running');
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html
new file mode 100644
index 0000000000..136abedaa8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/audiocontextoptions.html
@@ -0,0 +1,215 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioContextOptions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+ let defaultLatency;
+ let interactiveLatency;
+ let balancedLatency;
+ let playbackLatency;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test-audiocontextoptions-latencyHint-basic',
+ description: 'Test creating contexts with basic latencyHint types.'
+ },
+ function(task, should) {
+ let closingPromises = [];
+
+ // Verify that an AudioContext can be created with default options.
+ should(function() {
+ context = new AudioContext()
+ }, 'context = new AudioContext()').notThrow();
+
+ should(context.sampleRate,
+ `context.sampleRate (${context.sampleRate} Hz)`).beGreaterThan(0);
+
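+          // baseLatency reports, in seconds, the processing latency incurred
+          // in passing audio from the destination node to the audio
+          // subsystem.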
+ defaultLatency = context.baseLatency;
+ should(defaultLatency, 'default baseLatency').beGreaterThanOrEqualTo(0);
+
+ // Verify that an AudioContext can be created with the expected
+ // latency types.
+ should(
+ function() {
+ context = new AudioContext({'latencyHint': 'interactive'})
+ },
+ 'context = new AudioContext({\'latencyHint\': \'interactive\'})')
+ .notThrow();
+
+ interactiveLatency = context.baseLatency;
+ should(interactiveLatency, 'interactive baseLatency')
+ .beEqualTo(defaultLatency);
+ closingPromises.push(context.close());
+
+ should(
+ function() {
+ context = new AudioContext({'latencyHint': 'balanced'})
+ },
+ 'context = new AudioContext({\'latencyHint\': \'balanced\'})')
+ .notThrow();
+
+ balancedLatency = context.baseLatency;
+ should(balancedLatency, 'balanced baseLatency')
+ .beGreaterThanOrEqualTo(interactiveLatency);
+ closingPromises.push(context.close());
+
+ should(
+ function() {
+ context = new AudioContext({'latencyHint': 'playback'})
+ },
+ 'context = new AudioContext({\'latencyHint\': \'playback\'})')
+ .notThrow();
+
+ playbackLatency = context.baseLatency;
+ should(playbackLatency, 'playback baseLatency')
+ .beGreaterThanOrEqualTo(balancedLatency);
+ closingPromises.push(context.close());
+
+ Promise.all(closingPromises).then(function() {
+ task.done();
+ });
+ });
+
+ audit.define(
+ {
+ label: 'test-audiocontextoptions-latencyHint-double',
+ description:
+ 'Test creating contexts with explicit latencyHint values.'
+ },
+ function(task, should) {
+ let closingPromises = [];
+
+          // Verify that a too-small exact latency is clamped to 'interactive'.
+ should(
+ function() {
+ context =
+ new AudioContext({'latencyHint': interactiveLatency / 2})
+ },
+ 'context = new AudioContext({\'latencyHint\': ' +
+ 'interactiveLatency/2})')
+ .notThrow();
+ should(context.baseLatency, 'double-constructor baseLatency small')
+ .beLessThanOrEqualTo(interactiveLatency);
+ closingPromises.push(context.close());
+
+ // Verify that exact latency in range works as expected
+ let validLatency = (interactiveLatency + playbackLatency) / 2;
+ should(
+ function() {
+ context = new AudioContext({'latencyHint': validLatency})
+ },
+ 'context = new AudioContext({\'latencyHint\': validLatency})')
+ .notThrow();
+ should(
+ context.baseLatency, 'double-constructor baseLatency inrange 1')
+ .beGreaterThanOrEqualTo(interactiveLatency);
+ should(
+ context.baseLatency, 'double-constructor baseLatency inrange 2')
+ .beLessThanOrEqualTo(playbackLatency);
+ closingPromises.push(context.close());
+
+          // Verify that a too-large exact latency is clamped to some value.
+ let context1;
+ let context2;
+ should(function() {
+ context1 =
+ new AudioContext({'latencyHint': playbackLatency * 10});
+ context2 =
+ new AudioContext({'latencyHint': playbackLatency * 20});
+ }, 'creating two high latency contexts').notThrow();
+ should(context1.baseLatency, 'high latency context baseLatency')
+ .beEqualTo(context2.baseLatency);
+ should(context1.baseLatency, 'high latency context baseLatency')
+ .beGreaterThanOrEqualTo(interactiveLatency);
+ closingPromises.push(context1.close());
+ closingPromises.push(context2.close());
+
+ // Verify that invalid latencyHint values are rejected.
+ should(
+ function() {
+ context = new AudioContext({'latencyHint': 'foo'})
+ },
+ 'context = new AudioContext({\'latencyHint\': \'foo\'})')
+ .throw(TypeError);
+
+          // Verify that a non-dictionary argument (here a bare string) is
+          // rejected with a TypeError.
+ should(
+ function() {
+ context = new AudioContext('latencyHint')
+ },
+ 'context = new AudioContext(\'latencyHint\')')
+ .throw(TypeError);
+
+ Promise.all(closingPromises).then(function() {
+ task.done();
+ });
+ });
+
+ audit.define(
+ {
+ label: 'test-audiocontextoptions-sampleRate',
+ description:
+ 'Test creating contexts with non-default sampleRate values.'
+ },
+ function(task, should) {
+          // A sampleRate of 1 is unlikely to be supported on any browser;
+ // test that this rate is rejected.
+ should(
+ () => {
+ context = new AudioContext({sampleRate: 1})
+ },
+ 'context = new AudioContext({sampleRate: 1})')
+ .throw(DOMException, 'NotSupportedError');
+
+ // A sampleRate of 1,000,000 is unlikely to be supported on any
+          // browser; test that this rate is also rejected.
+ should(
+ () => {
+ context = new AudioContext({sampleRate: 1000000})
+ },
+ 'context = new AudioContext({sampleRate: 1000000})')
+ .throw(DOMException, 'NotSupportedError');
+          // A negative sample rate should not be accepted.
+ should(
+ () => {
+ context = new AudioContext({sampleRate: -1})
+ },
+ 'context = new AudioContext({sampleRate: -1})')
+ .throw(DOMException, 'NotSupportedError');
+          // A sample rate of zero should not be accepted.
+ should(
+ () => {
+ context = new AudioContext({sampleRate: 0})
+ },
+ 'context = new AudioContext({sampleRate: 0})')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ () => {
+ context = new AudioContext({sampleRate: 24000})
+ },
+ 'context = new AudioContext({sampleRate: 24000})')
+ .notThrow();
+ should(
+ context.sampleRate, 'sampleRate inrange')
+ .beEqualTo(24000);
+
+ context.close();
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html
new file mode 100644
index 0000000000..f866b5f7a1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/constructor-allowed-to-start.html
@@ -0,0 +1,25 @@
+<!doctype html>
+<title>AudioContext state around "allowed to start" in constructor</title>
+<link rel=help href=https://webaudio.github.io/web-audio-api/#dom-audiocontext-audiocontext>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script src=/resources/testdriver.js></script>
+<script src=/resources/testdriver-vendor.js></script>
+<script>
+setup({ single_test: true });
+test_driver.bless("audio playback", () => {
+ const ctx = new AudioContext();
+ // Immediately after the constructor the state is "suspended" because the
+ // control message to start processing has just been sent, but the state
+ // should change soon.
+ assert_equals(ctx.state, "suspended", "initial state");
+ ctx.onstatechange = () => {
+ assert_equals(ctx.state, "running", "state after statechange event");
+ // Now create another context and ensure it starts out in the "suspended"
+ // state too, ensuring it's not synchronously "running".
+ const ctx2 = new AudioContext();
+ assert_equals(ctx2.state, "suspended", "initial state of 2nd context");
+ done();
+ };
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html
new file mode 100644
index 0000000000..8c74bd0aa1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/crashtests/currentTime-after-discard.html
@@ -0,0 +1,14 @@
+<html>
+<head>
+ <title>
+ Test currentTime after browsing context discard
+ </title>
+</head>
+<script>
+ const frame = document.createElement('frame');
+ document.documentElement.appendChild(frame);
+ const ctx = new frame.contentWindow.AudioContext();
+ frame.remove();
+ ctx.currentTime;
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html
new file mode 100644
index 0000000000..e000ab124f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/processing-after-resume.https.html
@@ -0,0 +1,55 @@
+<!doctype html>
+<title>Test consistency of processing after resume()</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+const get_node_and_reply = (context) => {
+ const node = new AudioWorkletNode(context, 'port-processor');
+ return new Promise((resolve) => {
+ node.port.onmessage = (event) => resolve({node: node, reply: event.data});
+ });
+};
+const ping_for_reply = (node) => {
+ return new Promise((resolve) => {
+ node.port.onmessage = (event) => resolve(event.data);
+ node.port.postMessage('ping');
+ });
+};
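+// The port-processor worklet is assumed to reply with a message containing
+// {timeStamp, currentFrame, processCallCount}, which the assertions below
+// rely on.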
+const assert_consistent = (constructReply, pong, expectedPongTime, name) => {
+ const blockSize = 128;
+ assert_equals(pong.timeStamp, expectedPongTime, `${name} pong time`);
+ assert_equals(pong.processCallCount * blockSize,
+ pong.currentFrame - constructReply.currentFrame,
+ `${name} processed frame count`);
+};
+const modulePath = '/webaudio/the-audio-api/' +
+ 'the-audioworklet-interface/processors/port-processor.js';
+
+promise_test(async () => {
+ const realtime = new AudioContext();
+ await realtime.audioWorklet.addModule(modulePath);
+ await realtime.suspend();
+ const timeBeforeResume = realtime.currentTime;
+ // Two AudioWorkletNodes are constructed.
+ // node1 is constructed before and node2 after the resume() call.
+ const construct1 = get_node_and_reply(realtime);
+ const resume = realtime.resume();
+ const construct2 = get_node_and_reply(realtime);
+ const {node: node1, reply: constructReply1} = await construct1;
+ assert_equals(constructReply1.timeStamp, timeBeforeResume,
+ 'construct time before resume');
+ const {node: node2, reply: constructReply2} = await construct2;
+ assert_greater_than_equal(constructReply2.timeStamp, timeBeforeResume,
+ 'construct time after resume');
+ await resume;
+ // Suspend the context to freeze time and check that the processing for each
+ // node matches the elapsed time.
+ await realtime.suspend();
+ const timeAfterSuspend = realtime.currentTime;
+ const pong1 = await ping_for_reply(node1);
+ const pong2 = await ping_for_reply(node2);
+ assert_consistent(constructReply1, pong1, timeAfterSuspend, 'node1');
+ assert_consistent(constructReply2, pong2, timeAfterSuspend, 'node2');
+ assert_equals(pong1.currentFrame, pong2.currentFrame, 'currentFrame matches');
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html
new file mode 100644
index 0000000000..2fb3c5a50b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/promise-methods-after-discard.html
@@ -0,0 +1,28 @@
+<!doctype html>
+<title>Test for rejected promises from methods on an AudioContext in a
+ discarded browsing context</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<body></body>
+<script>
+let context;
+let childDOMException;
+setup(() => {
+ const frame = document.createElement('iframe');
+ document.body.appendChild(frame);
+ context = new frame.contentWindow.AudioContext();
+ childDOMException = frame.contentWindow.DOMException;
+ frame.remove();
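+  // Removing the frame discards its browsing context, so the AudioContext's
+  // associated document is no longer fully active.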
+});
+
+promise_test((t) => promise_rejects_dom(t, 'InvalidStateError',
+ childDOMException, context.suspend()),
+ 'suspend()');
+promise_test((t) => promise_rejects_dom(t, 'InvalidStateError',
+ childDOMException, context.resume()),
+ 'resume()');
+promise_test((t) => promise_rejects_dom(t, 'InvalidStateError',
+ childDOMException, context.close()),
+ 'close()');
+// decodeAudioData() is tested in audiocontext-detached-execution-context.html
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html
new file mode 100644
index 0000000000..2654a2a504
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/resources/not-fully-active-helper.sub.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<html>
+<iframe src="{{GET[childsrc]}}">
+</iframe>
+<script>
+const frame = document.getElementsByTagName('iframe')[0];
+const reply = op => window.parent.postMessage('DONE ' + op, '*');
+
+window.onmessage = e => {
+ switch (e.data) {
+ case 'REMOVE FRAME':
+ frame.remove();
+ reply(e.data);
+ break;
+ case 'NAVIGATE FRAME':
+ frame.srcdoc = '<html></html>';
+ frame.onload = () => reply(e.data);
+ break;
+ }
+};
+</script>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html
new file mode 100644
index 0000000000..596a825c3d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-after-construct.html
@@ -0,0 +1,72 @@
+<!doctype html>
+<title>Test AudioContext state updates with suspend() shortly after
+ construction</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+// A separate async_test is used for tracking state change counts so that it
+// can report excess changes after the promise_test for the iteration has
+// completed.
+const changeCountingTest = async_test('State change counting');
+
+const doTest = async (testCount) => {
+ const ctx = new AudioContext();
+ // Explicitly resume to get a promise to indicate whether the context
+ // successfully started running.
+ const resume = ctx.resume();
+ const suspend = ctx.suspend();
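+  // Control messages are expected to be processed in order, so the resume
+  // should settle before the suspend takes effect, leaving the context
+  // "suspended" once both promises resolve.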
+ let stateChangesDone = new Promise((resolve) => {
+ ctx.onstatechange = () => {
+ ++ctx.stateChangeCount;
+ changeCountingTest.step(() => {
+ assert_less_than_equal(ctx.stateChangeCount,
+ ctx.expectedStateChangeCount,
+ `ctx ${testCount} state change count.`);
+ assert_equals(ctx.state, ctx.expectedState, `ctx ${testCount} state`);
+ });
+ if (ctx.stateChangeCount == ctx.totalStateChangeCount) {
+ resolve();
+ }
+ };
+ });
+ ctx.stateChangeCount = 0;
+ ctx.expectedStateChangeCount = 1;
+ ctx.expectedState = 'running';
+ ctx.totalStateChangeCount = 2;
+ let resumeState = 'pending';
+ resume.then(() => {
+ resumeState = 'fulfilled';
+ assert_equals(ctx.state, 'running', 'state on resume fulfilled.');
+ }).catch(() => {
+ // The resume() promise may be rejected if "Attempt to acquire system
+ // resources" fails. The spec does not discuss the possibility of a
+ // subsequent suspend causing such a failure, but accept this as a
+ // reasonable behavior.
+ resumeState = 'rejected';
+ assert_equals(ctx.state, 'suspended', 'state on resume rejected.');
+ assert_equals(ctx.stateChangeCount, 0);
+ ctx.expectedStateChangeCount = 0;
+ stateChangesDone = Promise.resolve();
+ });
+ suspend.then(() => {
+ assert_not_equals(resumeState, 'pending',
+                      'resume promise should settle before suspend promise.');
+ if (resumeState == 'fulfilled') {
+ ++ctx.expectedStateChangeCount;
+ }
+ ctx.expectedState = 'suspended';
+ assert_equals(ctx.state, 'suspended', 'state on suspend fulfilled.');
+ });
+ await resume;
+ await suspend;
+ await stateChangesDone;
+};
+
+// Repeat the test because Gecko uses different code when there is more than
+// one AudioContext. The third run provides time to check that no further
+// state changes from the second run are pending.
+for (const testCount of [1, 2, 3]) {
+ promise_test(() => { return doTest(testCount); }, `Iteration ${testCount}`);
+}
+promise_test(async () => changeCountingTest.done(), 'Stop waiting');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html
new file mode 100644
index 0000000000..b9328ae95d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/suspend-with-navigation.html
@@ -0,0 +1,65 @@
+<!doctype html>
+<meta name="timeout" content="long">
+<title>AudioContext.suspend() with navigation</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/common/utils.js"></script>
+<script src="/common/dispatcher/dispatcher.js"></script>
+<script src="/html/browsers/browsing-the-web/back-forward-cache/resources/helper.sub.js"></script>
+<script>
+'use strict';
+runBfcacheTest({
+ funcBeforeNavigation: async () => {
+ window.promise_event = (target, name) => {
+ return new Promise(resolve => target[`on${name}`] = resolve);
+ };
+ window.promise_source_ended = (audioCtx) => {
+ const source = new ConstantSourceNode(audioCtx);
+ source.start(0);
+ source.stop(audioCtx.currentTime + 1/audioCtx.sampleRate);
+ return promise_event(source, "ended");
+ };
+
+ window.suspended_ctx = new AudioContext();
+ // Perform the equivalent of test_driver.bless() to request a user gesture
+ // for when the test is run from a browser. test_driver would need to be
+ // able to postMessage() to the test context, which is not available due
+ // to window.open() being called with noopener (for back/forward cache).
+ // Audio autoplay is expected to be allowed when run through webdriver
+ // from `wpt run`.
+ let button = document.createElement('button');
+ button.innerHTML = 'This test requires user interaction.<br />' +
+ 'Please click here to allow AudioContext.';
+ document.body.appendChild(button);
+ button.addEventListener('click', () => {
+ document.body.removeChild(button);
+ suspended_ctx.resume();
+ }, {once: true});
+ // Wait for user gesture, if required.
+ await suspended_ctx.resume();
+ await suspended_ctx.suspend();
+ window.ended_promise = promise_source_ended(suspended_ctx);
+ },
+ funcAfterAssertion: async (pageA) => {
+ const state = await pageA.execute_script(() => suspended_ctx.state);
+ assert_equals(state, 'suspended', 'state after back()');
+ const first_ended = await pageA.execute_script(async () => {
+ // Wait for an ended event from a running AudioContext to provide enough
+ // time to check that the ended event has not yet been dispatched from
+ // the suspended ctx.
+ const running_ctx = new AudioContext();
+ await running_ctx.resume();
+ return Promise.race([
+ ended_promise.then(() => 'suspended_ctx'),
+ promise_source_ended(running_ctx).then(() => 'running_ctx'),
+ ]);
+ });
+ assert_equals(first_ended, 'running_ctx',
+ 'AudioContext of first ended event');
+ await pageA.execute_script(() => {
+ window.suspended_ctx.resume();
+ return ended_promise;
+ });
+ },
+}, 'suspend() with navigation');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html
new file mode 100644
index 0000000000..9067e6869b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-channel-rules.html
@@ -0,0 +1,278 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode-channel-rules.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/mixing-rules.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let context = 0;
+ // Use a power of two to eliminate round-off converting frames to time.
+ let sampleRate = 32768;
+ let renderNumberOfChannels = 8;
+ let singleTestFrameLength = 8;
+ let testBuffers;
+
+ // A list of connections to an AudioNode input, each of which is to be
+ // used in one or more specific test cases. Each element in the list is a
+ // string, with the number of connections corresponding to the length of
+ // the string, and each character in the string is from '1' to '8'
+ // representing a 1 to 8 channel connection (from an AudioNode output).
+
+ // For example, the string "128" means 3 connections, having 1, 2, and 8
+ // channels respectively.
+
+ let connectionsList = [
+ '1', '2', '3', '4', '5', '6', '7', '8', '11', '12', '14', '18', '111',
+ '122', '123', '124', '128'
+ ];
+
+ // A list of mixing rules, each of which will be tested against all of the
+ // connections in connectionsList.
+ let mixingRulesList = [
+ {
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ },
+ {
+ channelCount: 4,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'speakers'
+ },
+
+ // Test up-down-mix to some explicit speaker layouts.
+ {
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ },
+ {
+ channelCount: 2,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ },
+ {
+ channelCount: 4,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ },
+ {
+ channelCount: 6,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ },
+
+ {
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'discrete'
+ },
+ {
+ channelCount: 4,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'discrete'
+ },
+ {
+ channelCount: 4,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'discrete'
+ },
+ {
+ channelCount: 8,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'discrete'
+ },
+ ];
+
+ let numberOfTests = mixingRulesList.length * connectionsList.length;
+
+ // Print out the information for an individual test case.
+ function printTestInformation(
+ testNumber, actualBuffer, expectedBuffer, frameLength, frameOffset) {
+ let actual = stringifyBuffer(actualBuffer, frameLength);
+ let expected =
+ stringifyBuffer(expectedBuffer, frameLength, frameOffset);
+ debug('TEST CASE #' + testNumber + '\n');
+ debug('actual channels:\n' + actual);
+ debug('expected channels:\n' + expected);
+ }
+
+ function scheduleTest(
+ testNumber, connections, channelCount, channelCountMode,
+ channelInterpretation) {
+ let mixNode = context.createGain();
+ mixNode.channelCount = channelCount;
+ mixNode.channelCountMode = channelCountMode;
+ mixNode.channelInterpretation = channelInterpretation;
+ mixNode.connect(context.destination);
+
+ for (let i = 0; i < connections.length; ++i) {
+ let connectionNumberOfChannels =
+ connections.charCodeAt(i) - '0'.charCodeAt(0);
+
+ let source = context.createBufferSource();
+ // Get a buffer with the right number of channels, converting from
+ // 1-based to 0-based index.
+ let buffer = testBuffers[connectionNumberOfChannels - 1];
+ source.buffer = buffer;
+ source.connect(mixNode);
+
+ // Start at the right offset.
+ let sampleFrameOffset = testNumber * singleTestFrameLength;
+ let time = sampleFrameOffset / sampleRate;
+ source.start(time);
+ }
+ }
+
+ function checkTestResult(
+ renderedBuffer, testNumber, connections, channelCount,
+ channelCountMode, channelInterpretation, should) {
+ let s = 'connections: ' + connections + ', ' + channelCountMode;
+
+ // channelCount is ignored in "max" mode.
+ if (channelCountMode == 'clamped-max' ||
+ channelCountMode == 'explicit') {
+ s += '(' + channelCount + ')';
+ }
+
+ s += ', ' + channelInterpretation;
+
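+        // Per the channel mixing rules: "max" uses the maximum of the input
+        // channel counts, "clamped-max" clamps that maximum to channelCount,
+        // and "explicit" uses channelCount directly.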
+ let computedNumberOfChannels = computeNumberOfChannels(
+ connections, channelCount, channelCountMode);
+
+ // Create a zero-initialized silent AudioBuffer with
+ // computedNumberOfChannels.
+ let destBuffer = context.createBuffer(
+ computedNumberOfChannels, singleTestFrameLength,
+ context.sampleRate);
+
+ // Mix all of the connections into the destination buffer.
+ for (let i = 0; i < connections.length; ++i) {
+ let connectionNumberOfChannels =
+ connections.charCodeAt(i) - '0'.charCodeAt(0);
+          // Convert from 1-based to 0-based index.
+          let sourceBuffer = testBuffers[connectionNumberOfChannels - 1];
+
+ if (channelInterpretation == 'speakers') {
+ speakersSum(sourceBuffer, destBuffer);
+ } else if (channelInterpretation == 'discrete') {
+ discreteSum(sourceBuffer, destBuffer);
+ } else {
+            throw new Error('Invalid channel interpretation!');
+ }
+ }
+
+ // Use this when debugging mixing rules.
+ // printTestInformation(testNumber, renderedBuffer, destBuffer,
+ // singleTestFrameLength, sampleFrameOffset);
+
+ // Validate that destBuffer matches the rendered output. We need to
+ // check the rendered output at a specific sample-frame-offset
+ // corresponding to the specific test case we're checking for based on
+ // testNumber.
+
+ let sampleFrameOffset = testNumber * singleTestFrameLength;
+ for (let c = 0; c < renderNumberOfChannels; ++c) {
+ let renderedData = renderedBuffer.getChannelData(c);
+ for (let frame = 0; frame < singleTestFrameLength; ++frame) {
+ let renderedValue = renderedData[frame + sampleFrameOffset];
+
+ let expectedValue = 0;
+ if (c < destBuffer.numberOfChannels) {
+ let expectedData = destBuffer.getChannelData(c);
+ expectedValue = expectedData[frame];
+ }
+
+ // We may need to add an epsilon in the comparison if we add more
+ // test vectors.
+ if (renderedValue != expectedValue) {
+                let message = s + ': rendered: ' + renderedValue +
+                    ' expected: ' + expectedValue + ' channel: ' + c +
+                    ' frame: ' + frame;
+                should(renderedValue, message).beEqualTo(expectedValue);
+ return;
+ }
+ }
+ }
+
+ should(true, s).beTrue();
+ }
+
+ function checkResult(buffer, should) {
+ // Sanity check result.
+ should(buffer.length, 'Rendered number of frames')
+ .beEqualTo(numberOfTests * singleTestFrameLength);
+ should(buffer.numberOfChannels, 'Rendered number of channels')
+ .beEqualTo(renderNumberOfChannels);
+
+ // Check all the tests.
+ let testNumber = 0;
+ for (let m = 0; m < mixingRulesList.length; ++m) {
+ let mixingRules = mixingRulesList[m];
+ for (let i = 0; i < connectionsList.length; ++i, ++testNumber) {
+ checkTestResult(
+ buffer, testNumber, connectionsList[i],
+ mixingRules.channelCount, mixingRules.channelCountMode,
+ mixingRules.channelInterpretation, should);
+ }
+ }
+ }
+
+ audit.define(
+ {label: 'test', description: 'Channel mixing rules for AudioNodes'},
+ function(task, should) {
+
+ // Create 8-channel offline audio context. Each test will render 8
+ // sample-frames starting at sample-frame position testNumber * 8.
+ let totalFrameLength = numberOfTests * singleTestFrameLength;
+ context = new OfflineAudioContext(
+ renderNumberOfChannels, totalFrameLength, sampleRate);
+
+ // Set destination to discrete mixing.
+ context.destination.channelCount = renderNumberOfChannels;
+ context.destination.channelCountMode = 'explicit';
+ context.destination.channelInterpretation = 'discrete';
+
+ // Create test buffers from 1 to 8 channels.
+ testBuffers = new Array();
+ for (let i = 0; i < renderNumberOfChannels; ++i) {
+ testBuffers[i] = createShiftedImpulseBuffer(
+ context, i + 1, singleTestFrameLength);
+ }
+
+ // Schedule all the tests.
+ let testNumber = 0;
+ for (let m = 0; m < mixingRulesList.length; ++m) {
+ let mixingRules = mixingRulesList[m];
+ for (let i = 0; i < connectionsList.length; ++i, ++testNumber) {
+ scheduleTest(
+ testNumber, connectionsList[i], mixingRules.channelCount,
+ mixingRules.channelCountMode,
+ mixingRules.channelInterpretation);
+ }
+ }
+
+ // Render then check results.
+ context.startRendering().then(buffer => {
+ checkResult(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html
new file mode 100644
index 0000000000..02caea667b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-method-chaining.html
@@ -0,0 +1,165 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode-connect-method-chaining.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // AudioNode dictionary with associated arguments.
+ let nodeDictionary = [
+ {name: 'Analyser'}, {name: 'BiquadFilter'}, {name: 'BufferSource'},
+ {name: 'ChannelMerger', args: [6]},
+ {name: 'ChannelSplitter', args: [6]}, {name: 'Convolver'},
+ {name: 'Delay', args: []}, {name: 'DynamicsCompressor'}, {name: 'Gain'},
+ {name: 'Oscillator'}, {name: 'Panner'},
+ {name: 'ScriptProcessor', args: [512, 1, 1]}, {name: 'StereoPanner'},
+ {name: 'WaveShaper'}
+ ];
+
+
+ function verifyReturnedNode(should, config) {
+ should(
+ config.destination === config.returned,
+ 'The return value of ' + config.desc + ' matches the destination ' +
+ config.returned.constructor.name)
+ .beEqualTo(true);
+ }
+
+      // Test utility for batch method checking: to test the 3 connect()
+      // method signatures, we create 3 dummy destinations.
+ // 1) .connect(GainNode)
+ // 2) .connect(BiquadFilterNode, output)
+ // 3) .connect(ChannelMergerNode, output, input)
+ function testConnectMethod(context, should, options) {
+ let source =
+ context['create' + options.name].apply(context, options.args);
+ let sourceName = source.constructor.name;
+
+ let destination1 = context.createGain();
+ verifyReturnedNode(should, {
+ source: source,
+ destination: destination1,
+ returned: source.connect(destination1),
+ desc: sourceName + '.connect(' + destination1.constructor.name + ')'
+ });
+
+ let destination2 = context.createBiquadFilter();
+ verifyReturnedNode(should, {
+ source: source,
+ destination: destination2,
+ returned: source.connect(destination2, 0),
+ desc:
+ sourceName + '.connect(' + destination2.constructor.name + ', 0)'
+ });
+
+ let destination3 = context.createChannelMerger();
+ verifyReturnedNode(should, {
+ source: source,
+ destination: destination3,
+ returned: source.connect(destination3, 0, 1),
+ desc: sourceName + '.connect(' + destination3.constructor.name +
+ ', 0, 1)'
+ });
+ }
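+
+      // Because connect() returns the destination node, the three
+      // signatures above compose into chains. A minimal sketch of the
+      // idea (not executed as part of this test):
+      //
+      //   source.connect(gainNode)
+      //       .connect(filterNode, 0)
+      //       .connect(mergerNode, 0, 1);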
+
+
+ let audit = Audit.createTaskRunner();
+
+ // Task: testing entries from the dictionary.
+ audit.define('from-dictionary', (task, should) => {
+ let context = new AudioContext();
+
+ for (let i = 0; i < nodeDictionary.length; i++)
+ testConnectMethod(context, should, nodeDictionary[i]);
+
+ task.done();
+ });
+
+ // Task: testing Media* nodes.
+ audit.define('media-group', (task, should) => {
+ let context = new AudioContext();
+
+ // Test MediaElementSourceNode needs an <audio> element.
+ let mediaElement = document.createElement('audio');
+ testConnectMethod(
+ context, should,
+ {name: 'MediaElementSource', args: [mediaElement]});
+
+        // MediaStreamAudioDestinationNode has no output, so method
+        // chaining via connect() isn't possible.
+
+        // MediaStreamAudioSourceNode requires a 'stream' object for its
+        // construction; one is available from a MediaStreamAudioDestinationNode.
+ let streamDestination = context.createMediaStreamDestination();
+ let stream = streamDestination.stream;
+ testConnectMethod(
+ context, should, {name: 'MediaStreamSource', args: [stream]});
+
+ task.done();
+ });
+
+ // Task: test the exception thrown by invalid operation.
+ audit.define('invalid-operation', (task, should) => {
+ let contextA = new AudioContext();
+ let contextB = new AudioContext();
+ let gain1 = contextA.createGain();
+ let gain2 = contextA.createGain();
+
+        // Test if the first connection throws correctly. The first gain node
+        // does not have a second output, so it should throw.
+ should(function() {
+ gain1.connect(gain2, 1).connect(contextA.destination);
+ }, 'Connecting with an invalid output').throw(DOMException, 'IndexSizeError');
+
+ // Test if the second connection throws correctly. The contextB's
+ // destination is not compatible with the nodes from contextA, thus the
+ // first connection succeeds but the second one should throw.
+ should(
+ function() {
+ gain1.connect(gain2).connect(contextB.destination);
+ },
+            'Connecting to a node from a different context')
+ .throw(DOMException, 'InvalidAccessError');
+
+ task.done();
+ });
+
+ // Task: verify if the method chaining actually works.
+ audit.define('verification', (task, should) => {
+ // We pick the lowest sample rate allowed to run the test efficiently.
+ let context = new OfflineAudioContext(1, 128, 8000);
+
+ let constantBuffer = createConstantBuffer(context, 1, 1.0);
+
+ let source = context.createBufferSource();
+ source.buffer = constantBuffer;
+ source.loop = true;
+
+ let gain1 = context.createGain();
+ gain1.gain.value = 0.5;
+ let gain2 = context.createGain();
+ gain2.gain.value = 0.25;
+
+ source.connect(gain1).connect(gain2).connect(context.destination);
+ source.start();
+
+ context.startRendering()
+ .then(function(buffer) {
+ should(
+ buffer.getChannelData(0),
+ 'The output of chained connection of gain nodes')
+ .beConstantValueOf(0.125);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html
new file mode 100644
index 0000000000..eca15dedfa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-order.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode-connect-order.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let sampleRate = 44100.0;
+ let renderLengthSeconds = 0.125;
+ let delayTimeSeconds = 0.1;
+
+ function createSinWaveBuffer(context, lengthInSeconds, frequency) {
+ let audioBuffer =
+ context.createBuffer(1, lengthInSeconds * sampleRate, sampleRate);
+
+ let n = audioBuffer.length;
+ let data = audioBuffer.getChannelData(0);
+
+ for (let i = 0; i < n; ++i) {
+ data[i] = Math.sin(frequency * 2 * Math.PI * i / sampleRate);
+ }
+
+ return audioBuffer;
+ }
+
+ audit.define(
+ {
+ label: 'Test connections',
+ description:
+ 'AudioNode connection order doesn\'t trigger assertion errors'
+ },
+ function(task, should) {
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer =
+ createSinWaveBuffer(context, renderLengthSeconds, 880);
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+ bufferSource.connect(context.destination);
+
+ let delay = context.createDelay();
+ delay.delayTime.value = delayTimeSeconds;
+
+          // We connect the delay node to the gain node before anything is
+          // connected to the delay node itself. We do this to try to trigger
+          // the ASSERT that might fire due to AudioNode connection order,
+          // especially when a gain node and a delay node are involved, e.g.
+          // https://bugs.webkit.org/show_bug.cgi?id=76685.
+
+ should(() => {
+ let gain = context.createGain();
+ gain.connect(context.destination);
+ delay.connect(gain);
+ }, 'Connecting nodes').notThrow();
+
+ bufferSource.start(0);
+
+ let promise = context.startRendering();
+
+ should(promise, 'OfflineContext startRendering()')
+ .beResolved()
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html
new file mode 100644
index 0000000000..3af44fb7af
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<title>Test the return value of connect when connecting two AudioNodes</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function(t) {
+ var context = new OfflineAudioContext(1, 1, 44100);
+ var g1 = context.createGain();
+ var g2 = context.createGain();
+ var rv = g1.connect(g2);
+ assert_equals(rv, g2);
+ var rv = g1.connect(g2);
+ assert_equals(rv, g2);
+}, "connect should return the node connected to.");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html
new file mode 100644
index 0000000000..0b09edd4a7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html
@@ -0,0 +1,221 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode-disconnect-audioparam.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let renderQuantum = 128;
+
+ let sampleRate = 44100;
+ let renderDuration = 0.5;
+ let disconnectTime = 0.5 * renderDuration;
+
+ let audit = Audit.createTaskRunner();
+
+ // Calculate the index for disconnection.
+ function getDisconnectIndex(disconnectTime) {
+ let disconnectIndex = disconnectTime * sampleRate;
+ disconnectIndex = renderQuantum *
+ Math.floor((disconnectIndex + renderQuantum - 1) / renderQuantum);
+ return disconnectIndex;
+ }
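+
+      // For example, with the values above: disconnectTime = 0.25 s at a
+      // 44100 Hz sample rate is 0.25 * 44100 = 11025 frames, which rounds
+      // up to the next render-quantum boundary: 128 * ceil(11025 / 128) =
+      // 11136.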
+
+ // Get the index of value change.
+ function getValueChangeIndex(array, targetValue) {
+ return array.findIndex(function(element, index) {
+ if (element === targetValue)
+ return true;
+ });
+ }
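+
+      // Note: the callback returns undefined (falsy) for non-matching
+      // elements, so findIndex() keeps scanning; it returns -1 if the
+      // target value never appears in |array|.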
+
+ // Task 1: test disconnect(AudioParam) method.
+ audit.define('disconnect(AudioParam)', (task, should) => {
+        // Create a buffer source with value [1] and connect it to two gain
+        // nodes in series. The output of the buffer source is halved (* 0.5)
+        // and then connected to the |.gain| AudioParam of each gain node.
+        //
+        // (1) bufferSource => gain1 => gain2
+        // (2) bufferSource => half => gain1.gain
+        // (3) half => gain2.gain
+        //
+        // This graph should produce an output of 2.25 (= 1 * 1.5 * 1.5).
+        // After disconnecting (3), it should produce 1.5.
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ let source = context.createBufferSource();
+ let buffer1ch = createConstantBuffer(context, 1, 1);
+ let half = context.createGain();
+ let gain1 = context.createGain();
+ let gain2 = context.createGain();
+
+ source.buffer = buffer1ch;
+ source.loop = true;
+ half.gain.value = 0.5;
+
+ source.connect(gain1);
+ gain1.connect(gain2);
+ gain2.connect(context.destination);
+ source.connect(half);
+
+ // Connecting |half| to both |gain1.gain| and |gain2.gain| amplifies the
+ // signal by 2.25 (= 1.5 * 1.5) because each gain node amplifies the
+ // signal by 1.5 (= 1.0 + 0.5).
+ half.connect(gain1.gain);
+ half.connect(gain2.gain);
+
+ source.start();
+
+ // Schedule the disconnection at the half of render duration.
+ context.suspend(disconnectTime).then(function() {
+ half.disconnect(gain2.gain);
+ context.resume();
+ });
+
+ context.startRendering()
+ .then(function(buffer) {
+ let channelData = buffer.getChannelData(0);
+ let disconnectIndex = getDisconnectIndex(disconnectTime);
+ let valueChangeIndex = getValueChangeIndex(channelData, 1.5);
+
+ // Expected values are: 1 * 1.5 * 1.5 -> 1 * 1.5 = [2.25, 1.5]
+ should(channelData, 'Channel #0').containValues([2.25, 1.5]);
+ should(valueChangeIndex, 'The index of value change')
+ .beEqualTo(disconnectIndex);
+ })
+ .then(() => task.done());
+ });
+
+ // Task 2: test disconnect(AudioParam, output) method.
+ audit.define('disconnect(AudioParam, output)', (task, should) => {
+        // Create a 2-channel buffer source with [1, 2] in each channel and
+        // make a serial connection through gain1 and gain2. Then halve the
+        // buffer source's output with a gain node and connect it to a
+        // 2-output splitter. Connect each output to one of the two gain
+        // AudioParams respectively.
+ //
+ // (1) bufferSource => gain1 => gain2
+ // (2) bufferSource => half => splitter(2)
+ // (3) splitter#0 => gain1.gain
+ // (4) splitter#1 => gain2.gain
+ //
+ // This graph should produce 3 (= 1 * 1.5 * 2) and 6 (= 2 * 1.5 * 2) for
+ // each channel. After disconnecting (4), it should output 1.5 and 3.
+ let context =
+ new OfflineAudioContext(2, renderDuration * sampleRate, sampleRate);
+ let source = context.createBufferSource();
+ let buffer2ch = createConstantBuffer(context, 1, [1, 2]);
+ let splitter = context.createChannelSplitter(2);
+ let half = context.createGain();
+ let gain1 = context.createGain();
+ let gain2 = context.createGain();
+
+ source.buffer = buffer2ch;
+ source.loop = true;
+ half.gain.value = 0.5;
+
+ source.connect(gain1);
+ gain1.connect(gain2);
+ gain2.connect(context.destination);
+
+        // |source| is originally [1, 2] but becomes [0.5, 1] after the 0.5 gain.
+ // Each splitter's output will be applied to |gain1.gain| and
+ // |gain2.gain| respectively in an additive fashion.
+ source.connect(half);
+ half.connect(splitter);
+
+ // This amplifies the signal by 1.5. (= 1.0 + 0.5)
+ splitter.connect(gain1.gain, 0);
+
+ // This amplifies the signal by 2. (= 1.0 + 1.0)
+ splitter.connect(gain2.gain, 1);
+
+ source.start();
+
+ // Schedule the disconnection at the half of render duration.
+ context.suspend(disconnectTime).then(function() {
+ splitter.disconnect(gain2.gain, 1);
+ context.resume();
+ });
+
+ context.startRendering()
+ .then(function(buffer) {
+ let channelData0 = buffer.getChannelData(0);
+ let channelData1 = buffer.getChannelData(1);
+
+ let disconnectIndex = getDisconnectIndex(disconnectTime);
+ let valueChangeIndexCh0 = getValueChangeIndex(channelData0, 1.5);
+ let valueChangeIndexCh1 = getValueChangeIndex(channelData1, 3);
+
+ // Expected values are: 1 * 1.5 * 2 -> 1 * 1.5 = [3, 1.5]
+ should(channelData0, 'Channel #0').containValues([3, 1.5]);
+ should(
+ valueChangeIndexCh0,
+ 'The index of value change in channel #0')
+ .beEqualTo(disconnectIndex);
+
+ // Expected values are: 2 * 1.5 * 2 -> 2 * 1.5 = [6, 3]
+ should(channelData1, 'Channel #1').containValues([6, 3]);
+ should(
+ valueChangeIndexCh1,
+ 'The index of value change in channel #1')
+ .beEqualTo(disconnectIndex);
+ })
+ .then(() => task.done());
+ });
+
+ // Task 3: exception checks.
+ audit.define('exceptions', (task, should) => {
+ let context = new AudioContext();
+ let gain1 = context.createGain();
+ let splitter = context.createChannelSplitter(2);
+ let gain2 = context.createGain();
+ let gain3 = context.createGain();
+
+ // Connect a splitter to gain nodes and merger so we can test the
+ // possible ways of disconnecting the nodes to verify that appropriate
+ // exceptions are thrown.
+ gain1.connect(splitter);
+ splitter.connect(gain2.gain, 0);
+ splitter.connect(gain3.gain, 1);
+ gain2.connect(gain3);
+ gain3.connect(context.destination);
+
+ // gain1 is not connected to gain3.gain. Exception should be thrown.
+ should(
+ function() {
+ gain1.disconnect(gain3.gain);
+ },
+ 'gain1.disconnect(gain3.gain)')
+ .throw(DOMException, 'InvalidAccessError');
+
+ // When the output index is good but the destination is invalid.
+ should(
+ function() {
+ splitter.disconnect(gain1.gain, 1);
+ },
+ 'splitter.disconnect(gain1.gain, 1)')
+ .throw(DOMException, 'InvalidAccessError');
+
+ // When both arguments are wrong, throw IndexSizeError first.
+ should(
+ function() {
+ splitter.disconnect(gain1.gain, 2);
+ },
+ 'splitter.disconnect(gain1.gain, 2)')
+ .throw(DOMException, 'IndexSizeError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html
new file mode 100644
index 0000000000..65b93222d1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect.html
@@ -0,0 +1,298 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode-disconnect.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Task 1: test disconnect() method.
+ audit.define('disconnect()', (task, should) => {
+
+ // Connect a source to multiple gain nodes, each connected to the
+ // destination. Then disconnect the source. The expected output should
+ // be all zeros since the source was disconnected.
+ let context = new OfflineAudioContext(1, 128, 44100);
+ let source = context.createBufferSource();
+ let buffer1ch = createConstantBuffer(context, 128, [1]);
+ let gain1 = context.createGain();
+ let gain2 = context.createGain();
+ let gain3 = context.createGain();
+
+ source.buffer = buffer1ch;
+
+ source.connect(gain1);
+ source.connect(gain2);
+ source.connect(gain3);
+ gain1.connect(context.destination);
+ gain2.connect(context.destination);
+ gain3.connect(context.destination);
+ source.start();
+
+ // This disconnects everything.
+ source.disconnect();
+
+ context.startRendering()
+ .then(function(buffer) {
+
+ // With everything disconnected, the result should be zero.
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(0);
+
+ })
+ .then(() => task.done());
+ });
+
+ // Task 2: test disconnect(output) method.
+ audit.define('disconnect(output)', (task, should) => {
+
+        // Create multiple connections from each output of a ChannelSplitter
+        // to a gain node. Then verify that disconnecting a single output of
+        // the splitter actually disconnects it.
+ let context = new OfflineAudioContext(1, 128, 44100);
+ let source = context.createBufferSource();
+ let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]);
+ let splitter = context.createChannelSplitter(3);
+ let sum = context.createGain();
+
+ source.buffer = buffer3ch;
+
+ source.connect(splitter);
+ splitter.connect(sum, 0);
+ splitter.connect(sum, 1);
+ splitter.connect(sum, 2);
+ sum.connect(context.destination);
+ source.start();
+
+ // This disconnects the second output.
+ splitter.disconnect(1);
+
+ context.startRendering()
+ .then(function(buffer) {
+
+ // The rendered channel should contain 4. (= 1 + 0 + 3)
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(4);
+
+ })
+ .then(() => task.done());
+ });
+
+ // Task 3: test disconnect(AudioNode) method.
+ audit.define('disconnect(AudioNode)', (task, should) => {
+
+ // Connect a source to multiple gain nodes. Then test if disconnecting a
+ // single destination selectively works correctly.
+ let context = new OfflineAudioContext(1, 128, 44100);
+ let source = context.createBufferSource();
+ let buffer1ch = createConstantBuffer(context, 128, [1]);
+ let gain1 = context.createGain();
+ let gain2 = context.createGain();
+ let gain3 = context.createGain();
+ let orphan = context.createGain();
+
+ source.buffer = buffer1ch;
+
+ source.connect(gain1);
+ source.connect(gain2);
+ source.connect(gain3);
+ gain1.connect(context.destination);
+ gain2.connect(context.destination);
+ gain3.connect(context.destination);
+ source.start();
+
+ source.disconnect(gain2);
+
+ context.startRendering()
+ .then(function(buffer) {
+
+              // The destination should receive the value 2. (= 1 + 0 + 1)
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(2);
+
+ })
+ .then(() => task.done());
+ });
+
+ // Task 4: test disconnect(AudioNode, output) method.
+ audit.define('disconnect(AudioNode, output)', (task, should) => {
+
+ // Connect a buffer with 2 channels with each containing 1 and 2
+ // respectively to a ChannelSplitter, then connect the splitter to 2
+ // gain nodes as shown below:
+ // (1) splitter#0 => gain1
+ // (2) splitter#0 => gain2
+ // (3) splitter#1 => gain2
+        // Then disconnect (2) and verify that selectively disconnecting a
+        // specified output from a destination node works correctly.
+ let context = new OfflineAudioContext(1, 128, 44100);
+ let source = context.createBufferSource();
+ let buffer2ch = createConstantBuffer(context, 128, [1, 2]);
+ let splitter = context.createChannelSplitter(2);
+ let gain1 = context.createGain();
+ let gain2 = context.createGain();
+
+ source.buffer = buffer2ch;
+
+ source.connect(splitter);
+ splitter.connect(gain1, 0); // gain1 gets channel 0.
+ splitter.connect(gain2, 0); // gain2 sums channel 0 and 1.
+ splitter.connect(gain2, 1);
+ gain1.connect(context.destination);
+ gain2.connect(context.destination);
+ source.start();
+
+ splitter.disconnect(gain2, 0); // Now gain2 gets [2]
+
+ context.startRendering()
+ .then(function(buffer) {
+
+ // The sum of gain1 and gain2 should produce value 3. (= 1 + 2)
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(3);
+
+ })
+ .then(() => task.done());
+ });
+
+ // Task 5: test disconnect(AudioNode, output, input) method.
+ audit.define('disconnect(AudioNode, output, input)', (task, should) => {
+
+ // Create a 3-channel buffer with [1, 2, 3] in each channel and then
+ // pass it through a splitter and a merger. Each input/output of the
+ // splitter and the merger is connected in a sequential order as shown
+ // below.
+ // (1) splitter#0 => merger#0
+ // (2) splitter#1 => merger#1
+ // (3) splitter#2 => merger#2
+        // Then disconnect (3) and verify that the channels contain 1, 2,
+        // and 0 respectively.
+ let context = new OfflineAudioContext(3, 128, 44100);
+ let source = context.createBufferSource();
+ let buffer3ch = createConstantBuffer(context, 128, [1, 2, 3]);
+ let splitter = context.createChannelSplitter(3);
+ let merger = context.createChannelMerger(3);
+
+ source.buffer = buffer3ch;
+
+ source.connect(splitter);
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+ splitter.connect(merger, 2, 2);
+ merger.connect(context.destination);
+ source.start();
+
+ splitter.disconnect(merger, 2, 2);
+
+ context.startRendering()
+ .then(function(buffer) {
+
+ // Each channel should have 1, 2, and 0 respectively.
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(1);
+ should(buffer.getChannelData(1), 'Channel #1')
+ .beConstantValueOf(2);
+ should(buffer.getChannelData(2), 'Channel #2')
+ .beConstantValueOf(0);
+
+ })
+ .then(() => task.done());
+ });
+
+ // Task 6: exception checks.
+ audit.define('exceptions', (task, should) => {
+ let context = new OfflineAudioContext(2, 128, 44100);
+ let gain1 = context.createGain();
+ let splitter = context.createChannelSplitter(2);
+ let merger = context.createChannelMerger(2);
+ let gain2 = context.createGain();
+ let gain3 = context.createGain();
+
+ // Connect a splitter to gain nodes and merger so we can test the
+ // possible ways of disconnecting the nodes to verify that appropriate
+ // exceptions are thrown.
+ gain1.connect(splitter);
+ splitter.connect(gain2, 0);
+ splitter.connect(gain3, 1);
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+ gain2.connect(gain3);
+ gain3.connect(context.destination);
+ merger.connect(context.destination);
+
+ // There is no output #2. An exception should be thrown.
+ should(function() {
+ splitter.disconnect(2);
+ }, 'splitter.disconnect(2)').throw(DOMException, 'IndexSizeError');
+
+        // Disconnecting an output that is already disconnected should not throw.
+ should(function() {
+ splitter.disconnect(1);
+ splitter.disconnect(1);
+ }, 'Disconnecting a connection twice').notThrow();
+
+        // gain1 is not connected to gain2. An exception should be thrown.
+ should(function() {
+ gain1.disconnect(gain2);
+ }, 'gain1.disconnect(gain2)').throw(DOMException, 'InvalidAccessError');
+
+ // gain1 and gain3 are not connected. An exception should be thrown.
+ should(function() {
+ gain1.disconnect(gain3);
+ }, 'gain1.disconnect(gain3)').throw(DOMException, 'InvalidAccessError');
+
+ // There is no output #2 in the splitter. An exception should be thrown.
+ should(function() {
+ splitter.disconnect(gain2, 2);
+ }, 'splitter.disconnect(gain2, 2)').throw(DOMException, 'IndexSizeError');
+
+ // The splitter and gain1 are not connected. An exception should be
+ // thrown.
+ should(function() {
+ splitter.disconnect(gain1, 0);
+ }, 'splitter.disconnect(gain1, 0)').throw(DOMException, 'InvalidAccessError');
+
+        // The splitter output #0 and the gain3 input #0 are not connected. An
+ // exception should be thrown.
+ should(function() {
+ splitter.disconnect(gain3, 0, 0);
+ }, 'splitter.disconnect(gain3, 0, 0)').throw(DOMException, 'InvalidAccessError');
+
+ // The output index is out of bound. An exception should be thrown.
+ should(function() {
+ splitter.disconnect(merger, 3, 0);
+ }, 'splitter.disconnect(merger, 3, 0)').throw(DOMException, 'IndexSizeError');
+
+ task.done();
+ });
+
+ audit.define('disabled-outputs', (task, should) => {
+ // See crbug.com/656652
+ let context = new OfflineAudioContext(2, 1024, 44100);
+ let g1 = context.createGain();
+ let g2 = context.createGain();
+ g1.connect(g2);
+ g1.disconnect(g2);
+ let g3 = context.createGain();
+ g2.connect(g3);
+ g1.connect(g2);
+ context.startRendering()
+ .then(function() {
+ // If we make it here, we passed.
+ should(true, 'Disabled outputs handled')
+                    .message('correctly', 'incorrectly');
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js
new file mode 100644
index 0000000000..89bdf2aa98
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-iframe.window.js
@@ -0,0 +1,14 @@
+test(function() {
+ const iframe =
+ document.createElementNS('http://www.w3.org/1999/xhtml', 'iframe');
+ document.body.appendChild(iframe);
+
+ // Create AudioContext and AudioNode from iframe
+ const context = new iframe.contentWindow.AudioContext();
+ const source = context.createOscillator();
+ source.connect(context.destination);
+
+  // The AudioContext should be put into the closed state after the iframe is destroyed
+ document.body.removeChild(iframe);
+ assert_equals(context.state, 'closed');
+}, 'Call a constructor from iframe page and then destroy the iframe');
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html
new file mode 100644
index 0000000000..0b57d27e8e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode.html
@@ -0,0 +1,93 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audionode.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <div id="description"></div>
+ <div id="console"></div>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context = 0;
+ let context2 = 0;
+ let context3 = 0;
+
+ audit.define(
+ {label: 'test', description: 'Basic tests for AudioNode API.'},
+ function(task, should) {
+
+ context = new AudioContext();
+ window.audioNode = context.createBufferSource();
+
+              // Check input and output numbers of the AudioBufferSourceNode.
+ should(audioNode.numberOfInputs, 'AudioBufferSource.numberOfInputs')
+ .beEqualTo(0);
+ should(
+ audioNode.numberOfOutputs, 'AudioBufferSource.numberOfOutputs')
+ .beEqualTo(1);
+
+ // Check input and output numbers of AudioDestinationNode
+ should(
+ context.destination.numberOfInputs,
+ 'AudioContext.destination.numberOfInputs')
+ .beEqualTo(1);
+ should(
+ context.destination.numberOfOutputs,
+ 'AudioContext.destination.numberOfOutputs')
+ .beEqualTo(0);
+
+ // Try calling connect() method with illegal values.
+ should(
+ () => audioNode.connect(0, 0, 0), 'audioNode.connect(0, 0, 0)')
+ .throw(TypeError);
+ should(
+ () => audioNode.connect(null, 0, 0),
+ 'audioNode.connect(null, 0, 0)')
+ .throw(TypeError);
+ should(
+ () => audioNode.connect(context.destination, 5, 0),
+ 'audioNode.connect(context.destination, 5, 0)')
+ .throw(DOMException, 'IndexSizeError');
+ should(
+ () => audioNode.connect(context.destination, 0, 5),
+ 'audioNode.connect(context.destination, 0, 5)')
+ .throw(DOMException, 'IndexSizeError');
+
+ should(
+ () => audioNode.connect(context.destination, 0, 0),
+ 'audioNode.connect(context.destination, 0, 0)')
+ .notThrow();
+
+ // Create a new context and try to connect the other context's node
+ // to this one.
+ context2 = new AudioContext();
+ should(
+ () => window.audioNode.connect(context2.destination),
+ 'Connecting a node to a different context')
+ .throw(DOMException, 'InvalidAccessError');
+
+ // 3-arg AudioContext doesn't create an offline context anymore.
+ should(
+ () => context3 = new AudioContext(1, 44100, 44100),
+ 'context3 = new AudioContext(1, 44100, 44100)')
+ .throw(TypeError);
+
+ // Ensure it is an EventTarget
+ should(
+ audioNode instanceof EventTarget, 'AudioNode is an EventTarget')
+ .beTrue();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html
new file mode 100644
index 0000000000..35cfca8e4e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/channel-mode-interp-basic.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Setting of channelCountMode and channelInterpretation
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Fairly arbitrary sample rate and number of frames, except the number of
+ // frames should be more than a few render quantums.
+ let sampleRate = 16000;
+ let renderFrames = 10 * 128;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('interp', (task, should) => {
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = context.createGain();
+
+ // Set a new interpretation and verify that it changed.
+ node.channelInterpretation = 'discrete';
+ let value = node.channelInterpretation;
+ should(value, 'node.channelInterpretation').beEqualTo('discrete');
+ node.connect(context.destination);
+
+ context.startRendering()
+ .then(function(buffer) {
+                // After rendering, the value should still be the newly set one.
+ should(
+ node.channelInterpretation,
+ 'After rendering node.channelInterpretation')
+ .beEqualTo('discrete');
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('mode', (task, should) => {
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = context.createGain();
+
+ // Set a new mode and verify that it changed.
+ node.channelCountMode = 'explicit';
+ let value = node.channelCountMode;
+ should(value, 'node.channelCountMode').beEqualTo('explicit');
+ node.connect(context.destination);
+
+ context.startRendering()
+ .then(function(buffer) {
+                // After rendering, the value should still be the newly set one.
+ should(
+ node.channelCountMode,
+ 'After rendering node.channelCountMode')
+ .beEqualTo('explicit');
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html
new file mode 100644
index 0000000000..f763d34787
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/different-contexts.html
@@ -0,0 +1,101 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Connections and disconnections with different contexts
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Different contexts to be used for testing.
+ let c1;
+ let c2;
+
+ audit.define(
+ {label: 'setup', description: 'Contexts for testing'},
+ (task, should) => {
+ should(() => {c1 = new AudioContext()}, 'c1 = new AudioContext()')
+ .notThrow();
+ should(() => {c2 = new AudioContext()}, 'c2 = new AudioContext()')
+ .notThrow();
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 1', description: 'Connect nodes between contexts'},
+ (task, should) => {
+ let g1;
+ let g2;
+ should(
+ () => {g1 = new GainNode(c1)}, 'Test 1: g1 = new GainNode(c1)')
+ .notThrow();
+ should(
+ () => {g2 = new GainNode(c2)}, 'Test 1: g2 = new GainNode(c2)')
+ .notThrow();
+ should(() => {g2.connect(g1)}, 'Test 1: g2.connect(g1)')
+ .throw(DOMException, 'InvalidAccessError');
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 2', description: 'Connect AudioParam between contexts'},
+ (task, should) => {
+ let g1;
+ let g2;
+ should(
+ () => {g1 = new GainNode(c1)}, 'Test 2: g1 = new GainNode(c1)')
+ .notThrow();
+ should(
+ () => {g2 = new GainNode(c2)}, 'Test 2: g2 = new GainNode(c2)')
+ .notThrow();
+ should(() => {g2.connect(g1.gain)}, 'Test 2: g2.connect(g1.gain)')
+ .throw(DOMException, 'InvalidAccessError');
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 3', description: 'Disconnect nodes between contexts'},
+ (task, should) => {
+ let g1;
+ let g2;
+ should(
+ () => {g1 = new GainNode(c1)}, 'Test 3: g1 = new GainNode(c1)')
+ .notThrow();
+ should(
+ () => {g2 = new GainNode(c2)}, 'Test 3: g2 = new GainNode(c2)')
+ .notThrow();
+ should(() => {g2.disconnect(g1)}, 'Test 3: g2.disconnect(g1)')
+ .throw(DOMException, 'InvalidAccessError');
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 4',
+ description: 'Disconnect AudioParam between contexts'
+ },
+ (task, should) => {
+ let g1;
+ let g2;
+ should(
+ () => {g1 = new GainNode(c1)}, 'Test 4: g1 = new GainNode(c1)')
+ .notThrow();
+ should(
+ () => {g2 = new GainNode(c2)}, 'Test 4: g2 = new GainNode(c2)')
+ .notThrow();
+ should(
+                  () => {g2.disconnect(g1.gain)}, 'Test 4: g2.disconnect(g1.gain)')
+ .throw(DOMException, 'InvalidAccessError');
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html
new file mode 100644
index 0000000000..ab527b6695
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/adding-events.html
@@ -0,0 +1,144 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Adding Events</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary power of two to eliminate round-off in computing time from
+ // frame.
+ const sampleRate = 8192;
+
+ audit.define(
+ {
+ label: 'linearRamp',
+ description: 'Insert linearRamp after running for some time'
+ },
+ (task, should) => {
+ testInsertion(should, {
+ method: 'linearRampToValueAtTime',
+ prefix: 'linearRamp'
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'expoRamp',
+ description: 'Insert expoRamp after running for some time'
+ },
+ (task, should) => {
+ testInsertion(should, {
+ method: 'exponentialRampToValueAtTime',
+ prefix: 'expoRamp'
+ }).then(() => task.done());
+ });
+
+ // Test insertion of an event in the middle of rendering.
+ //
+ // options dictionary:
+ // method - automation method to test
+ // prefix - string to use for prefixing messages
+ function testInsertion(should, options) {
+ let {method, prefix} = options;
+
+ // Channel 0 is the output for the test, and channel 1 is the
+ // reference output.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate});
+ let merger = new ChannelMergerNode(
+ context, {numberOfChannels: context.destination.channelCount});
+
+ merger.connect(context.destination);
+
+          // Initial and final values of the source node
+ let initialValue = 1;
+ let finalValue = 2;
+
+ // Set up the node for the automations under test
+ let src = new ConstantSourceNode(context, {offset: initialValue});
+ src.connect(merger, 0, 0);
+
+ // Set initial event to occur at this time. Keep it in the first
+ // render quantum.
+ const initialEventTime = 64 / context.sampleRate;
+ should(
+ () => src.offset.setValueAtTime(initialValue, initialEventTime),
+ `${prefix}: setValueAtTime(${initialValue}, ${initialEventTime})`)
+ .notThrow();
+
+ // Let time pass and then add a new event with time in the future.
+ let insertAtFrame = 512;
+ let insertTime = insertAtFrame / context.sampleRate;
+ let automationEndFrame = 1024 + 64;
+ let automationEndTime = automationEndFrame / context.sampleRate;
+ context.suspend(insertTime)
+ .then(() => {
+ should(
+ () => src.offset[method](finalValue, automationEndTime),
+ `${prefix}: At time ${insertTime} scheduling ${method}(${
+ finalValue}, ${automationEndTime})`)
+ .notThrow();
+ })
+ .then(() => context.resume());
+
+ // Set up graph for the reference result. Automate the source with
+ // the events scheduled from the beginning. Let the gain node
+ // simulate the insertion of the event above. This is done by
+ // setting the gain to 1 at the insertion time.
+ let srcRef = new ConstantSourceNode(context, {offset: 1});
+ let g = new GainNode(context, {gain: 0});
+ srcRef.connect(g).connect(merger, 0, 1);
+ srcRef.offset.setValueAtTime(initialValue, initialEventTime);
+ srcRef.offset[method](finalValue, automationEndTime);
+
+          // Allow everything through after |insertAtFrame| frames.
+ g.gain.setValueAtTime(1, insertTime);
+
+ // Go!
+ src.start();
+ srcRef.start();
+
+ return context.startRendering().then(audioBuffer => {
+ let actual = audioBuffer.getChannelData(0);
+ let expected = audioBuffer.getChannelData(1);
+
+          // Verify that the output is |initialValue| until we reach
+          // insertAtFrame. Ignore the reference data here because its
+          // gain gate stays 0 until the insertion time.
+ should(
+ actual.slice(0, insertAtFrame),
+ `${prefix}: output[0:${insertAtFrame - 1}]`)
+ .beConstantValueOf(initialValue);
+
+ // Verify ramp is correct by comparing it to the expected
+ // data.
+ should(
+ actual.slice(
+ insertAtFrame, automationEndFrame - insertAtFrame + 1),
+ `${prefix}: output[${insertAtFrame}:${
+ automationEndFrame - insertAtFrame}]`)
+ .beCloseToArray(
+ expected.slice(
+ insertAtFrame, automationEndFrame - insertAtFrame + 1),
+ {absoluteThreshold: 0, numberOfArrayElements: 0});
+
+ // Verify final output has the expected value
+ should(
+ actual.slice(automationEndFrame),
+ `${prefix}: output[${automationEndFrame}:]`)
+ .beConstantValueOf(finalValue);
+ })
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html
new file mode 100644
index 0000000000..0a8e7a7f2f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-cancel-and-hold.html
@@ -0,0 +1,855 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test CancelValuesAndHoldAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let renderDuration = 0.5;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'cancelTime', description: 'Test Invalid Values'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: 1,
+ sampleRate: 8000
+ });
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ should(
+ () => src.offset.cancelAndHoldAtTime(-1),
+ 'cancelAndHoldAtTime(-1)')
+ .throw(RangeError);
+
+              // These are TypeErrors because |cancelTime| is a
+              // double, not an unrestricted double.
+ should(
+ () => src.offset.cancelAndHoldAtTime(NaN),
+ 'cancelAndHoldAtTime(NaN)')
+ .throw(TypeError);
+
+ should(
+ () => src.offset.cancelAndHoldAtTime(Infinity),
+ 'cancelAndHoldAtTime(Infinity)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // The first few tasks test the cancellation of each relevant automation
+ // function. For the test, a simple linear ramp from 0 to 1 is used to
+ // start things off. Then the automation to be tested is scheduled and
+ // cancelled.
+
+ audit.define(
+ {label: 'linear', description: 'Cancel linearRampToValueAtTime'},
+ function(task, should) {
+ cancelTest(should, linearRampTest('linearRampToValueAtTime'), {
+ valueThreshold: 8.3998e-5,
+ curveThreshold: 5.9605e-5
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'exponential', description: 'Cancel exponentialRampAtTime'},
+ function(task, should) {
+ // Cancel an exponential ramp. The thresholds are experimentally
+ // determined.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ // After the linear ramp, schedule an exponential ramp to the end.
+              // (This is the event that will be cancelled.)
+ let v1 = 0.001;
+ let t1 = renderDuration;
+
+ g[0].gain.exponentialRampToValueAtTime(v1, t1);
+ g[1].gain.exponentialRampToValueAtTime(v1, t1);
+
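+              // Per the Web Audio spec, an exponential ramp from (t0, v0)
+              // to (t1, v1) has value
+              //   v(t) = v0 * (v1 / v0)^((t - t0) / (t1 - t0)),
+              // so the value held at cancelTime is computed below.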
+              let expectedConstant = Math.fround(
+ v0 * Math.pow(v1 / v0, (cancelTime - t0) / (t1 - t0)));
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'exponentialRampToValue(' + v1 + ', ' + t1 + ')',
+ summary: 'exponentialRampToValueAtTime',
+ };
+ }, {
+ valueThreshold: 1.8664e-6,
+ curveThreshold: 5.9605e-8
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'setTarget', description: 'Cancel setTargetAtTime'},
+ function(task, should) {
+ // Cancel a setTarget event.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ // At the end of the linear ramp, schedule a setTarget. (This is
+ // the event that will be cancelled.)
+ let v1 = 0;
+ let t1 = t0;
+ let timeConstant = 0.05;
+
+ g[0].gain.setTargetAtTime(v1, t1, timeConstant);
+ g[1].gain.setTargetAtTime(v1, t1, timeConstant);
+
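+              // Per the Web Audio spec, setTargetAtTime decays as
+              //   v(t) = v1 + (v0 - v1) * exp(-(t - t1) / timeConstant),
+              // so the value held at cancelTime is computed below
+              // (here t1 == t0).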
+              let expectedConstant = Math.fround(
+ v1 + (v0 - v1) * Math.exp(-(cancelTime - t0) / timeConstant));
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setTargetAtTime(' + v1 + ', ' + t1 + ', ' +
+ timeConstant + ')',
+ summary: 'setTargetAtTime',
+ };
+ }, {
+ valueThreshold: 4.5267e-7, // 1.1317e-7,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {label: 'setValueCurve', description: 'Cancel setValueCurveAtTime'},
+ function(task, should) {
+ // Cancel a setValueCurve event.
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+              // After the linear ramp, schedule a setValueCurveAtTime. (This is the
+ // event that will be cancelled.)
+ let v1 = 0;
+ let duration = renderDuration - t0;
+
+              // For simplicity, use a 2-point curve so we get a linearly interpolated
+ // result.
+ let curve = Float32Array.from([v0, 0]);
+
+ g[0].gain.setValueCurveAtTime(curve, t0, duration);
+ g[1].gain.setValueCurveAtTime(curve, t0, duration);
+
+ let index =
+ Math.floor((curve.length - 1) / duration * (cancelTime - t0));
+
+ let curvePointsPerFrame =
+ (curve.length - 1) / duration / sampleRate;
+ let virtualIndex =
+ (cancelTime - t0) * sampleRate * curvePointsPerFrame;
+
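+                // |virtualIndex| is the exact (fractional) curve position
+                // at cancelTime; |delta| below is its fractional part,
+                // used to linearly interpolate between the two curve
+                // points.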
+ let delta = virtualIndex - index;
+                let expectedConstant = curve[0] + (curve[1] - curve[0]) * delta;
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setValueCurveAtTime([' + curve + '], ' + t0 +
+ ', ' + duration + ')',
+ summary: 'setValueCurveAtTime',
+ };
+ }, {
+ valueThreshold: 9.5368e-9,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'setValueCurve after end',
+ description: 'Cancel setValueCurveAtTime after the end'
+ },
+ function(task, should) {
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ // Initialize values to 0.
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ // Schedule a short linear ramp to start things off.
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+              // After the linear ramp, schedule a setValueCurveAtTime. (This is the
+ // event that will be cancelled.) Make sure the curve ends before
+ // the cancellation time.
+ let v1 = 0;
+ let duration = cancelTime - t0 - 0.125;
+
+              // For simplicity, use a 2-point curve so we get a linearly interpolated
+ // result.
+ let curve = Float32Array.from([v0, 0]);
+
+ g[0].gain.setValueCurveAtTime(curve, t0, duration);
+ g[1].gain.setValueCurveAtTime(curve, t0, duration);
+
+              let expectedConstant = curve[1];
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setValueCurveAtTime([' + curve + '], ' + t0 +
+ ', ' + duration + ')',
+ summary: 'setValueCurveAtTime',
+ };
+ }, {
+ valueThreshold: 0,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ // Special case where we schedule a setTarget and there is no earlier
+ // automation event. This tests that we pick up the starting point
+ // correctly from the last setting of the AudioParam value attribute.
+
+
+ audit.define(
+ {
+ label: 'initial setTarget',
+ description: 'Cancel with initial setTargetAtTime'
+ },
+ function(task, should) {
+ cancelTest(should, function(g, v0, t0, cancelTime) {
+ let v1 = 0;
+ let timeConstant = 0.1;
+ g[0].gain.value = 1;
+ g[0].gain.setTargetAtTime(v1, t0, timeConstant);
+ g[1].gain.value = 1;
+ g[1].gain.setTargetAtTime(v1, t0, timeConstant);
+
+ let expectedConstant = Math.fround(
+ v1 + (v0 - v1) * Math.exp(-(cancelTime - t0) / timeConstant));
+
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage: 'setTargetAtTime(' + v1 + ', ' + t0 + ', ' +
+ timeConstant + ')',
+ summary: 'Initial setTargetAtTime',
+ };
+ }, {
+ valueThreshold: 3.1210e-6,
+ curveThreshold: 0
+ }).then(task.done.bind(task));
+ });
+
+ // Test automations scheduled after the call to cancelAndHoldAtTime.
+ // Very similar to the above tests, but we also schedule an event after
+      // cancelAndHoldAtTime and verify that the curve after cancellation has
+ // the correct values.
+
+ audit.define(
+ {
+ label: 'post cancel: Linear',
+ description: 'LinearRamp after cancelling'
+ },
+ function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be
+ // cancelled. Then schedule another linear ramp after the
+ // cancellation.
+ cancelTest(
+ should,
+ linearRampTest('Post cancellation linearRampToValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+ // Schedule the linear ramp on g[0], and do the same for g[2],
+ // using the starting point given by expectedConstant.
+ let v2 = 2;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.linearRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.linearRampToValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post linearRamp(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'post cancel: Exponential',
+ description: 'ExponentialRamp after cancelling'
+ },
+ function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be
+ // cancelled. Then schedule an exponential ramp after the
+ // cancellation.
+ cancelTest(
+ should,
+ linearRampTest('Post cancel exponentialRampToValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+ // Schedule the exponential ramp on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 2;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.exponentialRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.exponentialRampToValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post exponentialRamp(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: ValueCurve', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+ // Then schedule a setValueCurve after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setValueCurveAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+                  // Schedule the setValueCurve on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let t2 = cancelTime + 0.125;
+ let duration = 0.125;
+ let curve = Float32Array.from([.125, 2]);
+ g[0].gain.setValueCurveAtTime(curve, t2, duration);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setValueCurveAtTime(curve, t2, duration);
+ return {
+ constantEndTime: cancelTime,
+ message: 'Post setValueCurve([' + curve + '], ' + t2 + ', ' +
+ duration + ')',
+ errorThreshold: 8.3998e-5
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: setTarget', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+ // Then schedule a setTarget after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setTargetAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+                  // Schedule the setTarget on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 0.125;
+ let t2 = cancelTime + 0.125;
+ let timeConstant = 0.1;
+ g[0].gain.setTargetAtTime(v2, t2, timeConstant);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setTargetAtTime(v2, t2, timeConstant);
+ return {
+ constantEndTime: cancelTime + 0.125,
+ message: 'Post setTargetAtTime(' + v2 + ', ' + t2 + ', ' +
+ timeConstant + ')',
+ errorThreshold: 8.4037e-5
+ };
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('post cancel: setValue', function(task, should) {
+ // Run the cancel test using a linearRamp as the event to be cancelled.
+        // Then schedule a setValueAtTime after the cancellation.
+ cancelTest(
+ should, linearRampTest('Post cancel setValueAtTime'),
+ {valueThreshold: 8.3998e-5, curveThreshold: 5.9605e-8},
+ function(g, cancelTime, expectedConstant) {
+                  // Schedule the setValueAtTime on g[0], and do the same for
+ // g[2], using the starting point given by expectedConstant.
+ let v2 = 0.125;
+ let t2 = cancelTime + 0.125;
+ g[0].gain.setValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(expectedConstant, cancelTime);
+ g[2].gain.setValueAtTime(v2, t2);
+ return {
+ constantEndTime: cancelTime + 0.125,
+ message: 'Post setValueAtTime(' + v2 + ', ' + t2 + ')'
+ };
+ })
+ .then(task.done.bind(task));
+ });
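+      // A note on |constantEndTime| in the post-cancel tests above: it is
+      // the time up to which the output should stay at the held value
+      // before the newly scheduled automation (mirrored on channel 2)
+      // takes over.  That is |cancelTime| itself for the ramp and value
+      // curve events, and |cancelTime + 0.125| (the new event's start
+      // time) for setValueAtTime and setTargetAtTime.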
+
+ audit.define('cancel future setTarget', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setTargetAtTime(0, 0.75 * renderDuration, 0.1);
+ // Now cancel the effect of the setTarget.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ // Because the setTarget was cancelled, the output should be a
+ // constant.
+ should(actual, 'After cancelling future setTarget event, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel setTarget now', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setTargetAtTime(0, 0.5 * renderDuration, 0.1);
+ // Now cancel the effect of the setTarget.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ // Because the setTarget was cancelled, the output should be a
+ // constant.
+ should(
+ actual,
+ 'After cancelling setTarget event starting now, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel future setValueCurve', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setValueCurveAtTime([-1, 1], 0.75 * renderDuration, 0.1);
+        // Now cancel the effect of the setValueCurve.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+              // Because the setValueCurve was cancelled, the output should
+              // be a constant.
+ should(
+ actual, 'After cancelling future setValueCurve event, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define('cancel setValueCurve now', (task, should) => {
+ const context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ const src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ src.offset.setValueAtTime(0.5, 0);
+ src.offset.setValueCurveAtTime([-1, 1], 0.5 * renderDuration, 0.1);
+        // Now cancel the effect of the setValueCurve.
+ src.offset.cancelAndHoldAtTime(0.5 * renderDuration);
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+              // Because the setValueCurve was cancelled, the output should
+              // be a constant.
+ should(
+ actual,
+ 'After cancelling current setValueCurve event starting now, output')
+ .beConstantValueOf(0.5);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.define(
+ {
+ label: 'linear, cancel, linear, cancel, linear',
+ description: 'Schedules 3 linear ramps, cancelling 2 of them, '
+ + 'so that we end up with 2 cancel events next to each other'
+ },
+ (task, should) => {
+ cancelTest2(
+ should,
+ linearRampTest('1st linearRamp'),
+ {valueThreshold: 0, curveThreshold: 5.9605e-8},
+ (g, cancelTime, expectedConstant, cancelTime2) => {
+ // Ramp from first cancel time to the end will be cancelled at
+ // second cancel time.
+ const v1 = expectedConstant;
+ const t1 = cancelTime;
+ const v2 = 2;
+ const t2 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v2, t2);
+ g[2].gain.setValueAtTime(v1, t1);
+ g[2].gain.linearRampToValueAtTime(v2, t2);
+
+ const expectedConstant2 =
+ audioParamLinearRamp(cancelTime2, v1, t1, v2, t2);
+
+ return {
+ constantEndTime: cancelTime,
+ message: `2nd linearRamp(${v2}, ${t2})`,
+ expectedConstant2
+ };
+ },
+ (g, cancelTime2, expectedConstant2) => {
+ // Ramp from second cancel time to the end.
+ const v3 = 0;
+ const t3 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v3, t3);
+ g[3].gain.setValueAtTime(expectedConstant2, cancelTime2);
+ g[3].gain.linearRampToValueAtTime(v3, t3);
+ return {
+ constantEndTime2: cancelTime2,
+ message2: `3rd linearRamp(${v3}, ${t3})`,
+ };
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Common function for doing a linearRamp test. This just does a linear
+ // ramp from 0 to v0 at from time 0 to t0. Then another linear ramp is
+ // scheduled from v0 to 0 from time t0 to t1. This is the ramp that is to
+ // be cancelled.
+ function linearRampTest(message) {
+ return function(g, v0, t0, cancelTime) {
+ g[0].gain.setValueAtTime(0, 0);
+ g[1].gain.setValueAtTime(0, 0);
+ g[0].gain.linearRampToValueAtTime(v0, t0);
+ g[1].gain.linearRampToValueAtTime(v0, t0);
+
+ let v1 = 0;
+ let t1 = renderDuration;
+ g[0].gain.linearRampToValueAtTime(v1, t1);
+ g[1].gain.linearRampToValueAtTime(v1, t1);
+
+          let expectedConstant =
+              Math.fround(v0 + (v1 - v0) * (cancelTime - t0) / (t1 - t0));
+
+ return {
+ expectedConstant: expectedConstant,
+ autoMessage:
+ message + ': linearRampToValue(' + v1 + ', ' + t1 + ')',
+ summary: message,
+ };
+ }
+ }
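+      // For concreteness (illustration only; not used by the tests): with
+      // v0 = 1 and t0 = 0.01 as passed in by cancelTest, and assuming
+      // renderDuration = 1 so that cancelTime = 0.5, the held value would
+      // be Math.fround(1 + (0 - 1) * (0.5 - 0.01) / (1 - 0.01)), which is
+      // approximately 0.50505.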
+
+ // Run the cancellation test. A set of automations is created and
+ // canceled.
+ //
+ // |testerFunction| is a function that generates the automation to be
+ // tested. It is given an array of 3 gain nodes, the value and time of an
+ // initial linear ramp, and the time where the cancellation should occur.
+ // The function must do the automations for the first two gain nodes. It
+ // must return a dictionary with |expectedConstant| being the value at the
+ // cancellation time, |autoMessage| for message to describe the test, and
+ // |summary| for general summary message to be printed at the end of the
+ // test.
+ //
+ // |thresholdOptions| is a property bag that specifies the error threshold
+ // to use. |thresholdOptions.valueThreshold| is the error threshold for
+ // comparing the actual constant output after cancelling to the expected
+ // value. |thresholdOptions.curveThreshold| is the error threshold for
+ // comparing the actual and expected automation curves before the
+ // cancelation point.
+ //
+ // For cancellation tests, |postCancelTest| is a function that schedules
+ // some automation after the cancellation. It takes 3 arguments: an array
+ // of the gain nodes, the cancellation time, and the expected value at the
+ // cancellation time. This function must return a dictionary consisting
+      // of |constantEndTime| indicating when the held constant from
+ // cancellation stops being constant, |message| giving a summary of what
+ // automation is being used, and |errorThreshold| that is the error
+ // threshold between the expected curve and the actual curve.
+ //
+ function cancelTest(
+ should, testerFunction, thresholdOptions, postCancelTest) {
+ // Create a context with three channels. Channel 0 is the test channel
+ // containing the actual output that includes the cancellation of
+        // events. Channel 1 is the expected data up to the cancellation so we
+ // can verify the cancellation produced the correct result. Channel 2
+ // is for verifying events inserted after the cancellation so we can
+ // verify that automations are correctly generated after the
+ // cancellation point.
+ let context =
+ new OfflineAudioContext(3, renderDuration * sampleRate, sampleRate);
+
+ // Test source is a constant signal
+ let src = context.createBufferSource();
+ src.buffer = createConstantBuffer(context, 1, 1);
+ src.loop = true;
+
+ // We'll do the automation tests with three gain nodes. One (g0) will
+ // have cancelAndHoldAtTime and the other (g1) will not. g1 is
+ // used as the expected result for that automation up to the
+ // cancellation point. They should be the same. The third node (g2) is
+ // used for testing automations inserted after the cancellation point,
+ // if any. g2 is the expected result from the cancellation point to the
+ // end of the test.
+
+ let g0 = context.createGain();
+ let g1 = context.createGain();
+ let g2 = context.createGain();
+ let v0 = 1;
+ let t0 = 0.01;
+
+ let cancelTime = renderDuration / 2;
+
+ // Test automation here. The tester function is responsible for setting
+ // up the gain nodes with the desired automation for testing.
+        let autoResult = testerFunction([g0, g1, g2], v0, t0, cancelTime);
+ let expectedConstant = autoResult.expectedConstant;
+ let autoMessage = autoResult.autoMessage;
+ let summaryMessage = autoResult.summary;
+
+ // Cancel scheduled events somewhere in the middle of the test
+ // automation.
+ g0.gain.cancelAndHoldAtTime(cancelTime);
+
+        let constantEndTime;
+        let postResult;
+        if (postCancelTest) {
+          postResult =
+              postCancelTest([g0, g1, g2], cancelTime, expectedConstant);
+          constantEndTime = postResult.constantEndTime;
+        }
+
+        // Connect everything together (with a merger to make a three-channel
+        // result). Channel 0 is the test (with cancelAndHoldAtTime) and
+ // channel 1 is the reference (without cancelAndHoldAtTime).
+ // Channel 1 is used to verify that everything up to the cancellation
+ // has the correct values.
+ src.connect(g0);
+ src.connect(g1);
+ src.connect(g2);
+ let merger = context.createChannelMerger(3);
+ g0.connect(merger, 0, 0);
+ g1.connect(merger, 0, 1);
+ g2.connect(merger, 0, 2);
+ merger.connect(context.destination);
+
+ // Go!
+ src.start();
+
+ return context.startRendering().then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ // The actual output should be a constant from the cancel time to the
+ // end. We use the last value of the actual output as the constant,
+ // but we also want to compare that with what we thought it should
+ // really be.
+
+ let cancelFrame = Math.ceil(cancelTime * sampleRate);
+
+          // Verify that the curves up to the cancel time are "identical".
+          // They should be, but round-off may make them differ slightly
+          // due to the way cancelling is done.
+ let endFrame = Math.floor(cancelTime * sampleRate);
+ should(
+ actual.slice(0, endFrame),
+ autoMessage + ' up to time ' + cancelTime)
+ .beCloseToArray(
+ expected.slice(0, endFrame),
+ {absoluteThreshold: thresholdOptions.curveThreshold});
+
+ // Verify the output after the cancellation is a constant.
+ let actualTail;
+ let constantEndFrame;
+
+ if (postCancelTest) {
+ constantEndFrame = Math.ceil(constantEndTime * sampleRate);
+ actualTail = actual.slice(cancelFrame, constantEndFrame);
+ } else {
+ actualTail = actual.slice(cancelFrame);
+ }
+
+ let actualConstant = actual[cancelFrame];
+
+ should(
+ actualTail,
+ 'Cancelling ' + autoMessage + ' at time ' + cancelTime)
+ .beConstantValueOf(actualConstant);
+
+ // Verify that the constant is the value we expect.
+ should(
+ actualConstant,
+ 'Expected value for cancelling ' + autoMessage + ' at time ' +
+ cancelTime)
+ .beCloseTo(
+ expectedConstant,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify the curve after the constantEndTime matches our
+ // expectations.
+ if (postCancelTest) {
+ let c2 = buffer.getChannelData(2);
+ should(actual.slice(constantEndFrame), postResult.message)
+ .beCloseToArray(
+ c2.slice(constantEndFrame),
+ {absoluteThreshold: postResult.errorThreshold || 0});
+ }
+ });
+ }
+
+ // Similar to cancelTest, but does 2 cancels.
+ function cancelTest2(
+ should, testerFunction, thresholdOptions,
+ postCancelTest, postCancelTest2) {
+ // Channel 0: Actual output that includes the cancellation of events.
+ // Channel 1: Expected data up to the first cancellation.
+ // Channel 2: Expected data from 1st cancellation to 2nd cancellation.
+ // Channel 3: Expected data from 2nd cancellation to the end.
+ const context =
+ new OfflineAudioContext(4, renderDuration * sampleRate, sampleRate);
+
+ const src = context.createConstantSource();
+
+ // g0: Actual gain which will have cancelAndHoldAtTime called on it
+ // twice.
+ // g1: Expected gain from start to the 1st cancel.
+ // g2: Expected gain from 1st cancel to the 2nd cancel.
+ // g3: Expected gain from the 2nd cancel to the end.
+ const g0 = context.createGain();
+ const g1 = context.createGain();
+ const g2 = context.createGain();
+ const g3 = context.createGain();
+ const v0 = 1;
+ const t0 = 0.01;
+
+ const cancelTime1 = renderDuration * 0.5;
+ const cancelTime2 = renderDuration * 0.75;
+
+ // Run testerFunction to generate the 1st ramp.
+      const {expectedConstant, autoMessage} =
+          testerFunction([g0, g1, g2], v0, t0, cancelTime1);
+
+ // 1st cancel, cancelling the 1st ramp.
+ g0.gain.cancelAndHoldAtTime(cancelTime1);
+
+ // Run postCancelTest to generate the 2nd ramp.
+ const {
+ constantEndTime, message, errorThreshold = 0, expectedConstant2} =
+ postCancelTest(
+ [g0, g1, g2], cancelTime1, expectedConstant, cancelTime2);
+
+ // 2nd cancel, cancelling the 2nd ramp.
+ g0.gain.cancelAndHoldAtTime(cancelTime2);
+
+ // Run postCancelTest2 to generate the 3rd ramp.
+ const {constantEndTime2, message2} =
+ postCancelTest2([g0, g1, g2, g3], cancelTime2, expectedConstant2);
+
+ // Connect everything together
+ src.connect(g0);
+ src.connect(g1);
+ src.connect(g2);
+ src.connect(g3);
+ const merger = context.createChannelMerger(4);
+ g0.connect(merger, 0, 0);
+ g1.connect(merger, 0, 1);
+ g2.connect(merger, 0, 2);
+ g3.connect(merger, 0, 3);
+ merger.connect(context.destination);
+
+ // Go!
+ src.start();
+
+ return context.startRendering().then(function (buffer) {
+ const actual = buffer.getChannelData(0);
+ const expected1 = buffer.getChannelData(1);
+ const expected2 = buffer.getChannelData(2);
+ const expected3 = buffer.getChannelData(3);
+
+ const cancelFrame1 = Math.ceil(cancelTime1 * sampleRate);
+ const cancelFrame2 = Math.ceil(cancelTime2 * sampleRate);
+
+ const constantEndFrame1 = Math.ceil(constantEndTime * sampleRate);
+ const constantEndFrame2 = Math.ceil(constantEndTime2 * sampleRate);
+
+ const actualTail1 = actual.slice(cancelFrame1, constantEndFrame1);
+ const actualTail2 = actual.slice(cancelFrame2, constantEndFrame2);
+
+ const actualConstant1 = actual[cancelFrame1];
+ const actualConstant2 = actual[cancelFrame2];
+
+ // Verify first section curve
+ should(
+ actual.slice(0, cancelFrame1),
+ autoMessage + ' up to time ' + cancelTime1)
+ .beCloseToArray(
+ expected1.slice(0, cancelFrame1),
+ {absoluteThreshold: thresholdOptions.curveThreshold});
+
+ // Verify that a value was held after 1st cancel
+ should(
+ actualTail1,
+ 'Cancelling ' + autoMessage + ' at time ' + cancelTime1)
+ .beConstantValueOf(actualConstant1);
+
+ // Verify that held value after 1st cancel was correct
+ should(
+ actualConstant1,
+ 'Expected value for cancelling ' + autoMessage + ' at time ' +
+ cancelTime1)
+ .beCloseTo(
+ expectedConstant,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify middle section curve
+ should(actual.slice(constantEndFrame1, cancelFrame2), message)
+ .beCloseToArray(
+ expected2.slice(constantEndFrame1, cancelFrame2),
+ {absoluteThreshold: errorThreshold});
+
+ // Verify that a value was held after 2nd cancel
+ should(
+ actualTail2,
+ 'Cancelling ' + message + ' at time ' + cancelTime2)
+ .beConstantValueOf(actualConstant2);
+
+ // Verify that held value after 2nd cancel was correct
+ should(
+ actualConstant2,
+ 'Expected value for cancelling ' + message + ' at time ' +
+ cancelTime2)
+ .beCloseTo(
+ expectedConstant2,
+ {threshold: thresholdOptions.valueThreshold});
+
+ // Verify end section curve
+ should(actual.slice(constantEndFrame2), message2)
+ .beCloseToArray(
+ expected3.slice(constantEndFrame2),
+ {absoluteThreshold: errorThreshold || 0});
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html
new file mode 100644
index 0000000000..b5555b0137
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-close.html
@@ -0,0 +1,161 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test AudioParam events very close in time</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Largest sample rate that is required to be supported and is a power of
+ // two, to eliminate round-off as much as possible.
+ const sampleRate = 65536;
+
+ // Only need one render quantum for testing.
+ const testFrames = 128;
+
+ // Largest representable single-float number
+ const floatMax = Math.fround(3.4028234663852886e38);
+
+ // epspos is the smallest x such that 1 + x != 1
+ const epspos = 1.1102230246251568e-16;
+ // epsneg is the smallest x such that 1 - x != 1
+ const epsneg = 5.551115123125784e-17;
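+      // A quick way to sanity-check these constants (not executed here):
+      //   1 + epspos !== 1, while 1 + epspos / 2 === 1;
+      //   1 - epsneg !== 1, while 1 - epsneg / 2 === 1.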
+
+ audit.define(
+ {label: 'no-nan', description: 'NaN does not occur'},
+ (task, should) => {
+ const context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testFrames
+ });
+
+ const src0 = new ConstantSourceNode(context, {offset: 0});
+
+ // This should always succeed. We just want to print out a message
+ // that |src0| is a constant source node for the following
+ // processing.
+ should(src0, 'src0 = new ConstantSourceNode(context, {offset: 0})')
+ .beEqualTo(src0);
+
+ src0.connect(context.destination);
+
+ // Values for the first event (setValue). |time1| MUST be 0.
+ const time1 = 0;
+ const value1 = 10;
+
+            // Values for the second event (linearRamp). |value2| must be
+            // huge, and |time2| must be small enough that 1/|time2|
+            // overflows a single float. |time2| is the least positive
+            // single-precision float.
+ const value2 = floatMax;
+ const time2 = 1.401298464324817e-45;
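+            // With this choice, Math.fround(1 / time2) === Infinity, which
+            // is exactly the single-float overflow this test exercises.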
+
+ // These should always succeed; the messages are just informational
+ // to show the events that we scheduled.
+ should(
+ src0.offset.setValueAtTime(value1, time1),
+ `src0.offset.setValueAtTime(${value1}, ${time1})`)
+ .beEqualTo(src0.offset);
+ should(
+ src0.offset.linearRampToValueAtTime(value2, time2),
+ `src0.offset.linearRampToValueAtTime(${value2}, ${time2})`)
+ .beEqualTo(src0.offset);
+
+ src0.start();
+
+ context.startRendering()
+ .then(buffer => {
+ const output = buffer.getChannelData(0);
+
+ // Since time1 = 0, the output at frame 0 MUST be value1.
+ should(output[0], 'output[0]').beEqualTo(value1);
+
+ // Since time2 < 1, output from frame 1 and later must be a
+ // constant.
+                  should(output.slice(1), 'output[1:]')
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'interpolation', description: 'Interpolation of linear ramp'},
+ (task, should) => {
+ const context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testFrames
+ });
+
+ const src1 = new ConstantSourceNode(context, {offset: 0});
+
+ // This should always succeed. We just want to print out a message
+ // that |src1| is a constant source node for the following
+ // processing.
+ should(src1, 'src1 = new ConstantSourceNode(context, {offset: 0})')
+ .beEqualTo(src1);
+
+ src1.connect(context.destination);
+
+ const frame = 1;
+
+ // These time values are arranged so that time1 < frame/sampleRate <
+          // time2. This means we need to interpolate to get a value at the
+          // given frame.
+ //
+ // The values are not so important, but |value2| should be huge.
+ const time1 = frame * (1 - epsneg) / context.sampleRate;
+ const value1 = 1e15;
+
+ const time2 = frame * (1 + epspos) / context.sampleRate;
+ const value2 = floatMax;
+
+ should(
+ src1.offset.setValueAtTime(value1, time1),
+ `src1.offset.setValueAtTime(${value1}, ${time1})`)
+ .beEqualTo(src1.offset);
+ should(
+ src1.offset.linearRampToValueAtTime(value2, time2),
+ `src1.offset.linearRampToValueAtTime(${value2}, ${time2})`)
+ .beEqualTo(src1.offset);
+
+ src1.start();
+
+ context.startRendering()
+ .then(buffer => {
+ const output = buffer.getChannelData(0);
+
+ // Sanity check
+ should(time2 - time1, 'Event time difference')
+ .notBeEqualTo(0);
+
+ // Because 0 < time1 < 1, output must be 0 at time 0.
+ should(output[0], 'output[0]').beEqualTo(0);
+
+ // Because time1 < 1/sampleRate < time2, we need to
+ // interpolate the value between these times to determine the
+ // output at frame 1.
+ const t = frame / context.sampleRate;
+ const v = value1 +
+ (value2 - value1) * (t - time1) / (time2 - time1);
+
+ should(output[1], 'output[1]').beCloseTo(v, {threshold: 0});
+
+ // Because 1 < time2 < 2, the output at frame 2 and higher is
+ // constant.
+ should(output.slice(2), 'output[2:]')
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html
new file mode 100644
index 0000000000..b0455f86bc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-connect-audioratesignal.html
@@ -0,0 +1,103 @@
+<!DOCTYPE html>
+<!--
+Tests that an audio-rate signal (AudioNode output) can be connected to an
+AudioParam. Specifically, this tests that an audio-rate signal coming from an
+AudioBufferSourceNode playing an AudioBuffer containing a specific curve can be
+connected to an AudioGainNode's .gain attribute (an AudioParam). Another
+AudioBufferSourceNode will be the audio source having its gain changed. We load
+this one with an AudioBuffer containing a constant value of 1. Thus it's easy
+to check that the resultant signal should be equal to the gain-scaling curve.
+-->
+<html>
+ <head>
+ <title>
+ audioparam-connect-audioratesignal.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 1;
+
+ let context = 0;
+ let constantOneBuffer = 0;
+ let linearRampBuffer = 0;
+
+ function checkResult(renderedBuffer, should) {
+ let renderedData = renderedBuffer.getChannelData(0);
+ let expectedData = linearRampBuffer.getChannelData(0);
+ let n = renderedBuffer.length;
+
+ should(n, 'Rendered signal length').beEqualTo(linearRampBuffer.length);
+
+ // Check that the rendered result exactly matches the buffer used to
+ // control gain. This is because we're changing the gain of a signal
+ // having constant value 1.
+ let success = true;
+ for (let i = 0; i < n; ++i) {
+ if (renderedData[i] != expectedData[i]) {
+ success = false;
+ break;
+ }
+ }
+
+ should(
+ success,
+ 'Rendered signal exactly matches the audio-rate gain changing signal')
+ .beTrue();
+ }
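+      // Exact equality is a reasonable expectation here: the gain input is
+      // the ramp signal, the source is the constant 1, and multiplying a
+      // float by 1.0 is exact, so each output sample should be bit-identical
+      // to the corresponding ramp sample.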
+
+ audit.define('test', function(task, should) {
+ let sampleFrameLength = sampleRate * lengthInSeconds;
+
+ // Create offline audio context.
+ context = new OfflineAudioContext(1, sampleFrameLength, sampleRate);
+
+ // Create buffer used by the source which will have its gain controlled.
+ constantOneBuffer = createConstantBuffer(context, sampleFrameLength, 1);
+
+ // Create buffer used to control gain.
+ linearRampBuffer = createLinearRampBuffer(context, sampleFrameLength);
+
+ // Create the two sources.
+
+ let constantSource = context.createBufferSource();
+ constantSource.buffer = constantOneBuffer;
+
+ let gainChangingSource = context.createBufferSource();
+ gainChangingSource.buffer = linearRampBuffer;
+
+ // Create a gain node controlling the gain of constantSource and make
+ // the connections.
+ let gainNode = context.createGain();
+
+ // Intrinsic baseline gain of zero.
+ gainNode.gain.value = 0;
+
+ constantSource.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ // Connect an audio-rate signal to control the .gain AudioParam.
+ // This is the heart of what is being tested.
+ gainChangingSource.connect(gainNode.gain);
+
+ // Start both sources at time 0.
+ constantSource.start(0);
+ gainChangingSource.start(0);
+
+ context.startRendering().then(buffer => {
+ checkResult(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html
new file mode 100644
index 0000000000..982731d338
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exceptional-values.html
@@ -0,0 +1,240 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-exceptional-values.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+      // Context to use for all of the tests. The context isn't used for any
+      // processing; we just need one for creating a gain node, which is used
+      // by all of the tests.
+ let context;
+
+      // For these values, AudioParam methods should throw a TypeError
+      // because the values are not finite.
+ let nonFiniteValues = [Infinity, -Infinity, NaN];
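+      // (It is the WebIDL conversion to a restricted float/double argument
+      // that makes these throw a TypeError rather than a RangeError.)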
+
+ audit.define('initialize', (task, should) => {
+ should(() => {
+ // Context for testing. Rendering isn't done, so any valid values can
+ // be used here so might as well make them small.
+ context = new OfflineAudioContext(1, 1, 8000);
+ }, 'Creating context for testing').notThrow();
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test value',
+ description: 'Test non-finite arguments for AudioParam value'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the value of the automation.
+ let defaultFuncArg = (value) => [value, 1];
+
+ // Test the value parameter
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg}, {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (value) => [value, 1, 1]
+ }
+ ]);
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test time',
+ description: 'Test non-finite arguments for AudioParam time'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the time parameter of the automation.
+ let defaultFuncArg = (startTime) => [1, startTime];
+
+ // Test the time parameter
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg},
+ {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ // Test start time for setTarget
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (startTime) => [1, startTime, 1]
+ },
+ // Test time constant for setTarget
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (timeConstant) => [1, 1, timeConstant]
+ },
+ ]);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'test setValueCurve',
+ description: 'Test non-finite arguments for setValueCurveAtTime'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Just an array for use by setValueCurveAtTime. The length and
+ // contents of the array are not important.
+ let curve = new Float32Array(3);
+
+ doTests(should, gain, TypeError, nonFiniteValues, [
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (startTime) => [curve, startTime, 1]
+ },
+ ]);
+
+ // Non-finite values for the curve should signal an error
+ doTests(
+ should, gain, TypeError,
+ [[1, 2, Infinity, 3], [1, NaN, 2, 3]], [{
+ automationName: 'setValueCurveAtTime',
+ funcArg: (c) => [c, 1, 1]
+ }]);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'special cases 1',
+ description: 'Test exceptions for finite values'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ // Default method for generating the arguments for an automation
+ // method for testing the time parameter of the automation.
+ let defaultFuncArg = (startTime) => [1, startTime];
+
+ // Test the time parameter
+ let curve = new Float32Array(3);
+ doTests(should, gain, RangeError, [-1], [
+ {automationName: 'setValueAtTime', funcArg: defaultFuncArg},
+ {
+ automationName: 'linearRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: defaultFuncArg
+ },
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (startTime) => [1, startTime, 1]
+ },
+ // Test time constant
+ {
+ automationName: 'setTargetAtTime',
+ funcArg: (timeConstant) => [1, 1, timeConstant]
+ },
+ // startTime and duration for setValueCurve
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (startTime) => [curve, startTime, 1]
+ },
+ {
+ automationName: 'setValueCurveAtTime',
+ funcArg: (duration) => [curve, 1, duration]
+ },
+ ]);
+
+ // Two final tests for setValueCurve: duration must be strictly
+ // positive.
+ should(
+ () => gain.gain.setValueCurveAtTime(curve, 1, 0),
+ 'gain.gain.setValueCurveAtTime(curve, 1, 0)')
+ .throw(RangeError);
+ should(
+ () => gain.gain.setValueCurveAtTime(curve, 1, -1),
+ 'gain.gain.setValueCurveAtTime(curve, 1, -1)')
+ .throw(RangeError);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'special cases 2',
+            description: 'Test special cases for exponentialRamp'
+ },
+ (task, should) => {
+ let gain = context.createGain();
+
+ doTests(should, gain, RangeError, [0, -1e-100, 1e-100], [{
+ automationName: 'exponentialRampToValueAtTime',
+ funcArg: (value) => [value, 1]
+ }]);
+
+ task.done();
+ });
+
+ audit.run();
+
+ // Run test over the set of values in |testValues| for all of the
+ // automation methods in |testMethods|. The expected error type is
+ // |errorName|. |testMethods| is an array of dictionaries with attributes
+ // |automationName| giving the name of the automation method to be tested
+ // and |funcArg| being a function of one parameter that produces an array
+ // that will be used as the argument to the automation method.
+ function doTests(should, node, errorName, testValues, testMethods) {
+ testValues.forEach(value => {
+ testMethods.forEach(method => {
+ let args = method.funcArg(value);
+ let message = 'gain.gain.' + method.automationName + '(' +
+ argString(args) + ')';
+ should(() => node.gain[method.automationName](...args), message)
+ .throw(errorName);
+ });
+ });
+ }
+
+ // Specialized printer for automation arguments so that messages make
+ // sense. We assume the first element is either a number or an array. If
+ // it's an array, there are always three elements, and we want to print
+ // out the brackets for the array argument.
+ function argString(arg) {
+ if (typeof(arg[0]) === 'number') {
+ return arg.toString();
+ }
+
+ return '[' + arg[0] + '],' + arg[1] + ',' + arg[2];
+ }
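+      // For example (illustration only):
+      //   argString([0.5, 1])          -> "0.5,1"
+      //   argString([[1, 2, 3], 1, 1]) -> "[1,2,3],1,1"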
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html
new file mode 100644
index 0000000000..bec4c1286b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-exponentialRampToValueAtTime.html
@@ -0,0 +1,63 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.exponentialRampToValueAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and exponentialRampToValueAtTime() at regular
+ // intervals to set the starting and ending values for an exponential
+ // ramp. Each time interval has a ramp with a different starting and
+ // ending value so that there is a discontinuity at each time interval
+ // boundary. The discontinuity is for testing timing. Also, we alternate
+ // between an increasing and decreasing ramp for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 1.222e-5;
+
+ // The AudioGainNode starts with this value instead of the default value.
+ let initialValue = 100;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate an exponential ramp ending at time |endTime| with an ending
+ // value of |value|.
+      function generateRamp(value, startTime, endTime) {
+        // |startTime| is ignored because the exponential ramp
+        // uses the value from the setValueAtTime() call above.
+        gainNode.gain.exponentialRampToValueAtTime(value, endTime);
+      }
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'AudioParam exponentialRampToValueAtTime() functionality'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, initialValue, setValue,
+ generateRamp, 'exponentialRampToValueAtTime()', maxAllowedError,
+ createExponentialRampArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html
new file mode 100644
index 0000000000..d8f38eeba0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ AudioParam with Huge End Time
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // Render for some small (but fairly arbitrary) time.
+ let renderDuration = 0.125;
+ // Any huge time value that won't fit in a size_t (2^64 on a 64-bit
+ // machine).
+ let largeTime = 1e300;
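+      // Note that 1e300 is still a finite double, so it passes the
+      // non-finite (TypeError) argument checks; it is simply far too large
+      // to convert to a valid frame count.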
+
+ let audit = Audit.createTaskRunner();
+
+      // See crbug.com/582701. Create an AudioParam automation with a huge
+      // end time and verify that the automation runs. We don't care about
+      // the actual results, just that rendering completes.
+
+ // Test linear ramp with huge end time
+ audit.define('linearRamp', (task, should) => {
+ let graph = createGraph();
+ graph.gain.gain.linearRampToValueAtTime(0.1, largeTime);
+
+ graph.source.start();
+ graph.context.startRendering()
+ .then(function(buffer) {
+ should(true, 'linearRampToValue(0.1, ' + largeTime + ')')
+ .message('successfully rendered', 'unsuccessfully rendered');
+ })
+ .then(() => task.done());
+ });
+
+ // Test exponential ramp with huge end time
+ audit.define('exponentialRamp', (task, should) => {
+ let graph = createGraph();
+ graph.gain.gain.exponentialRampToValueAtTime(.1, largeTime);
+
+ graph.source.start();
+ graph.context.startRendering()
+ .then(function(buffer) {
+ should(true, 'exponentialRampToValue(0.1, ' + largeTime + ')')
+ .message('successfully rendered', 'unsuccessfully rendered');
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ // Create the graph and return the context, the source, and the gain node.
+ function createGraph() {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ let src = context.createBufferSource();
+ src.buffer = createConstantBuffer(context, 1, 1);
+ src.loop = true;
+ let gain = context.createGain();
+ src.connect(gain);
+ gain.connect(context.destination);
+ gain.gain.setValueAtTime(1, 0.1 / sampleRate);
+
+ return {context: context, gain: gain, source: src};
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html
new file mode 100644
index 0000000000..509c254d92
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-linearRampToValueAtTime.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.linearRampToValueAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and linearRampToValueAtTime() at regular intervals to
+ // set the starting and ending values for a linear ramp. Each time
+ // interval has a ramp with a different starting and ending value so that
+ // there is a discontinuity at each time interval boundary. The
+ // discontinuity is for testing timing. Also, we alternate between an
+ // increasing and decreasing ramp for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 1.865e-6;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate a linear ramp ending at time |endTime| with an ending value of
+ // |value|.
+      function generateRamp(value, startTime, endTime) {
+        // |startTime| is ignored because the linear ramp uses the value
+        // from the setValueAtTime() call above.
+        gainNode.gain.linearRampToValueAtTime(value, endTime);
+      }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam linearRampToValueAtTime() functionality'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, 1, setValue, generateRamp,
+ 'linearRampToValueAtTime()', maxAllowedError,
+ createLinearRampArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html
new file mode 100644
index 0000000000..ffe46035fd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-method-chaining.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 8000;
+
+ // Create a dummy array for setValueCurveAtTime method.
+ let curveArray = new Float32Array([5.0, 6.0]);
+
+ // AudioNode dictionary with associated dummy arguments.
+ let methodDictionary = [
+ {name: 'setValueAtTime', args: [1.0, 0.0]},
+ {name: 'linearRampToValueAtTime', args: [2.0, 1.0]},
+ {name: 'exponentialRampToValueAtTime', args: [3.0, 2.0]},
+ {name: 'setTargetAtTime', args: [4.0, 2.0, 0.5]},
+ {name: 'setValueCurveAtTime', args: [curveArray, 5.0, 1.0]},
+ {name: 'cancelScheduledValues', args: [6.0]}
+ ];
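+      // Each of these methods returns the AudioParam it was called on, so
+      // calls can also be chained, e.g. (illustration only):
+      //   param.setValueAtTime(1.0, 0.0).linearRampToValueAtTime(2.0, 1.0);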
+
+ let audit = Audit.createTaskRunner();
+
+ // Task: testing entries from the dictionary.
+ audit.define('from-dictionary', (task, should) => {
+ let context = new AudioContext();
+
+ methodDictionary.forEach(function(method) {
+ let sourceParam = context.createGain().gain;
+ should(
+ sourceParam === sourceParam[method.name](...method.args),
+ 'The return value of ' + sourceParam.constructor.name + '.' +
+ method.name + '()' +
+ ' matches the source AudioParam')
+ .beEqualTo(true);
+
+ });
+
+ task.done();
+ });
+
+ // Task: test method chaining with invalid operation.
+ audit.define('invalid-operation', (task, should) => {
+ let context = new OfflineAudioContext(1, sampleRate, sampleRate);
+ let osc = context.createOscillator();
+ let amp1 = context.createGain();
+ let amp2 = context.createGain();
+
+ osc.connect(amp1);
+ osc.connect(amp2);
+ amp1.connect(context.destination);
+ amp2.connect(context.destination);
+
+ // The first operation fails with an exception, thus the second one
+ // should not have effect on the parameter value. Instead, it should
+ // maintain the default value of 1.0.
+ should(
+ function() {
+ amp1.gain.setValueAtTime(0.25, -1.0)
+ .linearRampToValueAtTime(2.0, 1.0);
+ },
+ 'Calling setValueAtTime() with a negative end time')
+ .throw(RangeError);
+
+ // The first operation succeeds but the second fails due to zero target
+ // value for the exponential ramp. Thus only the first should have
+ // effect on the parameter value, setting the value to 0.5.
+ should(
+ function() {
+ amp2.gain.setValueAtTime(0.5, 0.0).exponentialRampToValueAtTime(
+ 0.0, 1.0);
+ },
+ 'Calling exponentialRampToValueAtTime() with a zero target value')
+ .throw(RangeError);
+
+ osc.start();
+ osc.stop(1.0);
+
+ context.startRendering()
+ .then(function(buffer) {
+ should(amp1.gain.value, 'The gain value of the first gain node')
+ .beEqualTo(1.0);
+ should(amp2.gain.value, 'The gain value of the second gain node')
+ .beEqualTo(0.5);
+ })
+ .then(() => task.done());
+ });
+
+      // Task: verify that method chaining actually works. Create an arbitrary
+ // envelope and compare the result with the expected one created by JS
+ // code.
+ audit.define('verification', (task, should) => {
+ let context = new OfflineAudioContext(1, sampleRate * 4, sampleRate);
+ let constantBuffer = createConstantBuffer(context, 1, 1.0);
+
+ let source = context.createBufferSource();
+ source.buffer = constantBuffer;
+ source.loop = true;
+
+ let envelope = context.createGain();
+
+ source.connect(envelope);
+ envelope.connect(context.destination);
+
+ envelope.gain.setValueAtTime(0.0, 0.0)
+ .linearRampToValueAtTime(1.0, 1.0)
+ .exponentialRampToValueAtTime(0.5, 2.0)
+ .setTargetAtTime(0.001, 2.0, 0.5);
+
+ source.start();
+
+ context.startRendering()
+ .then(function(buffer) {
+ let expectedEnvelope =
+ createLinearRampArray(0.0, 1.0, 0.0, 1.0, sampleRate);
+ expectedEnvelope.push(...createExponentialRampArray(
+ 1.0, 2.0, 1.0, 0.5, sampleRate));
+ expectedEnvelope.push(...createExponentialApproachArray(
+ 2.0, 4.0, 0.5, 0.001, sampleRate, 0.5));
+
+            // There are slight differences between the JS implementation of
+            // the AudioParam envelope and the internal implementation (e.g.
+            // double/float conversion and rounding), so the error threshold
+            // was adjusted empirically through local testing.
+ should(buffer.getChannelData(0), 'The rendered envelope')
+ .beCloseToArray(
+ expectedEnvelope, {absoluteThreshold: 4.0532e-6});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html
new file mode 100644
index 0000000000..517fc6e956
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-nominal-range.html
@@ -0,0 +1,497 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam Nominal Range Values
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Some arbitrary sample rate for the offline context.
+ let sampleRate = 48000;
+
+      // The actual contexts to use. Generally use the offline context for
+      // testing, except for the media nodes, which require an AudioContext.
+      let offlineContext;
+      let onlineContext;
+
+ // The set of all methods that we've tested for verifying that we tested
+ // all of the necessary objects.
+ let testedMethods = new Set();
+
+ // The most positive single float value (the value just before infinity).
+      // Be careful when changing this value! JavaScript only uses double
+      // floats, so the value here should be the max single-float value,
+      // converted directly to a double-float value. This also depends on
+      // JavaScript reading this value and producing the desired double-float
+      // value correctly.
+ let mostPositiveFloat = 3.4028234663852886e38;
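+      // For example, Math.fround(mostPositiveFloat) === mostPositiveFloat,
+      // while Math.fround(3.5e38) === Infinity.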
+
+ let audit = Audit.createTaskRunner();
+
+ // Array describing the tests that should be run. |testOfflineConfigs| is
+ // for tests that can use an offline context. |testOnlineConfigs| is for
+ // tests that need to use an online context. Offline contexts are
+ // preferred when possible.
+ let testOfflineConfigs = [
+ {
+ // The name of the method to create the particular node to be tested.
+ creator: 'createGain',
+
+ // Any args to pass to the creator function.
+ args: [],
+
+          // The min/max limits for each AudioParam of the node. This is a
+          // dictionary whose keys are the names of each AudioParam in the
+          // node. Don't define this if the node doesn't have any AudioParam
+          // attributes.
+ limits: {
+ gain: {
+ // The expected min and max values for this AudioParam.
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat
+ }
+ }
+ },
+ {
+ creator: 'createDelay',
+          // Just specify a non-default value for the maximum delay so we can
+          // make sure the limits are set correctly.
+ args: [1.5],
+ limits: {delayTime: {minValue: 0, maxValue: 1.5}}
+ },
+ {
+ creator: 'createBufferSource',
+ args: [],
+ limits: {
+ playbackRate:
+ {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat},
+ detune: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat}
+ }
+ },
+ {
+ creator: 'createStereoPanner',
+ args: [],
+ limits: {pan: {minValue: -1, maxValue: 1}}
+ },
+ {
+ creator: 'createDynamicsCompressor',
+ args: [],
+          // Do not set limits for reduction; it's currently an AudioParam
+          // but should be a float. So let the test fail for reduction. When
+          // reduction is changed, this test will then correctly pass.
+ limits: {
+ threshold: {minValue: -100, maxValue: 0},
+ knee: {minValue: 0, maxValue: 40},
+ ratio: {minValue: 1, maxValue: 20},
+ attack: {minValue: 0, maxValue: 1},
+ release: {minValue: 0, maxValue: 1}
+ }
+ },
+ {
+ creator: 'createBiquadFilter',
+ args: [],
+ limits: {
+ gain: {
+ minValue: -mostPositiveFloat,
+ // This complicated expression is used to get all the arithmetic
+ // to round to the correct single-precision float value for the
+ // desired max. This also assumes that the implication computes
+              // desired max. This also assumes that the implementation computes
+ maxValue:
+ Math.fround(40 * Math.fround(Math.log10(mostPositiveFloat)))
+ },
+ Q: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat},
+ frequency: {minValue: 0, maxValue: sampleRate / 2},
+ detune: {
+ minValue: -Math.fround(1200 * Math.log2(mostPositiveFloat)),
+ maxValue: Math.fround(1200 * Math.log2(mostPositiveFloat))
+ }
+ }
+ },
+ {
+ creator: 'createOscillator',
+ args: [],
+ limits: {
+ frequency: {minValue: -sampleRate / 2, maxValue: sampleRate / 2},
+ detune: {
+ minValue: -Math.fround(1200 * Math.log2(mostPositiveFloat)),
+ maxValue: Math.fround(1200 * Math.log2(mostPositiveFloat))
+ }
+ }
+ },
+ {
+ creator: 'createPanner',
+ args: [],
+ limits: {
+ positionX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ orientationZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ }
+ },
+ },
+ {
+ creator: 'createConstantSource',
+ args: [],
+ limits: {
+ offset: {minValue: -mostPositiveFloat, maxValue: mostPositiveFloat}
+ }
+ },
+        // These nodes don't have AudioParams, but we want to test them
+        // anyway. Any arguments for the constructor are pretty much
+        // arbitrary; they just need to be valid.
+ {
+ creator: 'createBuffer',
+ args: [1, 1, sampleRate],
+ },
+ {creator: 'createIIRFilter', args: [[1, 2], [1, .9]]},
+ {
+ creator: 'createWaveShaper',
+ args: [],
+ },
+ {
+ creator: 'createConvolver',
+ args: [],
+ },
+ {
+ creator: 'createAnalyser',
+ args: [],
+ },
+ {
+ creator: 'createScriptProcessor',
+ args: [0],
+ },
+ {
+ creator: 'createPeriodicWave',
+ args: [Float32Array.from([0, 0]), Float32Array.from([1, 0])],
+ },
+ {
+ creator: 'createChannelSplitter',
+ args: [],
+ },
+ {
+ creator: 'createChannelMerger',
+ args: [],
+ },
+ ];
+
+ let testOnlineConfigs = [
+ {creator: 'createMediaElementSource', args: [new Audio()]},
+ {creator: 'createMediaStreamDestination', args: []}
+ // Can't currently test MediaStreamSource because we're using an offline
+ // context.
+ ];
+
+ // Create the contexts so we can use it in the following test.
+ audit.define('initialize', (task, should) => {
+ // Just any context so that we can create the nodes.
+ should(() => {
+ offlineContext = new OfflineAudioContext(1, 1, sampleRate);
+ }, 'Create offline context for tests').notThrow();
+ should(() => {
+ onlineContext = new AudioContext();
+ }, 'Create online context for tests').notThrow();
+ task.done();
+ });
+
+ // Create a task for each entry in testOfflineConfigs
+ for (let test in testOfflineConfigs) {
+        let config = testOfflineConfigs[test];
+ audit.define('Offline ' + config.creator, (function(c) {
+ return (task, should) => {
+ let node = offlineContext[c.creator](...c.args);
+ testLimits(should, c.creator, node, c.limits);
+ task.done();
+ };
+ })(config));
+ }
+
+ for (let test in testOnlineConfigs) {
+        let config = testOnlineConfigs[test];
+ audit.define('Online ' + config.creator, (function(c) {
+ return (task, should) => {
+ let node = onlineContext[c.creator](...c.args);
+ testLimits(should, c.creator, node, c.limits);
+ task.done();
+ };
+ })(config));
+ }
+
+ // Test the AudioListener params that were added for the automated Panner
+ audit.define('AudioListener', (task, should) => {
+ testLimits(should, '', offlineContext.listener, {
+ positionX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ positionZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ forwardZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upX: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upY: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ },
+ upZ: {
+ minValue: -mostPositiveFloat,
+ maxValue: mostPositiveFloat,
+ }
+ });
+ task.done();
+ });
+
+ // Verify that we have tested all the create methods available on the
+ // context.
+ audit.define('verifyTests', (task, should) => {
+ let allNodes = new Set();
+ // Create the set of all "create" methods from the context.
+ for (let method in offlineContext) {
+ if (typeof offlineContext[method] === 'function' &&
+ method.substring(0, 6) === 'create') {
+ allNodes.add(method);
+ }
+ }
+
+ // Compute the difference between the set of all create methods on the
+ // context and the set of tests that we've run.
+ let diff = new Set([...allNodes].filter(x => !testedMethods.has(x)));
+
+ // Can't currently test a MediaStreamSourceNode, so remove it from the
+ // diff set.
+ diff.delete('createMediaStreamSource');
+
+ // It's a test failure if we didn't test all of the create methods in
+ // the context (except createMediaStreamSource, of course).
+ let output = [];
+ if (diff.size) {
+ for (let item of diff)
+ output.push(' ' + item.substring(6));
+ }
+
+ should(output.length === 0, 'Number of nodes not tested')
+ .message(': 0', ': ' + output);
+
+ task.done();
+ });
+
+ // Simple test of a few automation methods to verify we get warnings.
+ audit.define('automation', (task, should) => {
+ // Just use a DelayNode for testing because the audio param has finite
+ // limits.
+ should(() => {
+ let d = offlineContext.createDelay();
+
+ // The console output should have the warnings that we're interested
+ // in.
+ d.delayTime.setValueAtTime(-1, 0);
+ d.delayTime.linearRampToValueAtTime(2, 1);
+ d.delayTime.exponentialRampToValueAtTime(3, 2);
+ d.delayTime.setTargetAtTime(-1, 3, .1);
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([.1, .2, 1.5, -1]), 4, .1);
+ }, 'Test automations (check console logs)').notThrow();
+ task.done();
+ });
+
+ audit.run();
+
+ // Is |object| an AudioParam? We determine this by checking the
+ // constructor name.
+ function isAudioParam(object) {
+ return object && object.constructor.name === 'AudioParam';
+ }
+
+ // Does |limitOptions| exist and does it have valid values for the
+ // expected min and max values?
+ function hasValidLimits(limitOptions) {
+ return limitOptions && (typeof limitOptions.minValue === 'number') &&
+ (typeof limitOptions.maxValue === 'number');
+ }
+
+ // Check the min and max values for the AudioParam attribute named
+      // |paramName| for the |node|. The expected limits are given by the
+      // dictionary |limits|. Returns false if any check fails; the caller
+      // adds the name of the failing AudioParam to its failure list.
+ function validateAudioParamLimits(should, node, paramName, limits) {
+ let nodeName = node.constructor.name;
+ let parameter = node[paramName];
+ let prefix = nodeName + '.' + paramName;
+
+ let success = true;
+ if (hasValidLimits(limits[paramName])) {
+ // Verify that the min and max values for the parameter are correct.
+ let isCorrect = should(parameter.minValue, prefix + '.minValue')
+ .beEqualTo(limits[paramName].minValue);
+ isCorrect = should(parameter.maxValue, prefix + '.maxValue')
+ .beEqualTo(limits[paramName].maxValue) &&
+ isCorrect;
+
+ // Verify that the min and max attributes are read-only. |testValue|
+ // MUST be a number that can be represented exactly the same way as
+ // both a double and single float. A small integer works nicely.
+ const testValue = 42;
+ parameter.minValue = testValue;
+ let isReadOnly;
+ isReadOnly =
+ should(parameter.minValue, `${prefix}.minValue = ${testValue}`)
+ .notBeEqualTo(testValue);
+
+ should(isReadOnly, prefix + '.minValue is read-only').beEqualTo(true);
+
+ isCorrect = isReadOnly && isCorrect;
+
+ parameter.maxValue = testValue;
+ isReadOnly =
+ should(parameter.maxValue, `${prefix}.maxValue = ${testValue}`)
+ .notBeEqualTo(testValue);
+ should(isReadOnly, prefix + '.maxValue is read-only').beEqualTo(true);
+
+ isCorrect = isReadOnly && isCorrect;
+
+ // Now try to set the parameter outside the nominal range.
+ let newValue = 2 * limits[paramName].minValue - 1;
+
+ let isClipped = true;
+ let clippingTested = false;
+ // If the new value is beyond float the largest single-precision
+          // If the new value is beyond the largest single-precision float,
+          // skip the test because Chrome throws an error.
+ parameter.value = newValue;
+ clippingTested = true;
+ isClipped =
+ should(
+ parameter.value, 'Set ' + prefix + '.value = ' + newValue)
+ .beEqualTo(parameter.minValue) &&
+ isClipped;
+ }
+
+ newValue = 2 * limits[paramName].maxValue + 1;
+
+ if (newValue <= mostPositiveFloat) {
+ parameter.value = newValue;
+ clippingTested = true;
+ isClipped =
+ should(
+ parameter.value, 'Set ' + prefix + '.value = ' + newValue)
+ .beEqualTo(parameter.maxValue) &&
+ isClipped;
+ }
+
+ if (clippingTested) {
+ should(
+ isClipped,
+ prefix + ' was clipped to lie within the nominal range')
+ .beEqualTo(true);
+ }
+
+ isCorrect = isCorrect && isClipped;
+
+ success = isCorrect && success;
+      } else {
+        // Test config didn't specify valid limits. Fail this test!
+        should(
+            false,
+            'Limits for ' + nodeName + '.' + paramName +
+                ' were correctly defined')
+            .beEqualTo(true);
+
+        success = false;
+      }
+
+ return success;
+ }
+
+ // Test all of the AudioParams for |node| using the expected values in
+ // |limits|. |creatorName| is the name of the method to create the node,
+      // and is used to keep track of which tests we've run.
+ function testLimits(should, creatorName, node, limits) {
+ let nodeName = node.constructor.name;
+ testedMethods.add(creatorName);
+
+ let success = true;
+
+ // List of all of the AudioParams that were tested.
+ let audioParams = [];
+
+ // List of AudioParams that failed the test.
+ let incorrectParams = [];
+
+      // Look through all of the keys for the node and extract just the
+      // AudioParams.
+ Object.keys(node.__proto__).forEach(function(paramName) {
+ if (isAudioParam(node[paramName])) {
+ audioParams.push(paramName);
+          let isValid =
+              validateAudioParamLimits(should, node, paramName, limits);
+ if (!isValid)
+ incorrectParams.push(paramName);
+
+ success = isValid && success;
+ }
+ });
+
+ // Print an appropriate message depending on whether there were
+ // AudioParams defined or not.
+ if (audioParams.length) {
+ let message =
+ 'Nominal ranges for AudioParam(s) of ' + node.constructor.name;
+ should(success, message)
+            .message('are correct', 'are incorrect for: ' + incorrectParams);
+ return success;
+ } else {
+ should(!limits, nodeName)
+ .message(
+ 'has no AudioParams as expected',
+ 'has no AudioParams but test expected ' + limits);
+ }
+ }
+ </script>
+ </body>
+</html>
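
A minimal sketch of the clamping and read-only behavior this file asserts, assuming a browser-style Web Audio implementation; the node choice and the values are illustrative, not taken from the test:

    const ctx = new OfflineAudioContext(1, 128, 48000);
    const delay = ctx.createDelay();        // delayTime nominal range: [0, 1]
    delay.delayTime.value = 2;              // above maxValue
    console.log(delay.delayTime.value);     // expected: 1 (clipped to maxValue)
    delay.delayTime.value = -1;             // below minValue
    console.log(delay.delayTime.value);     // expected: 0 (clipped to minValue)
    delay.delayTime.minValue = 0.5;         // read-only; the assignment is ignored
    console.log(delay.delayTime.minValue);  // still 0
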
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html
new file mode 100644
index 0000000000..faf00c007b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setTargetAtTime.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.setTargetAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() and setTargetAtTime at regular intervals to set the
+ // starting value and the target value. Each time interval has a ramp with
+ // a different starting and target value so that there is a discontinuity
+ // at each time interval boundary. The discontinuity is for testing
+ // timing. Also, we alternate between an increasing and decreasing ramp
+ // for each interval.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+      let maxAllowedError = 6.5683e-4;
+
+ // The AudioGainNode starts with this value instead of the default value.
+ let initialValue = 100;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+ // Generate an exponential approach starting at |startTime| with a target
+ // value of |value|.
+      function automation(value, startTime, endTime) {
+        // endTime is not used for setTargetAtTime.
+        gainNode.gain.setTargetAtTime(value, startTime, timeConstant);
+      }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setTargetAtTime() functionality.'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, initialValue, setValue, automation,
+ 'setTargetAtTime()', maxAllowedError,
+ createExponentialApproachArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
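
The reference array the harness compares against (createExponentialApproachArray) follows the spec's setTargetAtTime formula; a hedged restatement, where the helper name is mine:

    // v(t) = V1 + (V0 - V1) * e^(-(t - t0) / tau), where V0 is the value at
    // start time t0, V1 the target value, and tau the time constant.
    function expectedSetTarget(t, t0, v0, target, timeConstant) {
      return target + (v0 - target) * Math.exp(-(t - t0) / timeConstant);
    }
    expectedSetTarget(0.1, 0, 100, 0, 0.1);  // one time constant in: ~100/e ~= 36.8
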
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html
new file mode 100644
index 0000000000..ab2edfd009
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueAtTime.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audioparam-setValueAtTime.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode, and call
+ // setValueAtTime() at regular intervals to set the value for the duration
+      // of the interval. Each time interval has a different value so that there
+ // is a discontinuity at each time interval boundary. The discontinuity
+ // is for testing timing.
+
+ // Number of tests to run.
+ let numberOfTests = 100;
+
+ // Max allowed difference between the rendered data and the expected
+ // result.
+ let maxAllowedError = 6e-8;
+
+ // Set the gain node value to the specified value at the specified time.
+ function setValue(value, time) {
+ gainNode.gain.setValueAtTime(value, time);
+ }
+
+      // For testing setValueAtTime(), we don't need to do anything for
+      // automation, because the value at the beginning of the interval is
+      // set by setValue() and remains constant for the duration, which is
+      // what we want.
+ function automation(value, startTime, endTime) {
+ // Do nothing.
+ }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setValueAtTime() functionality.'
+ },
+ function(task, should) {
+ createAudioGraphAndTest(
+ task, should, numberOfTests, 1, setValue, automation,
+ 'setValueAtTime()', maxAllowedError, createConstantArray);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
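
For reference, the expected signal here (createConstantArray in the harness) is a simple staircase: each interval holds the value set at its start. An illustrative reconstruction, with names that are not the harness's:

    function expectedStaircase(values, framesPerInterval, totalFrames) {
      const out = new Float32Array(totalFrames);
      for (let i = 0; i < totalFrames; ++i) {
        const interval = Math.min(
            Math.floor(i / framesPerInterval), values.length - 1);
        out[i] = values[interval];  // value holds until the next setValueAtTime
      }
      return out;
    }
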
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html
new file mode 100644
index 0000000000..ed0c15fb9b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurve-exceptions.html
@@ -0,0 +1,426 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Exceptions from setValueCurveAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // Some short duration because we don't need to run the test for very
+ // long.
+ let testDurationSec = 0.125;
+ let testDurationFrames = testDurationSec * sampleRate;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('setValueCurve', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Start time and duration for setValueCurveAtTime
+ let curveStartTime = 0.1 * testDurationSec;
+ let duration = 0.1 * testDurationSec;
+
+        // Some time that is known to be during the setValueCurveAtTime interval.
+ let automationTime = curveStartTime + duration / 2;
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, curveStartTime, duration);
+ },
+ 'setValueCurveAtTime(curve, ' + curveStartTime + ', ' + duration +
+ ')')
+ .notThrow();
+
+ should(
+ function() {
+ g.gain.setValueAtTime(1, automationTime);
+ },
+ 'setValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.linearRampToValueAtTime(1, automationTime);
+ },
+ 'linearRampToValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.exponentialRampToValueAtTime(1, automationTime);
+ },
+ 'exponentialRampToValueAtTime(1, ' + automationTime + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.setTargetAtTime(1, automationTime, 1);
+ },
+ 'setTargetAtTime(1, ' + automationTime + ', 1)')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ g.gain.setValueAtTime(1, curveStartTime + 1.1 * duration);
+ },
+ 'setValueAtTime(1, ' + (curveStartTime + 1.1 * duration) + ')')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define('value setter', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Start time and duration for setValueCurveAtTime
+ let curveStartTime = 0.;
+ let duration = 0.2 * testDurationSec;
+
+        // Some time that is known to be during the setValueCurveAtTime interval.
+ let automationTime = 0.;
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, curveStartTime, duration);
+ },
+ 'setValueCurveAtTime(curve, ' + curveStartTime + ', ' + duration +
+ ')')
+ .notThrow();
+
+ should(
+ function() {
+ g.gain.value = 0.;
+ },
+ 'value setter')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('automations', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+
+ let curve = new Float32Array(2);
+ // Start time and duration for setValueCurveAtTime
+ let startTime = 0;
+ let timeInterval = testDurationSec / 10;
+ let time;
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.linearRampToValueAtTime(1, startTime);
+ }, 'linearRampToValueAtTime(1, ' + startTime + ')').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.exponentialRampToValueAtTime(1, startTime);
+ }, 'exponentialRampToValueAtTime(1, ' + startTime + ')').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.setTargetAtTime(1, startTime, 0.1);
+ }, 'setTargetAtTime(1, ' + startTime + ', 0.1)').notThrow();
+
+ startTime += timeInterval;
+ should(() => {
+ g.gain.setValueCurveAtTime(curve, startTime, 0.1);
+ }, 'setValueCurveAtTime(curve, ' + startTime + ', 0.1)').notThrow();
+
+ // Now try to setValueCurve that overlaps each of the above automations
+ startTime = timeInterval / 2;
+
+ for (let k = 0; k < 4; ++k) {
+ time = startTime + timeInterval * k;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, 0.01);
+ },
+ 'setValueCurveAtTime(curve, ' + time + ', 0.01)')
+ .throw(DOMException, 'NotSupportedError');
+ }
+
+ // Elements of setValueCurve should be finite.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(
+ Float32Array.from([NaN, NaN]), time, 0.01);
+ },
+ 'setValueCurveAtTime([NaN, NaN], ' + time + ', 0.01)')
+ .throw(TypeError);
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(
+ Float32Array.from([1, Infinity]), time, 0.01);
+ },
+ 'setValueCurveAtTime([1, Infinity], ' + time + ', 0.01)')
+ .throw(TypeError);
+
+ let d = context.createDelay();
+        // Check that out-of-range values produce warnings and that non-finite
+        // values throw.
+ should(
+ () => {
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([1, 5]), time, 0.01);
+ },
+ 'delayTime.setValueCurveAtTime([1, 5], ' + time + ', 0.01)')
+ .notThrow();
+
+ should(
+ () => {
+ d.delayTime.setValueCurveAtTime(
+ Float32Array.from([1, 5, Infinity]), time, 0.01);
+ },
+ 'delayTime.setValueCurveAtTime([1, 5, Infinity], ' + time +
+ ', 0.01)')
+ .throw(TypeError);
+
+ // One last test that prints out lots of digits for the time.
+ time = Math.PI / 100;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, 0.01);
+ },
+ 'setValueCurveAtTime(curve, ' + time + ', 0.01)')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('catch-exception', (task, should) => {
+ // Verify that the curve isn't inserted into the time line even if we
+ // catch the exception.
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let gain = context.createGain();
+ let source = context.createBufferSource();
+ let buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ source.buffer = buffer;
+ source.loop = true;
+
+ source.connect(gain);
+ gain.connect(context.destination);
+
+ gain.gain.setValueAtTime(1, 0);
+ try {
+ // The value curve has an invalid element. This automation shouldn't
+ // be inserted into the timeline at all.
+ gain.gain.setValueCurveAtTime(
+ Float32Array.from([0, NaN]), 128 / context.sampleRate, .5);
+        } catch (e) {
+        }
+ source.start();
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ // Since the setValueCurve wasn't inserted, the output should be
+ // exactly 1 for the entire duration.
+ should(
+ resultBuffer.getChannelData(0),
+ 'Handled setValueCurve exception so output')
+ .beConstantValueOf(1);
+
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('start-end', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let curve = new Float32Array(2);
+
+ // Verify that a setValueCurve can start at the end of an automation.
+ let time = 0;
+ let timeInterval = testDurationSec / 50;
+ should(() => {
+ g.gain.setValueAtTime(1, time);
+ }, 'setValueAtTime(1, ' + time + ')').notThrow();
+
+ time += timeInterval;
+ should(() => {
+ g.gain.linearRampToValueAtTime(0, time);
+ }, 'linearRampToValueAtTime(0, ' + time + ')').notThrow();
+
+ // setValueCurve starts at the end of the linear ramp. This should be
+ // fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // exponentialRamp ending one interval past the setValueCurve should be
+ // fine.
+ time += 2 * timeInterval;
+ should(() => {
+ g.gain.exponentialRampToValueAtTime(1, time);
+ }, 'exponentialRampToValueAtTime(1, ' + time + ')').notThrow();
+
+ // setValueCurve starts at the end of the exponential ramp. This should
+ // be fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setValueCurve at the end of the setValueCurve should be fine.
+ time += timeInterval;
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setValueAtTime at the end of setValueCurve should be fine.
+ time += timeInterval;
+ should(() => {
+ g.gain.setValueAtTime(0, time);
+ }, 'setValueAtTime(0, ' + time + ')').notThrow();
+
+ // setValueCurve at the end of setValueAtTime should be fine.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, time, timeInterval);
+ },
+ 'setValueCurveAtTime(..., ' + time + ', ' + timeInterval + ')')
+ .notThrow();
+
+ // setTarget starting at the end of setValueCurve should be fine.
+ time += timeInterval;
+ should(() => {
+ g.gain.setTargetAtTime(1, time, 1);
+ }, 'setTargetAtTime(1, ' + time + ', 1)').notThrow();
+
+ task.done();
+ });
+
+ audit.define('curve overlap', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let startTime = 5;
+ let startTimeLater = 10;
+ let startTimeEarlier = 2.5;
+ let curveDuration = 10;
+ let curveDurationShorter = 5;
+ let curve = [1, 2, 3];
+
+ // An initial curve event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDuration})`)
+ .notThrow();
+
+ // Check that an exception is thrown when trying to overlap two curves,
+ // in various ways
+
+ // Same start time and end time (curve exactly overlapping)
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDuration);
+ },
+ `second g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Same start time, shorter end time
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime, curveDurationShorter);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime}, ${curveDurationShorter})`)
+ .throw(DOMException, 'NotSupportedError');
+        // Earlier start time, end time after the start time of another curve
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTimeEarlier, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTimeEarlier}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Start time after the start time of the other curve, but earlier than
+ // its end.
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTimeLater, curveDuration);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTimeLater}, ${curveDuration})`)
+ .throw(DOMException, 'NotSupportedError');
+
+ // New event wholly contained inside existing event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime + 1, curveDuration - 1);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime+1}, ${curveDuration-1})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Old event completely contained inside new event
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(curve, startTime - 1, curveDuration + 1);
+ },
+ `g.gain.setValueCurveAtTime([${curve}], ${startTime-1}, ${curveDuration+1})`)
+ .throw(DOMException, 'NotSupportedError');
+ // Setting an event exactly at the end of the curve should work.
+ should(
+ () => {
+ g.gain.setValueAtTime(1.0, startTime + curveDuration);
+ },
+ `g.gain.setValueAtTime(1.0, ${startTime + curveDuration})`)
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define('curve lengths', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+ let g = context.createGain();
+ let time = 0;
+
+ // Check for invalid curve lengths
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(Float32Array.from([]), time, 0.01);
+ },
+ 'setValueCurveAtTime([], ' + time + ', 0.01)')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(
+ () => {
+ g.gain.setValueCurveAtTime(Float32Array.from([1]), time, 0.01);
+ },
+ 'setValueCurveAtTime([1], ' + time + ', 0.01)')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(() => {
+ g.gain.setValueCurveAtTime(Float32Array.from([1, 2]), time, 0.01);
+ }, 'setValueCurveAtTime([1,2], ' + time + ', 0.01)').notThrow();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
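
All of the NotSupportedError cases above reduce to interval overlap with a scheduled curve. A sketch of the rule the tests exercise, assuming (as the start-end task shows) that a curve occupies the half-open interval [startTime, startTime + duration), so an event exactly at the end time is allowed; the helper name is mine, not the browser's internals:

    function overlapsCurve(curveStart, curveDuration, eventTime) {
      return eventTime >= curveStart && eventTime < curveStart + curveDuration;
    }
    overlapsCurve(5, 10, 15);   // false: exactly at the curve's end is fine
    overlapsCurve(5, 10, 7.5);  // true: scheduling here must throw
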
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html
new file mode 100644
index 0000000000..de8406244b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-setValueCurveAtTime.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioParam.setValueCurveAtTime
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Play a long DC signal out through an AudioGainNode and for each time
+ // interval call setValueCurveAtTime() to set the values for the duration
+ // of the interval. Each curve is a sine wave, and we assume that the
+ // time interval is not an exact multiple of the period. This causes a
+ // discontinuity between time intervals which is used to test timing.
+
+ // Number of tests to run.
+ let numberOfTests = 20;
+
+ // Max allowed difference between the rendered data and the expected
+ // result. Because of the linear interpolation, the rendered curve isn't
+ // exactly the same as the reference. This value is experimentally
+ // determined.
+ let maxAllowedError = 3.7194e-6;
+
+ // The amplitude of the sine wave.
+ let sineAmplitude = 1;
+
+ // Frequency of the sine wave.
+ let freqHz = 440;
+
+ // Curve to use for setValueCurveAtTime().
+ let curve;
+
+ // Sets the curve data for the entire time interval.
+ function automation(value, startTime, endTime) {
+ gainNode.gain.setValueCurveAtTime(
+ curve, startTime, endTime - startTime);
+ }
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'AudioParam setValueCurveAtTime() functionality.'
+ },
+ function(task, should) {
+ // The curve of values to use.
+ curve = createSineWaveArray(
+ timeInterval, freqHz, sineAmplitude, sampleRate);
+
+ createAudioGraphAndTest(
+ task, should, numberOfTests, sineAmplitude,
+ function(k) {
+ // Don't need to set the value.
+ },
+ automation, 'setValueCurveAtTime()', maxAllowedError,
+ createReferenceSineArray,
+ 2 * Math.PI * sineAmplitude * freqHz / sampleRate,
+ differenceErrorMetric);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
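
The linear interpolation the comment refers to follows the spec: the N curve points are spread evenly over the duration, and intermediate times interpolate between neighboring points. An illustrative helper (not the harness's createReferenceSineArray):

    function sampleCurve(curve, t0, d, t) {
      const N = curve.length;
      if (t >= t0 + d) return curve[N - 1];  // final value holds afterwards
      const x = (N - 1) * (t - t0) / d;      // fractional curve index, t >= t0
      const k = Math.floor(x);
      return curve[k] + (curve[k + 1] - curve[k]) * (x - k);
    }
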
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html
new file mode 100644
index 0000000000..9084942f70
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/audioparam-summingjunction.html
@@ -0,0 +1,120 @@
+<!DOCTYPE html>
+<!--
+Tests that multiple audio-rate signals (AudioNode outputs) can be connected to an AudioParam
+and that these signals are summed, along with the AudioParam's intrinsic value.
+-->
+<html>
+ <head>
+ <title>
+ audioparam-summingjunction.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/mix-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSeconds = 1;
+
+ let context = 0;
+
+ // Buffers used by the two gain controlling sources.
+ let linearRampBuffer;
+ let toneBuffer;
+ let toneFrequency = 440;
+
+ // Arbitrary non-zero value.
+ let baselineGain = 5;
+
+ // Allow for a small round-off error.
+ let maxAllowedError = 1e-6;
+
+ function checkResult(renderedBuffer, should) {
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ // Get buffer data from the two sources used to control gain.
+ let linearRampData = linearRampBuffer.getChannelData(0);
+ let toneData = toneBuffer.getChannelData(0);
+
+ let n = renderedBuffer.length;
+
+ should(n, 'Rendered signal length').beEqualTo(linearRampBuffer.length);
+
+ // Check that the rendered result exactly matches the sum of the
+ // intrinsic gain plus the two sources used to control gain. This is
+ // because we're changing the gain of a signal having constant value 1.
+ let success = true;
+ for (let i = 0; i < n; ++i) {
+ let expectedValue = baselineGain + linearRampData[i] + toneData[i];
+ let error = Math.abs(expectedValue - renderedData[i]);
+
+ if (error > maxAllowedError) {
+ success = false;
+ break;
+ }
+ }
+
+ should(
+ success,
+ 'Rendered signal matches sum of two audio-rate gain changing signals plus baseline gain')
+ .beTrue();
+ }
+
+ audit.define('test', function(task, should) {
+ let sampleFrameLength = sampleRate * lengthInSeconds;
+
+ // Create offline audio context.
+ context = new OfflineAudioContext(1, sampleFrameLength, sampleRate);
+
+ // Create buffer used by the source which will have its gain controlled.
+ let constantOneBuffer =
+ createConstantBuffer(context, sampleFrameLength, 1);
+ let constantSource = context.createBufferSource();
+ constantSource.buffer = constantOneBuffer;
+
+ // Create 1st buffer used to control gain (a linear ramp).
+ linearRampBuffer = createLinearRampBuffer(context, sampleFrameLength);
+ let gainSource1 = context.createBufferSource();
+ gainSource1.buffer = linearRampBuffer;
+
+        // Create 2nd buffer used to control gain (a simple sine wave tone).
+ toneBuffer =
+ createToneBuffer(context, toneFrequency, lengthInSeconds, 1);
+ let gainSource2 = context.createBufferSource();
+ gainSource2.buffer = toneBuffer;
+
+ // Create a gain node controlling the gain of constantSource and make
+ // the connections.
+ let gainNode = context.createGain();
+
+ // Intrinsic baseline gain.
+ // This gain value should be summed with gainSource1 and gainSource2.
+ gainNode.gain.value = baselineGain;
+
+ constantSource.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ // Connect two audio-rate signals to control the .gain AudioParam.
+ gainSource1.connect(gainNode.gain);
+ gainSource2.connect(gainNode.gain);
+
+ // Start all sources at time 0.
+ constantSource.start(0);
+ gainSource1.start(0);
+ gainSource2.start(0);
+
+ context.startRendering().then(buffer => {
+ checkResult(buffer, should);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
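
Restating what checkResult() verifies: the computed value of an AudioParam is its intrinsic (scheduled) value plus the sum of all connected audio-rate inputs, so with a constant-1 source feeding the gain node the per-sample output is simply:

    // Names mirror this test's buffers; the logic is the spec's summing rule.
    function expectedSample(baselineGain, rampData, toneData, i) {
      return baselineGain + rampData[i] + toneData[i];
    }
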
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js
new file mode 100644
index 0000000000..43279f91d6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate-testing.js
@@ -0,0 +1,155 @@
+// Test k-rate vs a-rate AudioParams.
+//
+// |options| describes how the testing of the AudioParam should be done:
+//
+// sourceNodeName: name of source node to use for testing; defaults to
+// 'OscillatorNode'. If set to 'none', then no source node
+//                 is created for testing and it is assumed that the AudioNodes
+//                 under test are sources and need to be started.
+// verifyPieceWiseConstant: if true, verify that the k-rate output is
+// piecewise constant for each render quantum.
+// nodeName: name of the AudioNode to be tested
+// nodeOptions: options to be used in the AudioNode constructor
+//
+// prefix: Prefix for all output messages (to make them unique for
+// testharness)
+//
+// rateSettings: A vector of dictionaries specifying how to set the automation
+// rate(s):
+// name: Name of the AudioParam
+// value: The automation rate for the AudioParam given by |name|.
+//
+// automations: A vector of dictionaries specifying how to automate each
+// AudioParam:
+// name: Name of the AudioParam
+//
+// methods: A vector of dictionaries specifying the automation methods to
+// be used for testing:
+// name: Automation method to call
+// options: Arguments for the automation method
+//
+// Testing is somewhat rudimentary. We create two nodes of the same type. One
+// node uses the default automation rates for each AudioParam (expecting them to
+// be a-rate). The second node sets the automation rate of AudioParams to
+// "k-rate". The set is speciified by |options.rateSettings|.
+//
+// For both of these nodes, the same set of automation methods (given by
+// |options.automations|) is applied. A simple oscillator is connected to each
+// node; the nodes in turn are connected to different channels of an offline
+// context. Channel 0 is the k-rate node output; channel 1, the a-rate output;
+// and channel 2, the difference between the outputs.
+//
+// Success is declared if the difference signal is not exactly zero. This means
+// that the automations did different things, as expected.
+//
+// The promise from |startRendering| is returned.
+function doTest(context, should, options) {
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = null;
+
+ // Skip creating a source to drive the graph if |sourceNodeName| is 'none'.
+ // If |sourceNodeName| is given, use that, else default to OscillatorNode.
+ if (options.sourceNodeName !== 'none') {
+ src = new window[options.sourceNodeName || 'OscillatorNode'](context);
+ }
+
+ let kRateNode = new window[options.nodeName](context, options.nodeOptions);
+ let aRateNode = new window[options.nodeName](context, options.nodeOptions);
+ let inverter = new GainNode(context, {gain: -1});
+
+ // Set kRateNode filter to use k-rate params.
+ options.rateSettings.forEach(setting => {
+ kRateNode[setting.name].automationRate = setting.value;
+ // Mostly for documentation in the output. These should always
+ // pass.
+ should(
+ kRateNode[setting.name].automationRate,
+ `${options.prefix}: Setting ${
+ setting.name
+ }.automationRate to "${setting.value}"`)
+ .beEqualTo(setting.value);
+ });
+
+ // Run through all automations for each node separately. (Mostly to keep
+ // output of automations together.)
+ options.automations.forEach(param => {
+ param.methods.forEach(method => {
+      // Mostly for documentation in the output. These should never throw.
+      let message = `${param.name}.${method.name}(${method.options})`;
+ should(() => {
+ kRateNode[param.name][method.name](...method.options);
+ }, options.prefix + ': k-rate node: ' + message).notThrow();
+ });
+ });
+ options.automations.forEach(param => {
+ param.methods.forEach(method => {
+      // Mostly for documentation in the output. These should never throw.
+      let message = `${param.name}.${method.name}(${method.options})`;
+ should(() => {
+ aRateNode[param.name][method.name](...method.options);
+      }, options.prefix + ': a-rate node: ' + message).notThrow();
+ });
+ });
+
+ // Connect the source, if specified.
+ if (src) {
+ src.connect(kRateNode);
+ src.connect(aRateNode);
+ }
+
+ // The k-rate result is channel 0, and the a-rate result is channel 1.
+ kRateNode.connect(merger, 0, 0);
+ aRateNode.connect(merger, 0, 1);
+
+ // Compute the difference between the a-rate and k-rate results and send
+ // that to channel 2.
+ kRateNode.connect(merger, 0, 2);
+ aRateNode.connect(inverter).connect(merger, 0, 2);
+
+ if (src) {
+ src.start();
+ } else {
+ // If there's no source, then assume the test nodes are sources and start
+ // them.
+ kRateNode.start();
+ aRateNode.start();
+ }
+
+ return context.startRendering().then(renderedBuffer => {
+ let kRateOutput = renderedBuffer.getChannelData(0);
+ let aRateOutput = renderedBuffer.getChannelData(1);
+ let diff = renderedBuffer.getChannelData(2);
+
+ // Some informative messages to print out values of the k-rate and
+ // a-rate outputs. These should always pass.
+ should(
+ kRateOutput, `${options.prefix}: Output of k-rate ${options.nodeName}`)
+ .beEqualToArray(kRateOutput);
+ should(
+ aRateOutput, `${options.prefix}: Output of a-rate ${options.nodeName}`)
+ .beEqualToArray(aRateOutput);
+
+ // The real test. If k-rate AudioParam is working correctly, the
+ // k-rate result MUST differ from the a-rate result.
+ should(
+ diff,
+ `${
+ options.prefix
+ }: Difference between a-rate and k-rate ${options.nodeName}`)
+ .notBeConstantValueOf(0);
+
+ if (options.verifyPieceWiseConstant) {
+ // Verify that the output from the k-rate parameter is step-wise
+ // constant.
+ for (let k = 0; k < kRateOutput.length; k += 128) {
+ should(
+ kRateOutput.slice(k, k + 128),
+ `${options.prefix} k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(kRateOutput[k]);
+ }
+ }
+ });
+}
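
A hypothetical caller for doTest(), to show the shape of |options| (the real callers are the k-rate-*.html tests in this directory; the node, settings, and automation values below are illustrative):

    audit.define('k-rate frequency', (task, should) => {
      let context = new OfflineAudioContext(
          {numberOfChannels: 3, length: 8000, sampleRate: 8000});
      doTest(context, should, {
        nodeName: 'BiquadFilterNode',
        nodeOptions: {type: 'lowpass'},
        prefix: 'BiquadFilter frequency',
        verifyPieceWiseConstant: true,
        rateSettings: [{name: 'frequency', value: 'k-rate'}],
        automations: [{
          name: 'frequency',
          methods: [
            {name: 'setValueAtTime', options: [350, 0]},
            {name: 'linearRampToValueAtTime', options: [2000, 1]}
          ]
        }]
      }).then(() => task.done());
    });
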
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html
new file mode 100644
index 0000000000..a3c11994bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html
@@ -0,0 +1,167 @@
+<!doctype html>
+<html>
+ <head>
+ <title>AudioParam.automationRate tests</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // For each node that has an AudioParam, verify that the default
+ // |automationRate| has the expected value and that we can change it or
+ // throw an error if it can't be changed.
+
+ // Any valid sample rate is fine; we don't actually render anything in the
+ // tests.
+ let sampleRate = 8000;
+
+ let audit = Audit.createTaskRunner();
+
+      // Array of tests. Each test is a dictionary consisting of the name of the
+      // node and an array specifying the AudioParams of the node. This array
+ // in turn gives the name of the AudioParam, the default value for the
+ // |automationRate|, and whether it is fixed (isFixed).
+ const tests = [
+ {
+ nodeName: 'AudioBufferSourceNode',
+ audioParams: [
+ {name: 'detune', defaultRate: 'k-rate', isFixed: true},
+ {name: 'playbackRate', defaultRate: 'k-rate', isFixed: true}
+ ]
+ },
+ {
+ nodeName: 'BiquadFilterNode',
+ audioParams: [
+ {name: 'frequency', defaultRate: 'a-rate', isFixed: false},
+ {name: 'detune', defaultRate: 'a-rate', isFixed: false},
+ {name: 'Q', defaultRate: 'a-rate', isFixed: false},
+ {name: 'gain', defaultRate: 'a-rate', isFixed: false},
+ ]
+ },
+ {
+ nodeName: 'ConstantSourceNode',
+ audioParams: [{name: 'offset', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'DelayNode',
+ audioParams:
+ [{name: 'delayTime', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'DynamicsCompressorNode',
+ audioParams: [
+ {name: 'threshold', defaultRate: 'k-rate', isFixed: true},
+ {name: 'knee', defaultRate: 'k-rate', isFixed: true},
+ {name: 'ratio', defaultRate: 'k-rate', isFixed: true},
+ {name: 'attack', defaultRate: 'k-rate', isFixed: true},
+ {name: 'release', defaultRate: 'k-rate', isFixed: true}
+ ]
+ },
+ {
+ nodeName: 'GainNode',
+ audioParams: [{name: 'gain', defaultRate: 'a-rate', isFixed: false}]
+ },
+ {
+ nodeName: 'OscillatorNode',
+ audioParams: [
+ {name: 'frequency', defaultRate: 'a-rate', isFixed: false},
+ {name: 'detune', defaultRate: 'a-rate', isFixed: false}
+ ]
+ },
+ {
+ nodeName: 'PannerNode',
+ audioParams: [
+ {name: 'positionX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'orientationZ', defaultRate: 'a-rate', isFixed: false},
+ ]
+ },
+ {
+ nodeName: 'StereoPannerNode',
+ audioParams: [{name: 'pan', defaultRate: 'a-rate', isFixed: false}]
+ },
+ ];
+
+ tests.forEach(test => {
+ // Define a separate test for each test entry.
+ audit.define(test.nodeName, (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: sampleRate, sampleRate: sampleRate});
+ // Construct the node and test each AudioParam of the node.
+ let node = new window[test.nodeName](context);
+ test.audioParams.forEach(param => {
+ testAudioParam(
+ should, {nodeName: test.nodeName, node: node, param: param});
+ });
+
+ task.done();
+ });
+ });
+
+      // AudioListener needs its own special test since it's not a node.
+ audit.define('AudioListener', (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: sampleRate, sampleRate: sampleRate});
+
+ [{name: 'positionX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'positionZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'forwardZ', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upX', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upY', defaultRate: 'a-rate', isFixed: false},
+ {name: 'upZ', defaultRate: 'a-rate', isFixed: false},
+ ].forEach(param => {
+ testAudioParam(should, {
+ nodeName: 'AudioListener',
+ node: context.listener,
+ param: param
+ });
+ });
+ task.done();
+ });
+
+ audit.run();
+
+ function testAudioParam(should, options) {
+ let param = options.param;
+ let audioParam = options.node[param.name];
+ let defaultRate = param.defaultRate;
+
+ // Verify that the default value is correct.
+ should(
+ audioParam.automationRate,
+ `Default ${options.nodeName}.${param.name}.automationRate`)
+ .beEqualTo(defaultRate);
+
+ // Try setting the rate to a different rate. If the |automationRate|
+ // is fixed, expect an error. Otherwise, expect no error and expect
+ // the value is changed to the new value.
+ let newRate = defaultRate === 'a-rate' ? 'k-rate' : 'a-rate';
+ let setMessage = `Set ${
+ options.nodeName
+        }.${param.name}.automationRate to "${newRate}"`;
+
+ if (param.isFixed) {
+ should(() => audioParam.automationRate = newRate, setMessage)
+ .throw(DOMException, 'InvalidStateError');
+        } else {
+ should(() => audioParam.automationRate = newRate, setMessage)
+ .notThrow();
+ should(
+ audioParam.automationRate,
+ `${options.nodeName}.${param.name}.automationRate`)
+ .beEqualTo(newRate);
+ }
+ }
+ </script>
+ </body>
+</html>
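
The behavioral difference being configured above: an a-rate AudioParam is sampled every frame, while a k-rate one is sampled once per 128-frame render quantum. A minimal sketch of the setter behavior, using entries from the table (the context parameters are arbitrary):

    const ctx = new OfflineAudioContext({length: 8000, sampleRate: 8000});
    const gain = new GainNode(ctx);
    gain.gain.automationRate = 'k-rate';  // allowed: GainNode.gain is not fixed
    const src = new AudioBufferSourceNode(ctx);
    try {
      src.playbackRate.automationRate = 'a-rate';  // playbackRate is fixed
    } catch (e) {
      console.log(e.name);  // expected: 'InvalidStateError'
    }
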
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html
new file mode 100644
index 0000000000..ac1da8cd51
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/cancel-scheduled-values.html
@@ -0,0 +1,155 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ cancelScheduledValues
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let sampleRate = 8000;
+ let renderFrames = 8000;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'cancel-time', description: 'handle cancelTime values'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderFrames,
+ sampleRate: sampleRate
+ });
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ should(
+ () => src.offset.cancelScheduledValues(-1),
+ 'cancelScheduledValues(-1)')
+ .throw(RangeError);
+
+ // These are TypeErrors because |cancelTime| is a
+          // double, not an unrestricted double.
+ should(
+ () => src.offset.cancelScheduledValues(NaN),
+ 'cancelScheduledValues(NaN)')
+ .throw(TypeError);
+
+ should(
+ () => src.offset.cancelScheduledValues(Infinity),
+ 'cancelScheduledValues(Infinity)')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'cancel1', description: 'cancel setValueCurve'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderFrames,
+ sampleRate: sampleRate
+ });
+
+ let src = new ConstantSourceNode(context);
+ let gain = new GainNode(context);
+ src.connect(gain).connect(context.destination);
+
+ // Initial time and value for first automation (setValue)
+ let time0 = 0;
+ let value0 = 0.5;
+
+ // Time and duration of the setValueCurve. We'll also schedule a
+ // setValue at the same time.
+ let value1 = 1.5;
+ let curveStartTime = 0.25;
+ let curveDuration = 0.25;
+
+ // Time at which to cancel events
+ let cancelTime = 0.3;
+
+ // Time and value for event added after cancelScheduledValues has
+ // been called.
+ let time2 = curveStartTime + curveDuration / 2;
+ let value2 = 3;
+
+ // Self-consistency checks for the test.
+ should(cancelTime, 'cancelTime is after curve start')
+ .beGreaterThan(curveStartTime);
+ should(cancelTime, 'cancelTime is before curve ends')
+ .beLessThan(curveStartTime + curveDuration);
+
+ // These assertions are just to show what's happening
+ should(
+ () => gain.gain.setValueAtTime(value0, time0),
+ `gain.gain.setValueAtTime(${value0}, ${time0})`)
+ .notThrow();
+          // setValue at the same time as the curve, to test that this event
+          // wasn't removed.
+ should(
+ () => gain.gain.setValueAtTime(value1, curveStartTime),
+ `gain.gain.setValueAtTime(${value1}, ${curveStartTime})`)
+ .notThrow();
+
+ should(
+ () => gain.gain.setValueCurveAtTime(
+ [1, -1], curveStartTime, curveDuration),
+ `gain.gain.setValueCurveAtTime(..., ${curveStartTime}, ${
+ curveDuration})`)
+ .notThrow();
+
+ // An event after the curve to verify this is removed.
+ should(
+ () => gain.gain.setValueAtTime(
+ 99, curveStartTime + curveDuration),
+ `gain.gain.setValueAtTime(99, ${
+ curveStartTime + curveDuration})`)
+ .notThrow();
+
+ // Cancel events now.
+ should(
+ () => gain.gain.cancelScheduledValues(cancelTime),
+ `gain.gain.cancelScheduledValues(${cancelTime})`)
+ .notThrow();
+
+ // Simple check that the setValueCurve is gone, by scheduling
+ // something in the middle of the (now deleted) event
+ should(
+ () => gain.gain.setValueAtTime(value2, time2),
+ `gain.gain.setValueAtTime(${value2}, ${time2})`)
+ .notThrow();
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let audio = buffer.getChannelData(0);
+
+ // After canceling events, verify that the outputs have the
+ // desired values.
+ let curveFrame = curveStartTime * context.sampleRate;
+ should(
+ audio.slice(0, curveFrame), `output[0:${curveFrame - 1}]`)
+ .beConstantValueOf(value0);
+
+ let time2Frame = time2 * context.sampleRate;
+ should(
+ audio.slice(curveFrame, time2Frame),
+ `output[${curveFrame}:${time2Frame - 1}]`)
+ .beConstantValueOf(value1);
+
+ should(audio.slice(time2Frame), `output[${time2Frame}:]`)
+ .beConstantValueOf(value2);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
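
The removal rule the cancel1 task relies on: cancelScheduledValues(t) drops every event scheduled at or after t, and also drops a setValueCurveAtTime event whose interval contains t even though its start time is earlier. A hypothetical helper mirroring that behavior on a plain event list:

    function cancelFrom(events, t) {
      // Keep events strictly before t, unless a curve's interval spans t.
      return events.filter(e =>
          e.time < t &&
          !(e.type === 'setValueCurve' && t <= e.time + e.duration));
    }
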
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html
new file mode 100644
index 0000000000..b846f982ab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/event-insertion.html
@@ -0,0 +1,411 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Handling of Event Insertion
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audio-param.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Use a power of two for the sample rate so there's no round-off in
+ // computing time from frame.
+ let sampleRate = 16384;
+
+ audit.define(
+ {label: 'Insert same event at same time'}, (task, should) => {
+ // Context for testing.
+ let context = new OfflineAudioContext(
+ {length: 16384, sampleRate: sampleRate});
+
+ // The source node to use. Automations will be scheduled here.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.connect(context.destination);
+
+          // An array of tests to be done. Each entry specifies the event
+          // type and the event time. The events are inserted in the order
+          // given (in |values|), and each new event must be inserted after
+          // any existing events at the same time, as required by the spec.
+ let testCases = [
+ {
+ event: 'setValueAtTime',
+ frame: RENDER_QUANTUM_FRAMES,
+ values: [99, 1],
+ outputTestFrame: RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 1
+ },
+ {
+ event: 'linearRampToValueAtTime',
+ frame: 2 * RENDER_QUANTUM_FRAMES,
+ values: [99, 2],
+ outputTestFrame: 2 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 2
+ },
+ {
+ event: 'exponentialRampToValueAtTime',
+ frame: 3 * RENDER_QUANTUM_FRAMES,
+ values: [99, 3],
+ outputTestFrame: 3 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 3
+ },
+ {
+ event: 'setValueCurveAtTime',
+ frame: 3 * RENDER_QUANTUM_FRAMES,
+ values: [[3, 4]],
+ extraArgs: RENDER_QUANTUM_FRAMES / context.sampleRate,
+ outputTestFrame: 4 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 4
+ },
+ {
+ event: 'setValueAtTime',
+ frame: 5 * RENDER_QUANTUM_FRAMES - 1,
+ values: [99, 1, 5],
+ outputTestFrame: 5 * RENDER_QUANTUM_FRAMES,
+ expectedOutputValue: 5
+ }
+ ];
+
+ testCases.forEach(entry => {
+ entry.values.forEach(value => {
+ let eventTime = entry.frame / context.sampleRate;
+ let message = eventToString(
+ entry.event, value, eventTime, entry.extraArgs);
+ // This is mostly to print out the event that is getting
+ // inserted. It should never ever throw.
+ should(() => {
+ src.offset[entry.event](value, eventTime, entry.extraArgs);
+ }, message).notThrow();
+ });
+ });
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+
+ // Look through the test cases to figure out what the correct
+ // output values should be.
+ testCases.forEach(entry => {
+ let expected = entry.expectedOutputValue;
+ let frame = entry.outputTestFrame;
+ let time = frame / context.sampleRate;
+ should(
+ audio[frame], `Output at frame ${frame} (time ${time})`)
+ .beEqualTo(expected);
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Linear + Expo',
+ description: 'Different events at same time'
+ },
+ (task, should) => {
+ // Should be a linear ramp up to the event time, and after a
+ // constant value because the exponential ramp has ended.
+ let testCase = [
+ {event: 'linearRampToValueAtTime', value: 2, relError: 0},
+ {event: 'setValueAtTime', value: 99},
+ {event: 'exponentialRampToValueAtTime', value: 3},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Linear+Expo: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(expectConstant(prefix, should, eventFrame, testCase))
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Expo + Linear',
+ description: 'Different events at same time',
+ },
+ (task, should) => {
+ // Should be an exponential ramp up to the event time, and after a
+ // constant value because the linear ramp has ended.
+ let testCase = [
+ {
+ event: 'exponentialRampToValueAtTime',
+ value: 3,
+ relError: 4.2533e-6
+ },
+ {event: 'setValueAtTime', value: 99},
+ {event: 'linearRampToValueAtTime', value: 2},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Expo+Linear: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(expectConstant(prefix, should, eventFrame, testCase))
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Linear + SetTarget',
+ description: 'Different events at same time',
+ },
+ (task, should) => {
+ // Should be a linear ramp up to the event time, and then a
+ // decaying value.
+ let testCase = [
+ {event: 'linearRampToValueAtTime', value: 3, relError: 0},
+ {event: 'setValueAtTime', value: 100},
+ {event: 'setTargetAtTime', value: 0, extraArgs: 0.1},
+ ];
+ let eventFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let prefix = 'Linear+SetTarget: ';
+
+ testEventInsertion(prefix, should, eventFrame, testCase)
+ .then(audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+ let prefix = 'Linear+SetTarget: ';
+ let eventTime = eventFrame / sampleRate;
+ let expectedValue = methodMap[testCase[0].event](
+ (eventFrame - 1) / sampleRate, 1, 0, testCase[0].value,
+ eventTime);
+ should(
+ audio[eventFrame - 1],
+ prefix +
+ `At time ${
+ (eventFrame - 1) / sampleRate
+ } (frame ${eventFrame - 1}) output`)
+ .beCloseTo(
+ expectedValue,
+ {threshold: testCase[0].relError || 0});
+
+ // The setValue should have taken effect
+ should(
+ audio[eventFrame],
+ prefix +
+ `At time ${eventTime} (frame ${eventFrame}) output`)
+ .beEqualTo(testCase[1].value);
+
+ // The final event is setTarget. Compute the expected output.
+ let actual = audio.slice(eventFrame);
+ let expected = new Float32Array(actual.length);
+ for (let k = 0; k < expected.length; ++k) {
+ let t = (eventFrame + k) / sampleRate;
+ expected[k] = audioParamSetTarget(
+ t, testCase[1].value, eventTime, testCase[2].value,
+ testCase[2].extraArgs);
+ }
+ should(
+ actual,
+ prefix +
+ `At time ${eventTime} (frame ${
+ eventFrame
+ }) and later`)
+ .beCloseToArray(expected, {relativeThreshold: 2.6694e-7});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Multiple linear ramps at the same time',
+ description: 'Verify output'
+ },
+ (task, should) => {
+ testMultipleSameEvents(should, {
+ method: 'linearRampToValueAtTime',
+ prefix: 'Multiple linear ramps: ',
+ threshold: 0
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'Multiple exponential ramps at the same time',
+ description: 'Verify output'
+ },
+ (task, should) => {
+ testMultipleSameEvents(should, {
+ method: 'exponentialRampToValueAtTime',
+ prefix: 'Multiple exponential ramps: ',
+ threshold: 5.3924e-7
+ }).then(() => task.done());
+ });
+
+ audit.run();
+
+ // Takes a list of |testCases| consisting of automation methods and
+ // schedules them to occur at |eventFrame|. |prefix| is a prefix for
+ // messages produced by |should|.
+ //
+ // Each item in |testCases| is a dictionary with members:
+ // event - the name of automation method to be inserted,
+ // value - the value for the event,
+ // extraArgs - extra arguments if the event needs more than the value
+ // and time (such as setTargetAtTime).
+ function testEventInsertion(prefix, should, eventFrame, testCases) {
+ let context = new OfflineAudioContext(
+ {length: 4 * RENDER_QUANTUM_FRAMES, sampleRate: sampleRate});
+
+ // The source node to use. Automations will be scheduled here.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.connect(context.destination);
+
+ // Initialize value to 1 at the beginning.
+ src.offset.setValueAtTime(1, 0);
+
+ // Test automations have this event time.
+ let eventTime = eventFrame / context.sampleRate;
+
+ // Sanity check that context is long enough for the test
+ should(
+ eventFrame < context.length,
+ prefix + 'Context length is long enough for the test')
+ .beTrue();
+
+        // Automations to be tested. The first event determines the actual
+        // output up to the event time. The last event determines the final
+        // output from the event time onwards.
+ testCases.forEach(entry => {
+ should(
+ () => {
+ src.offset[entry.event](
+ entry.value, eventTime, entry.extraArgs);
+ },
+ prefix +
+ eventToString(
+ entry.event, entry.value, eventTime, entry.extraArgs))
+ .notThrow();
+ });
+
+ src.start();
+
+ return context.startRendering();
+ }
+
+ // Verify output of test where the final value of the automation is
+ // expected to be constant.
+ function expectConstant(prefix, should, eventFrame, testCases) {
+ return audioBuffer => {
+ let audio = audioBuffer.getChannelData(0);
+
+ let eventTime = eventFrame / sampleRate;
+
+ // Compute the expected value of the first automation one frame before
+ // the event time. This is a quick check that the correct automation
+ // was done.
+ let expectedValue = methodMap[testCases[0].event](
+ (eventFrame - 1) / sampleRate, 1, 0, testCases[0].value,
+ eventTime);
+ should(
+ audio[eventFrame - 1],
+ prefix +
+ `At time ${
+ (eventFrame - 1) / sampleRate
+ } (frame ${eventFrame - 1}) output`)
+ .beCloseTo(expectedValue, {threshold: testCases[0].relError});
+
+ // The last event scheduled is expected to set the value for all
+ // future times. Verify that the output has the expected value.
+ should(
+ audio.slice(eventFrame),
+ prefix +
+ `At time ${eventTime} (frame ${
+ eventFrame
+ }) and later, output`)
+ .beConstantValueOf(testCases[testCases.length - 1].value);
+ };
+ }
+
+      // Test output when multiple events of the same type are scheduled at
+      // the same time.
+ function testMultipleSameEvents(should, options) {
+ let {method, prefix, threshold} = options;
+
+ // Context for testing.
+ let context =
+ new OfflineAudioContext({length: 16384, sampleRate: sampleRate});
+
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ let initialValue = 1;
+
+ // Informative print
+ should(() => {
+ src.offset.setValueAtTime(initialValue, 0);
+ }, prefix + `setValueAtTime(${initialValue}, 0)`).notThrow();
+
+ let frame = 64;
+ let time = frame / context.sampleRate;
+ let values = [2, 7, 10];
+
+        // Schedule several events of the same type at the same time, but with
+        // different values.
+
+ values.forEach(value => {
+ // Informative prints to show what we're doing in this test.
+ should(
+ () => {
+ src.offset[method](value, time);
+ },
+            prefix + eventToString(method, value, time))
+            .notThrow();
+      });
+
+ src.start();
+
+ return context.startRendering().then(audioBuffer => {
+ let actual = audioBuffer.getChannelData(0);
+
+ // The output should be a ramp from time 0 to the event time. But we
+ // only verify the value just before the event time, which should be
+ // fairly close to values[0]. (But compute the actual expected value
+ // to be sure.)
+ let expected = methodMap[method](
+ (frame - 1) / context.sampleRate, initialValue, 0, values[0],
+ time);
+ should(actual[frame - 1], prefix + `Output at frame ${frame - 1}`)
+ .beCloseTo(expected, {threshold: threshold, precision: 3});
+
+ // Any other values shouldn't show up in the output. Only the value
+        // from the last event should appear. We only check the value at the
+ // event time.
+ should(
+ actual[frame], prefix + `Output at frame ${frame} (${time} sec)`)
+ .beEqualTo(values[values.length - 1]);
+ });
+ }
+
+ // Convert an automation method to a string for printing.
+ function eventToString(method, value, time, extras) {
+ let string = method + '(';
+ string += (value instanceof Array) ? `[${value}]` : value;
+ string += ', ' + time;
+ if (extras) {
+ string += ', ' + extras;
+ }
+ string += ')';
+ return string;
+ }
+
+ // Map between the automation method name and a function that computes the
+ // output value of the automation method.
+ const methodMap = {
+ linearRampToValueAtTime: audioParamLinearRamp,
+ exponentialRampToValueAtTime: audioParamExponentialRamp,
+ setValueAtTime: (t, v) => v
+ };
+ </script>
+ </body>
+</html>
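
For reference, the expected-value functions behind methodMap implement the spec's ramp formulas; hedged restatements with my own names (the harness versions live in /webaudio/resources/audio-param.js):

    // Linear ramp:      v(t) = V0 + (V1 - V0) * (t - t0) / (t1 - t0)
    // Exponential ramp: v(t) = V0 * (V1 / V0)^((t - t0) / (t1 - t0))
    function linearRamp(t, v0, t0, v1, t1) {
      return v0 + (v1 - v0) * (t - t0) / (t1 - t0);
    }
    function exponentialRamp(t, v0, t0, v1, t1) {
      return v0 * Math.pow(v1 / v0, (t - t0) / (t1 - t0));
    }
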
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html
new file mode 100644
index 0000000000..0b94bd70f9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audiobuffersource-connections.html
@@ -0,0 +1,164 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for AudioBufferSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      // Fairly arbitrary sample rate and a short test duration.
+ const sampleRate = 8000;
+ const testDuration = 0.25;
+
+ [['playbackRate', [1, 0], [2, testDuration]],
+ ['detune', [-1200, 0], [1200, testDuration]]]
+ .forEach(param => {
+ audit.define(
+ {label: param[0], description: `AudioBufferSource ${param[0]}`},
+ async (task, should) => {
+ await doTest(should, {
+ prefix: task.label,
+ paramName: param[0],
+ startValue: param[1],
+ endValue: param[2]
+ });
+ task.done();
+ });
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test k-rate automation of AudioBufferSourceNode with connected
+ // input.
+ //
+ // A reference source node is created with an automation on the
+ // selected AudioParam. For simplicity, we just use a linear ramp from
+ // the minValue to the maxValue of the AudioParam.
+ //
+ // The test node has an input signal connected to the AudioParam. This
+ // input signal is created to match the automation on the reference
+ // node.
+ //
+ // Finally, the output from the two nodes must be identical if k-rate
+ // inputs are working correctly.
+ //
+ // Options parameter is a dictionary with the following required
+ // members:
+ // prefix - prefix to use for the messages.
+ // paramName - Name of the AudioParam to be tested
+
+ let {prefix, paramName, startValue, endValue} = options;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Linear ramp to use for the buffer sources
+ let ramp = createLinearRampBuffer(context, context.length);
+
+ // Create the reference and test nodes.
+ let refNode;
+ let tstNode;
+
+ const nodeOptions = {buffer: ramp};
+
+ should(
+ () => refNode = new AudioBufferSourceNode(context, nodeOptions),
+ `${prefix}: refNode = new AudioBufferSourceNode(context, ${
+ JSON.stringify(nodeOptions)})`)
+ .notThrow();
+
+ should(
+ () => tstNode = new AudioBufferSourceNode(context, nodeOptions),
+ `${prefix}: tstNode = new AudioBufferSourceNode(context, ${
+ JSON.stringify(nodeOptions)})`)
+ .notThrow();
+
+
+ // Automate the AudioParam of the reference node with a linear ramp
+ should(
+ () => refNode[paramName].setValueAtTime(...startValue),
+ `${prefix}: refNode[${paramName}].setValueAtTime(${
+ startValue[0]}, ${startValue[1]})`)
+ .notThrow();
+
+ should(
+ () => refNode[paramName].linearRampToValueAtTime(...endValue),
+ `${prefix}: refNode[${paramName}].linearRampToValueAtTime(${
+ endValue[0]}, ${endValue[1]})`)
+ .notThrow();
+
+
+        // Create the input node and automate it so that its output, when
+        // added to the intrinsic value of the AudioParam, yields the same
+        // values as the automations on the reference node.
+
+ // Compute the start and end values based on the defaultValue of the
+ // param and the desired startValue and endValue. The input is added to
+ // the intrinsic value of the AudioParam, so we need to account for
+ // that.
+
+ let mod;
+ should(
+ () => mod = new ConstantSourceNode(context, {offset: 0}),
+ `${prefix}: mod = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+
+ let modStart = startValue[0] - refNode[paramName].defaultValue;
+ let modEnd = endValue[0] - refNode[paramName].defaultValue;
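+        // Worked example for the 'playbackRate' task above: defaultValue is
+        // 1 and startValue is [1, 0], so modStart = 0; endValue is
+        // [2, testDuration], so modEnd = 1. The intrinsic value (1) plus the
+        // input ramp (0 to 1) then reproduces the reference ramp (1 to 2).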
+ should(
+ () => mod.offset.setValueAtTime(modStart, startValue[1]),
+ `${prefix}: mod.offset.setValueAtTime(${modStart}, ${
+ startValue[1]})`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(modEnd, endValue[1]),
+ `${prefix}: mod.offset.linearRampToValueAtTime(${modEnd}, ${
+ endValue[1]})`)
+ .notThrow();
+
+ // Connect up everything.
+ should(
+ () => mod.connect(tstNode[paramName]),
+ `${prefix}: mod.connect(tstNode[${paramName}])`)
+ .notThrow();
+
+ refNode.connect(merger, 0, 0);
+ tstNode.connect(merger, 0, 1);
+
+ // Go!
+ refNode.start();
+ tstNode.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+        // Quick sanity check that the output isn't zero; if it is, we messed
+        // up the connections, the automations, or the buffer source.
+ should(expected, `Expected k-rate ${paramName} AudioParam with input`)
+ .notBeConstantValueOf(0);
+ should(actual, `Actual k-rate ${paramName} AudioParam with input`)
+ .notBeConstantValueOf(0);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate ${paramName} AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html
new file mode 100644
index 0000000000..4d2eb40d55
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet-connections.https.html
@@ -0,0 +1,77 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams with inputs for AudioWorkletNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Use the worklet gain node to test k-rate parameters.
+ const filePath =
+ '../the-audioworklet-interface/processors/gain-processor.js';
+
+ // Context for testing
+ let context;
+
+ audit.define('Create Test Worklet', (task, should) => {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+
+        // Only need a few render quanta to verify things are working.
+ const testDuration = 4 * 128 / sampleRate;
+
+ context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Construction of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('AudioWorklet k-rate AudioParam', async (task, should) => {
+ let src = new ConstantSourceNode(context);
+ let kRateNode = new AudioWorkletNode(context, 'gain');
+ src.connect(kRateNode).connect(context.destination);
+
+ let kRateParam = kRateNode.parameters.get('gain');
+ kRateParam.automationRate = 'k-rate';
+ kRateParam.value = 0;
+
+ let mod = new ConstantSourceNode(context);
+ mod.offset.setValueAtTime(0, 0);
+ mod.offset.linearRampToValueAtTime(
+ 10, context.length / context.sampleRate);
+ mod.connect(kRateParam);
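+
+        // A rough expectation, derived from the numbers above: the ramp goes
+        // from 0 to 10 over four 128-frame quanta, so the k-rate gain should
+        // hold 0, 2.5, 5, and 7.5 on successive quanta, and the output (a
+        // constant source of 1 times the gain) should step through those
+        // values.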
+
+ mod.start();
+ src.start();
+
+ const audioBuffer = await context.startRendering();
+ let output = audioBuffer.getChannelData(0);
+
+ // Verify that the output isn't constantly zero.
+ should(output, 'output').notBeConstantValueOf(0);
+ // Verify that the output from the worklet is step-wise
+ // constant.
+ for (let k = 0; k < output.length; k += 128) {
+ should(output.slice(k, k + 128), ` k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html
new file mode 100644
index 0000000000..e891da6da2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html
@@ -0,0 +1,79 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of AudioWorkletNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Use the worklet gain node to test k-rate parameters.
+ const filePath =
+ '../the-audioworklet-interface/processors/gain-processor.js';
+
+ // Context for testing
+ let context;
+
+ audit.define('Create Test Worklet', (task, should) => {
+
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+
+        // Only need a few render quanta to verify things are working.
+ const testDuration = 4 * 128 / sampleRate;
+
+ context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Construction of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('AudioWorklet k-rate AudioParam', (task, should) => {
+ let src = new ConstantSourceNode(context);
+
+ let kRateNode = new AudioWorkletNode(context, 'gain');
+
+ src.connect(kRateNode).connect(context.destination);
+
+ let kRateParam = kRateNode.parameters.get('gain');
+ kRateParam.automationRate = 'k-rate';
+
+ // Automate the gain
+ kRateParam.setValueAtTime(0, 0);
+ kRateParam.linearRampToValueAtTime(
+ 10, context.length / context.sampleRate);
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let output = audioBuffer.getChannelData(0);
+
+ // Verify that the output from the worklet is step-wise
+ // constant.
+ for (let k = 0; k < output.length; k += 128) {
+ should(
+ output.slice(k, k + 128),
+ ` k-rate output [${k}: ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html
new file mode 100644
index 0000000000..ab9df8740f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad-connection.html
@@ -0,0 +1,456 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam Inputs for BiquadFilterNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // sampleRate and duration are fairly arbitrary. We use low values to
+ // limit the complexity of the test.
+ let sampleRate = 8192;
+ let testDuration = 0.5;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'Frequency AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test frequency AudioParam using a lowpass filter whose bandwidth
+ // is initially larger than the oscillator frequency. Then automate
+ // the frequency to 0 so that the output of the filter is 0 (because
+ // the cutoff is 0).
+ let oscFrequency = 440;
+
+ let options = {
+ sampleRate: sampleRate,
+ paramName: 'frequency',
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ filterOptions: {type: 'lowpass', frequency: 0},
+ autoStart:
+ {method: 'setValueAtTime', args: [2 * oscFrequency, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [0, testDuration / 4]
+ }
+ };
+
+ let buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+ let halfLength = expected.length / 2;
+
+ // Sanity check. The expected output should not be zero for
+ // the first half, but should be zero for the second half
+ // (because the filter bandwidth is exactly 0).
+ const prefix = 'Expected k-rate frequency with automation';
+
+ should(
+ expected.slice(0, halfLength),
+ `${prefix} output[0:${halfLength - 1}]`)
+ .notBeConstantValueOf(0);
+ should(
+                expected.slice(halfLength),
+ `${prefix} output[${halfLength}:]`)
+ .beConstantValueOf(0);
+
+ // Outputs should be the same. Break the message into two
+ // parts so we can see the expected outputs.
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Q AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test Q AudioParam. Use a bandpass filter whose center frequency
+ // is fairly far from the oscillator frequency. Then start with a Q
+ // value of 0 (so everything goes through) and then increase Q to
+ // some large value such that the out-of-band signals are basically
+ // cutoff.
+ let frequency = 440;
+ let oscFrequency = 4 * frequency;
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'Q',
+ filterOptions: {type: 'bandpass', frequency: frequency, Q: 0},
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [100, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Gain AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test gain AudioParam. Use a peaking filter with a large Q so the
+ // peak is narrow with a center frequency the same as the oscillator
+ // frequency. Start with a gain of 0 so everything goes through and
+ // then ramp the gain down to -100 so that the oscillator is
+ // filtered out.
+ let oscFrequency = 4 * 440;
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'gain',
+ filterOptions:
+ {type: 'peaking', frequency: oscFrequency, Q: 100, gain: 0},
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [-100, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Detune AudioParam', description: 'k-rate input works'},
+ async (task, should) => {
+ // Test detune AudioParam. The basic idea is the same as the
+            // frequency test above, but instead of automating the frequency,
+            // we automate the detune value so that initially the filter
+            // cutoff is unchanged, and then change the detune until the
+            // cutoff goes to 1 Hz, which causes the oscillator to be
+            // filtered out.
+ let oscFrequency = 440;
+ let filterFrequency = 5 * oscFrequency;
+
+ // For a detune value d, the computed frequency, fc, of the filter
+ // is fc = f*2^(d/1200), where f is the frequency of the filter. Or
+ // d = 1200*log2(fc/f). Compute the detune value to produce a final
+ // cutoff frequency of 1 Hz.
+ let detuneEnd = 1200 * Math.log2(1 / filterFrequency);
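+            // Quick check of the formula with this test's numbers:
+            // filterFrequency = 5 * 440 = 2200 Hz, so detuneEnd =
+            // 1200 * log2(1/2200), roughly -13324 cents, detuning the cutoff
+            // down to 1 Hz.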
+
+ let options = {
+ sampleRate: sampleRate,
+ oscFrequency: oscFrequency,
+ testDuration: testDuration,
+ paramName: 'detune',
+ filterOptions: {
+ type: 'lowpass',
+ frequency: filterFrequency,
+ detune: 0,
+ gain: 0
+ },
+ autoStart: {method: 'setValueAtTime', args: [0, 0]},
+ autoEnd: {
+ method: 'linearRampToValueAtTime',
+ args: [detuneEnd, testDuration / 4]
+ }
+ };
+
+ const buffer = await doTest(should, options);
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Outputs should be the same
+ checkForSameOutput(should, options.paramName, actual, expected);
+
+ task.done();
+ });
+
+ audit.define('All k-rate inputs', async (task, should) => {
+ // Test the case where all AudioParams are set to k-rate with an input
+ // to each AudioParam. Similar to the above tests except all the params
+ // are k-rate.
+ let testFrames = testDuration * sampleRate;
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: sampleRate, length: testFrames});
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // The peaking filter uses all four AudioParams, so this is the node to
+ // test.
+ let filterOptions =
+ {type: 'peaking', frequency: 0, detune: 0, gain: 0, Q: 0};
+ let refNode;
+ should(
+ () => refNode = new BiquadFilterNode(context, filterOptions),
+ `Create: refNode = new BiquadFilterNode(context, ${
+ JSON.stringify(filterOptions)})`)
+ .notThrow();
+
+ let tstNode;
+ should(
+ () => tstNode = new BiquadFilterNode(context, filterOptions),
+ `Create: tstNode = new BiquadFilterNode(context, ${
+ JSON.stringify(filterOptions)})`)
+ .notThrow();
+
+ // Make all the AudioParams k-rate.
+ ['frequency', 'Q', 'gain', 'detune'].forEach(param => {
+ should(
+ () => refNode[param].automationRate = 'k-rate',
+ `Set rate: refNode[${param}].automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => tstNode[param].automationRate = 'k-rate',
+ `Set rate: tstNode[${param}].automationRate = 'k-rate'`)
+ .notThrow();
+ });
+
+ // One input for each AudioParam.
+ let mod = {};
+ ['frequency', 'Q', 'gain', 'detune'].forEach(param => {
+ should(
+ () => mod[param] = new ConstantSourceNode(context, {offset: 0}),
+ `Create: mod[${
+ param}] = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+ should(
+ () => mod[param].offset.automationRate = 'a-rate',
+ `Set rate: mod[${param}].offset.automationRate = 'a-rate'`)
+ .notThrow();
+ });
+
+ // Set up automations for refNode. We want to start the filter with
+ // parameters that let the oscillator signal through more or less
+ // untouched. Then change the filter parameters to filter out the
+        // oscillator. What happens in between doesn't really matter for this
+        // test. Hence, set the initial parameters with a center frequency well
+        // above the oscillator and a Q and gain of 0 to pass everything.
+ [['frequency', [4 * src.frequency.value, 0]], ['Q', [0, 0]],
+ ['gain', [0, 0]], ['detune', [4 * 1200, 0]]]
+ .forEach(param => {
+ should(
+ () => refNode[param[0]].setValueAtTime(...param[1]),
+ `Automate 0: refNode.${param[0]}.setValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ should(
+ () => mod[param[0]].offset.setValueAtTime(...param[1]),
+ `Automate 0: mod[${param[0]}].offset.setValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ });
+
+ // Now move the filter frequency to the oscillator frequency with a high
+ // Q and very low gain to remove the oscillator signal.
+ [['frequency', [src.frequency.value, testDuration / 4]],
+ ['Q', [40, testDuration / 4]], ['gain', [-100, testDuration / 4]], [
+ 'detune', [0, testDuration / 4]
+ ]].forEach(param => {
+ should(
+ () => refNode[param[0]].linearRampToValueAtTime(...param[1]),
+ `Automate 1: refNode[${param[0]}].linearRampToValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ should(
+ () => mod[param[0]].offset.linearRampToValueAtTime(...param[1]),
+ `Automate 1: mod[${param[0]}].offset.linearRampToValueAtTime(${
+ param[1][0]}, ${param[1][1]})`)
+ .notThrow();
+ });
+
+ // Connect everything
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ src.start();
+ for (let param in mod) {
+ should(
+ () => mod[param].connect(tstNode[param]),
+ `Connect: mod[${param}].connect(tstNode.${param})`)
+ .notThrow();
+ }
+
+ for (let param in mod) {
+ should(() => mod[param].start(), `Start: mod[${param}].start()`)
+ .notThrow();
+ }
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // Sanity check that the output isn't all zeroes.
+ should(actual, 'All k-rate AudioParams').notBeConstantValueOf(0);
+ should(actual, 'All k-rate AudioParams').beCloseToArray(expected, {
+ absoluteThreshold: 0
+ });
+
+ task.done();
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test that a k-rate AudioParam with an input reads the input value and
+ // is actually k-rate.
+ //
+ // A refNode is created with an automation timeline. This is the
+ // expected output.
+ //
+ // The testNode is the same, but it has a node connected to the k-rate
+ // AudioParam. The input to the node is an a-rate ConstantSourceNode
+        // whose output is automated in exactly the same way as the refNode. If
+ // the test passes, the outputs of the two nodes MUST match exactly.
+
+ // The options argument MUST contain the following members:
+ // sampleRate - the sample rate for the offline context
+ // testDuration - duration of the offline context, in sec.
+ // paramName - the name of the AudioParam to be tested
+ // oscFrequency - frequency of oscillator source
+ // filterOptions - options used to construct the BiquadFilterNode
+ // autoStart - information about how to start the automation
+ // autoEnd - information about how to end the automation
+ //
+ // The autoStart and autoEnd options are themselves dictionaries with
+ // the following required members:
+ // method - name of the automation method to be applied
+ // args - array of arguments to be supplied to the method.
+ let {
+ sampleRate,
+ paramName,
+ oscFrequency,
+ autoStart,
+ autoEnd,
+ testDuration,
+ filterOptions
+ } = options;
+
+ let testFrames = testDuration * sampleRate;
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: sampleRate, length: testFrames});
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Any calls to |should| are meant to be informational so we can see
+ // what nodes are created and the automations used.
+ let src;
+
+ // Create the source.
+ should(
+ () => {
+ src = new OscillatorNode(context, {frequency: oscFrequency});
+ },
+ `${paramName}: new OscillatorNode(context, {frequency: ${
+ oscFrequency}})`)
+ .notThrow();
+
+ // The refNode automates the AudioParam with k-rate automations, no
+ // inputs.
+ let refNode;
+ should(
+ () => {
+ refNode = new BiquadFilterNode(context, filterOptions);
+ },
+ `Reference BiquadFilterNode(c, ${JSON.stringify(filterOptions)})`)
+ .notThrow();
+
+ refNode[paramName].automationRate = 'k-rate';
+
+ // Set up automations for the reference node.
+ should(
+ () => {
+ refNode[paramName][autoStart.method](...autoStart.args);
+ },
+ `refNode.${paramName}.${autoStart.method}(${autoStart.args})`)
+ .notThrow();
+ should(
+ () => {
+ refNode[paramName][autoEnd.method](...autoEnd.args);
+ },
+          `refNode.${paramName}.${autoEnd.method}(${autoEnd.args})`)
+ .notThrow();
+
+ // The tstNode does the same automation, but it comes from the input
+ // connected to the AudioParam.
+ let tstNode;
+ should(
+ () => {
+ tstNode = new BiquadFilterNode(context, filterOptions);
+ },
+ `Test BiquadFilterNode(context, ${JSON.stringify(filterOptions)})`)
+ .notThrow();
+ tstNode[paramName].automationRate = 'k-rate';
+
+ // Create the input to the AudioParam of the test node. The output of
+ // this node MUST have the same set of automations as the reference
+ // node, and MUST be a-rate to make sure we're handling k-rate inputs
+ // correctly.
+ let mod = new ConstantSourceNode(context);
+ mod.offset.automationRate = 'a-rate';
+ should(
+ () => {
+ mod.offset[autoStart.method](...autoStart.args);
+ },
+ `${paramName}: mod.offset.${autoStart.method}(${autoStart.args})`)
+ .notThrow();
+ should(
+ () => {
+ mod.offset[autoEnd.method](...autoEnd.args);
+ },
+ `${paramName}: mod.offset.${autoEnd.method}(${autoEnd.args})`)
+ .notThrow();
+
+ // Create graph
+ mod.connect(tstNode[paramName]);
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ // Run!
+ src.start();
+ mod.start();
+ return context.startRendering();
+ }
+
+ function checkForSameOutput(should, paramName, actual, expected) {
+ let halfLength = expected.length / 2;
+
+ // Outputs should be the same. We break the check into halves so we can
+ // see the expected outputs. Mostly for a simple visual check that the
+ // output from the second half is small because the tests generally try
+ // to filter out the signal so that the last half of the output is
+ // small.
+ should(
+ actual.slice(0, halfLength),
+          `k-rate ${paramName} with input: output[0:${halfLength}]`)
+ .beCloseToArray(
+ expected.slice(0, halfLength), {absoluteThreshold: 0});
+ should(
+ actual.slice(halfLength),
+ `k-rate ${paramName} with input: output[${halfLength}:]`)
+ .beCloseToArray(expected.slice(halfLength), {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html
new file mode 100644
index 0000000000..85ae4f175f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-biquad.html
@@ -0,0 +1,111 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of BiquadFilterNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {task: 'BiquadFilter-0', label: 'Biquad k-rate AudioParams (all)'},
+ (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ nodeName: 'BiquadFilterNode',
+ nodeOptions: {type: 'lowpass'},
+ prefix: 'All k-rate params',
+ // Set all AudioParams to k-rate
+ rateSettings: [
+ {name: 'Q', value: 'k-rate'},
+ {name: 'detune', value: 'k-rate'},
+ {name: 'frequency', value: 'k-rate'},
+ {name: 'gain', value: 'k-rate'},
+ ],
+ // Automate just the frequency
+ automations: [{
+ name: 'frequency',
+ methods: [
+ {name: 'setValueAtTime', options: [350, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [0, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ // Define a test where we verify that a k-rate audio param produces
+ // different results from an a-rate audio param for each of the audio
+ // params of a biquad.
+ //
+ // Each entry gives the name of the AudioParam, an initial value to be
+ // used with setValueAtTime, and a final value to be used with
+ // linearRampToValueAtTime. (See |doTest| for details as well.)
+
+ [{name: 'Q',
+ initial: 1,
+ final: 10
+ },
+ {name: 'detune',
+ initial: 0,
+ final: 1200
+ },
+ {name: 'frequency',
+ initial: 350,
+ final: 0
+ },
+ {name: 'gain',
+ initial: 10,
+ final: 0
+ }].forEach(paramProperty => {
+ audit.define('Biquad k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ nodeName: 'BiquadFilterNode',
+ nodeOptions: {type: 'peaking', Q: 1, gain: 10},
+ prefix: `k-rate ${paramProperty.name}`,
+          // Set just the AudioParam under test to k-rate
+ rateSettings: [
+ {name: paramProperty.name, value: 'k-rate'},
+ ],
+ // Automate just the given AudioParam
+ automations: [{
+ name: paramProperty.name,
+ methods: [
+ {name: 'setValueAtTime', options: [paramProperty.initial, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [paramProperty.final, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html
new file mode 100644
index 0000000000..730f03e561
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-connections.html
@@ -0,0 +1,139 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with Inputs</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Must be power of two to eliminate round-off
+ const sampleRate = 8192;
+
+ // Arbitrary duration that doesn't need to be too long to verify k-rate
+ // automations. Probably should be at least a few render quanta.
+ const testDuration = 8 * RENDER_QUANTUM_FRAMES / sampleRate;
+
+      // Test that a k-rate GainNode.gain is actually k-rate.
+ audit.define(
+ {label: 'Gain', description: 'k-rate GainNode.gain'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new ConstantSourceNode(context);
+
+ createTestSubGraph(context, src, merger, 'GainNode', 'gain');
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `gain[${k}:${k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+      // Test that a k-rate StereoPannerNode.pan is actually k-rate.
+ audit.define(
+ {label: 'StereoPanner', description: 'k-rate StereoPannerNode.pan'},
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new ConstantSourceNode(context);
+
+ createTestSubGraph(
+ context, src, merger, 'StereoPannerNode', 'pan', {
+ testModSetup: node => {
+ node.offset.setValueAtTime(-1, 0);
+ node.offset.linearRampToValueAtTime(1, testDuration);
+ }
+ });
+
+ src.start();
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length; k += 128) {
+ should(actual.slice(k, k + 128), `pan[${k}:${k + 128}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ function createTestSubGraph(
+ context, src, merger, nodeName, paramName, options) {
+        // The test node, which has its AudioParam set up for k-rate automations.
+ let tstNode = new window[nodeName](context);
+
+ if (options && options.setups) {
+ options.setups(tstNode);
+ }
+ tstNode[paramName].automationRate = 'k-rate';
+
+ // Modulating signal for the test node. Just a linear ramp. This is
+ // connected to the AudioParam of the tstNode.
+ let tstMod = new ConstantSourceNode(context);
+ if (options && options.testModSetup) {
+ options.testModSetup(tstMod);
+ } else {
+ tstMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ }
+
+ tstMod.connect(tstNode[paramName]);
+ src.connect(tstNode).connect(merger, 0, 0);
+
+ // The ref node is the same type of node as the test node, but uses
+ // a-rate automation. However, the modulating signal is k-rate. This
+ // causes the input to the audio param to be constant over a render,
+ // which is basically the same as making the audio param be k-rate.
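+      // For example, with the default ramps below, each render quantum sees
+      // a single param value (the a-rate ramp sampled at the quantum's first
+      // frame), so the two branches should produce identical, step-wise
+      // constant modulation.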
+ let refNode = new window[nodeName](context);
+ let refMod = new ConstantSourceNode(context);
+ refMod.offset.automationRate = 'k-rate';
+ if (options && options.testModSetup) {
+ options.testModSetup(refMod);
+ } else {
+ refMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ }
+
+ refMod.connect(refNode[paramName]);
+ src.connect(refNode).connect(merger, 0, 1);
+
+ tstMod.start();
+ refMod.start();
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html
new file mode 100644
index 0000000000..0bea5c91f8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-constant-source.html
@@ -0,0 +1,176 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of ConstantSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('ConstantSource k-rate offset', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+
+ // Only new a few render quanta to verify things are working.
+ let testDuration = 4 * 128 / sampleRate;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ sourceNodeName: 'none',
+ verifyPieceWiseConstant: true,
+ nodeName: 'ConstantSourceNode',
+ prefix: 'k-rate offset',
+ rateSettings: [{name: 'offset', value: 'k-rate'}],
+ automations: [{
+ name: 'offset',
+ methods: [
+ {name: 'setValueAtTime', options: [0, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [10, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+      // Parameters for the following tests.
+
+ // Must be power of two to eliminate round-off
+ const sampleRate8k = 8192;
+
+ // Arbitrary duration that doesn't need to be too long to verify k-rate
+ // automations. Probably should be at least a few render quanta.
+ const testDuration = 8 * RENDER_QUANTUM_FRAMES / sampleRate8k;
+
+ // Basic test that k-rate ConstantSourceNode.offset is k-rate. This is
+ // the basis for all of the following tests, so make sure it's right.
+ audit.define(
+ {
+ label: 'ConstantSourceNode.offset k-rate automation',
+ description:
+ 'Explicitly test ConstantSourceNode.offset k-rate automation is k-rate'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate8k,
+ length: testDuration * sampleRate8k
+ });
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // k-rate ConstantSource.offset using a linear ramp starting at 0
+ // and incrementing by 1 for each frame.
+ let src = new ConstantSourceNode(context, {offset: 0});
+ src.offset.automationRate = 'k-rate';
+
+ src.offset.setValueAtTime(0, 0);
+ src.offset.linearRampToValueAtTime(context.length, testDuration);
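+
+          // With this ramp the a-rate value at frame k is exactly k (the
+          // ramp spans context.length = 1024 frames over testDuration), so
+          // the k-rate source should hold 0, 128, 256, ..., 896 across the
+          // eight render quanta.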
+
+ src.connect(merger, 0, 0);
+
+ src.start();
+
+ // a-rate ConstantSource using the same ramp as above.
+ let refSrc = new ConstantSourceNode(context, {offset: 0});
+
+ refSrc.offset.setValueAtTime(0, 0);
+ refSrc.offset.linearRampToValueAtTime(context.length, testDuration);
+
+ refSrc.connect(merger, 0, 1);
+
+ refSrc.start();
+
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < actual.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ // Verify that the k-rate output is constant over the render
+ // and that it matches the value of the a-rate value at the
+ // beginning of the render.
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `k-rate ConstantSource.offset: output[${k}:${
+ k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ // This test verifies that a k-rate input to the ConstantSourceNode.offset
+ // works just as if we set the AudioParam to be k-rate. This is the basis
+ // of the following tests, so make sure it works.
+ audit.define(
+ {
+ label: 'ConstantSource.offset',
+ description: 'Verify k-rate automation matches k-rate input'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate8k,
+ length: testDuration * sampleRate8k
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let tstSrc = new ConstantSourceNode(context);
+ let tstMod = new ConstantSourceNode(context);
+ tstSrc.offset.automationRate = 'k-rate';
+ tstMod.offset.linearRampToValueAtTime(context.length, testDuration);
+
+          tstMod.connect(tstSrc.offset);
+ tstSrc.connect(merger, 0, 0);
+
+ let refSrc = new ConstantSourceNode(context);
+ let refMod = new ConstantSourceNode(context);
+ refMod.offset.linearRampToValueAtTime(context.length, testDuration);
+ refMod.offset.automationRate = 'k-rate';
+
+ refMod.connect(refSrc.offset);
+ refSrc.connect(merger, 0, 1);
+
+ tstSrc.start();
+ tstMod.start();
+ refSrc.start();
+ refMod.start();
+
+ context.startRendering()
+ .then(buffer => {
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+
+ for (let k = 0; k < context.length;
+ k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `ConstantSource.offset k-rate input: output[${k}:${
+ k + RENDER_QUANTUM_FRAMES}]`)
+ .beConstantValueOf(expected[k]);
+ }
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html
new file mode 100644
index 0000000000..fcf66f2e3e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay-connections.html
@@ -0,0 +1,156 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for DelayNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Power-of-two to eliminate round-off in computing time and frames, but
+ // is otherwise arbitrary.
+ const sampleRate = 8192;
+
+ // Arbitrary duration except it must be greater than or equal to 1.
+ const testDuration = 1.5;
+
+ audit.define(
+ {label: 'delayTime', description: `DelayNode delayTime k-rate input`},
+ async (task, should) => {
+ // Two channels: 0 = test result, 1 = expected result.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Test the DelayNode by having a reference node (refNode) that uses
+ // k-rate automations of delayTime. The test node (testNode) sets
+ // delayTime to k-rate with a connected input that has the same
+          // automation values as the reference node. The test passes if the
+          // outputs of the two nodes are identical.
+
+ // Just some non-constant source.
+ let src = new OscillatorNode(context);
+
+ // The end value and time for the linear ramp. These values are
+ // chosen so that the delay advances faster than real time.
+ let endValue = 1.125;
+ let endTime = 1;
+
+ let refNode;
+
+ should(
+ () => refNode = new DelayNode(context),
+ `refNode = new DelayNode(context)`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.automationRate = 'k-rate',
+ `refNode.delayTime.automationRate = 'k-rate'`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.setValueAtTime(0, 0),
+ `refNode.delayTime.setValueAtTime(0, 0)`)
+ .notThrow();
+
+ should(
+ () => refNode.delayTime.linearRampToValueAtTime(
+ endValue, endTime),
+ `refNode.delayTime.linearRampToValueAtTime(${endValue}, ${
+ endTime})`)
+ .notThrow();
+
+ let testNode;
+
+ should(
+ () => testNode = new DelayNode(context),
+ `testNode = new DelayNode(context)`)
+ .notThrow();
+
+ should(
+ () => testNode.delayTime.automationRate = 'k-rate',
+ `testNode.delayTime.automationRate = 'k-rate'`)
+ .notThrow();
+
+ let testMod;
+
+ should(
+ () => testMod = new ConstantSourceNode(context),
+ `testMod = new ConstantSourceNode(context)`)
+ .notThrow();
+
+ should(
+ () => testMod.offset.setValueAtTime(0, 0),
+ `testMod.offset.setValueAtTime(0, 0)`)
+ .notThrow();
+
+ should(
+ () => testMod.offset.linearRampToValueAtTime(endValue, endTime),
+ `testMod.offset.linearRampToValueAtTime(${endValue}, ${
+ endTime})`)
+ .notThrow();
+
+ should(
+ () => testMod.connect(testNode.delayTime),
+ `testMod.connect(testNode.delayTime)`)
+ .notThrow();
+
+ // Connect up everything and go!
+ src.connect(testNode).connect(merger, 0, 0);
+ src.connect(refNode).connect(merger, 0, 1);
+
+ src.start();
+ testMod.start();
+
+ const buffer = await context.startRendering();
+          let actual = buffer.getChannelData(0);
+          let expected = buffer.getChannelData(1);
+
+          // Quick sanity check that the output isn't zero; if it is, we
+          // messed up the connections, the automations, or the buffer source.
+ should(expected, `Expected k-rate delayTime AudioParam with input`)
+ .notBeConstantValueOf(0);
+ should(actual, `Actual k-rate delayTime AudioParam with input`)
+ .notBeConstantValueOf(0);
+
+ // Quick sanity check. The amount of delay after one render is
+ // endValue * 128 / sampleRate. But after 1 render, time has
+ // advanced 128/sampleRate. Hence, the delay exceeds the time by
+ // (endValue - 1)*128/sampleRate sec or (endValue - 1)*128 frames.
+ // This means the output must be EXACTLY zero for this many frames
+ // in the second render.
+ let zeroFrames = (endValue - 1) * RENDER_QUANTUM_FRAMES;
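+          // Concretely, endValue = 1.125 gives zeroFrames = 0.125 * 128 =
+          // 16: the delay has grown 16 frames beyond the elapsed time, so
+          // the first 16 frames of the second render quantum must be silent.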
+ should(
+ actual.slice(
+ RENDER_QUANTUM_FRAMES, RENDER_QUANTUM_FRAMES + zeroFrames),
+ `output[${RENDER_QUANTUM_FRAMES}, ${
+ RENDER_QUANTUM_FRAMES + zeroFrames - 1}]`)
+ .beConstantValueOf(0);
+ should(
+ actual.slice(
+ RENDER_QUANTUM_FRAMES + zeroFrames,
+ 2 * RENDER_QUANTUM_FRAMES),
+ `output[${RENDER_QUANTUM_FRAMES + zeroFrames}, ${
+ 2 * RENDER_QUANTUM_FRAMES - 1}]`)
+ .notBeConstantValueOf(0);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate delayTime AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ });
+
+ audit.run();
+ </script>
+ </body>
+ </html>
\ No newline at end of file
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html
new file mode 100644
index 0000000000..5465c39430
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-delay.html
@@ -0,0 +1,49 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of DelayNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Test k-rate DelayNode', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+
+ doTest(context, should, {
+ nodeName: 'DelayNode',
+ nodeOptions: null,
+ prefix: 'DelayNode',
+          // Set delayTime to k-rate
+          rateSettings: [{name: 'delayTime', value: 'k-rate'}],
+          // Automate just the delayTime
+ automations: [{
+ name: 'delayTime',
+ methods: [
+ {name: 'setValueAtTime', options: [0, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [.5, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html
new file mode 100644
index 0000000000..c1755cd155
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-dynamics-compressor-connections.html
@@ -0,0 +1,145 @@
+<!doctype html>
+<html>
+ <head>
+ <title>k-rate AudioParams with inputs for DynamicsCompressorNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      // Fairly arbitrary sampleRate and somewhat arbitrary duration.
+ const sampleRate = 48000;
+ const testDuration = 0.25;
+
+ ['attack', 'knee', 'ratio', 'release', 'threshold'].forEach(param => {
+ audit.define(
+ {label: param, description: `Dynamics compressor ${param}`},
+ async (task, should) => {
+ await doTest(should, {prefix: task.label, paramName: param});
+ task.done();
+ });
+ });
+
+ audit.run();
+
+ async function doTest(should, options) {
+ // Test k-rate automation of DynamicsCompressorNode with connected
+ // input.
+ //
+ // A reference compressor node is created with an automation on the
+ // selected AudioParam. For simplicity, we just use a linear ramp from
+ // the minValue to the maxValue of the AudioParam.
+ //
+ // The test node has an input signal connected to the AudioParam. This
+ // input signal is created to match the automation on the reference
+ // node.
+ //
+ // Finally, the output from the two nodes must be identical if k-rate
+ // inputs are working correctly.
+ //
+ // Options parameter is a dictionary with the following required
+ // members:
+ // prefix - prefix to use for the messages.
+ // paramName - Name of the AudioParam to be tested
+
+ let {prefix, paramName} = options;
+
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Use an oscillator for the source. Pretty arbitrary parameters.
+ let src =
+ new OscillatorNode(context, {type: 'sawtooth', frequency: 440});
+
+ // Create the reference and test nodes.
+ let refNode;
+ let tstNode;
+
+ should(
+ () => refNode = new DynamicsCompressorNode(context),
+ `${prefix}: refNode = new DynamicsCompressorNode(context)`)
+ .notThrow();
+
+ let tstOptions = {};
+ tstOptions[paramName] = refNode[paramName].minValue;
+ should(
+ () => tstNode = new DynamicsCompressorNode(context, tstOptions),
+ `${prefix}: tstNode = new DynamicsCompressorNode(context, ${
+ JSON.stringify(tstOptions)})`)
+ .notThrow();
+
+
+ // Automate the AudioParam of the reference node with a linear ramp
+ should(
+ () => refNode[paramName].setValueAtTime(
+ refNode[paramName].minValue, 0),
+ `${prefix}: refNode[${paramName}].setValueAtTime(refNode[${
+ paramName}].minValue, 0)`)
+ .notThrow();
+
+ should(
+ () => refNode[paramName].linearRampToValueAtTime(
+ refNode[paramName].maxValue, testDuration),
+ `${prefix}: refNode[${paramName}].linearRampToValueAtTime(refNode[${
+                paramName}].maxValue, ${testDuration})`)
+ .notThrow();
+
+
+        // Create the input node and automate it so that its output, when
+        // added to the intrinsic value of the AudioParam, yields the same
+        // values as the automations on the reference node. We do it this way
+ // because the ratio AudioParam has a nominal range of [1, 20] so we
+ // can't just set the value to 0, which is what we'd normally do.
+ let mod;
+ should(
+ () => mod = new ConstantSourceNode(context, {offset: 0}),
+ `${prefix}: mod = new ConstantSourceNode(context, {offset: 0})`)
+ .notThrow();
+ let endValue =
+ refNode[paramName].maxValue - refNode[paramName].minValue;
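+        // For example, 'ratio' has a nominal range of [1, 20], so the test
+        // node's intrinsic value stays at minValue = 1 while the input ramps
+        // from 0 to endValue = 19, reproducing the reference ramp from
+        // 1 to 20.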
+ should(
+ () => mod.offset.setValueAtTime(0, 0),
+ `${prefix}: mod.offset.setValueAtTime(0, 0)`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(endValue, testDuration),
+ `${prefix}: mod.offset.linearRampToValueAtTime(${endValue}, ${
+ testDuration})`)
+ .notThrow();
+
+ // Connect up everything.
+ should(
+ () => mod.connect(tstNode[paramName]),
+ `${prefix}: mod.connect(tstNode[${paramName}])`)
+ .notThrow();
+
+ src.connect(refNode).connect(merger, 0, 0);
+ src.connect(tstNode).connect(merger, 0, 1);
+
+ // Go!
+ src.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // The expected and actual results must be EXACTLY the same.
+ should(actual, `k-rate ${paramName} AudioParam with input`)
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html
new file mode 100644
index 0000000000..887d9f78db
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-gain.html
@@ -0,0 +1,47 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of GainNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Test k-rate GainNode', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+
+ doTest(context, should, {
+ nodeName: 'GainNode',
+ nodeOptions: null,
+ prefix: 'GainNode',
+ // Set AudioParam to k-rate
+ rateSettings: [{name: 'gain', value: 'k-rate'}],
+ // Automate
+ automations: [{
+ name: 'gain',
+ methods: [
+ {name: 'setValueAtTime', options: [1, 0]},
+ {name: 'linearRampToValueAtTime', options: [0, testDuration]}
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html
new file mode 100644
index 0000000000..475b364367
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator-connections.html
@@ -0,0 +1,578 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ k-rate AudioParams with inputs for OscillatorNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Sample rate must be a power of two to eliminate round-off when
+ // computing time from frames and vice versa. Using a non-power of two
+ // will work, but the thresholds below will not be zero. They're probably
+ // closer to 1e-5 or so, but if everything is working correctly, the
+ // outputs really should be exactly equal.
+ const sampleRate = 8192;
+
+ // Fairly arbitrary but short duration to limit runtime.
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ const testDuration = testFrames / sampleRate;
+
+ audit.define(
+ {label: 'Test 1', description: 'k-rate frequency input'},
+ async (task, should) => {
+ // Test that an input to the frequency AudioParam set to k-rate
+ // works.
+
+ // Fairly arbitrary start and end frequencies for the automation.
+ const freqStart = 100;
+ const freqEnd = 2000;
+
+ let refSetup = (context) => {
+ let srcRef = new OscillatorNode(context, {frequency: 0});
+
+ should(
+ () => srcRef.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => srcRef.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => srcRef.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return srcRef;
+ };
+
+ let testSetup = (context) => {
+ let srcTest = new OscillatorNode(context, {frequency: 0});
+ should(
+ () => srcTest.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return srcTest;
+ };
+
+ let modSetup = (context) => {
+ let mod = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod.offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () =>
+ mod.offset.linearRampToValueAtTime(freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ // This node is going to be connected to the frequency AudioParam.
+ return {frequency: mod};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate frequency with input',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 2', description: 'k-rate detune input'},
+ async (task, should) => {
+ // Test that an input to the detune AudioParam set to k-rate works.
+          // The threshold was determined experimentally; it should probably
+          // not be much larger than 5e-5, or something is not right.
+
+ // Fairly arbitrary start and end detune values for automation.
+ const detuneStart = 0;
+ const detuneEnd = 2000;
+
+ let refSetup = (context) => {
+ let srcRef = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => srcRef.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ should(
+ () => srcRef.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => srcRef.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return srcRef;
+ };
+
+ let testSetup = (context) => {
+ let srcTest = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => srcTest.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return srcTest;
+ };
+
+ let modSetup = (context) => {
+ let mod = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod.offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => mod.offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return {detune: mod};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate detune with input',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 3',
+ description: 'k-rate frequency input with a-rate detune'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with a k-rate frequency with input and an
+          // a-rate detune with automations.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0});
+
+ // Set up k-rate frequency and a-rate detune
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+                    freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcTest.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcTest.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let mod = {};
+ mod['frequency'] = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => mod['frequency'].offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+
+ should(
+ () => mod['frequency'].offset.linearRampToValueAtTime(
+                      freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return mod;
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate frequency input with a-rate detune',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 4',
+ description: 'a-rate frequency with k-rate detune input'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with an a-rate frequency with automations and
+ // a k-rate detune with input.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {detune: 0});
+
+ // Set up a-rate frequency and k-rate detune
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcRef.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+                      freqEnd, testDuration),
+ `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {detune: 0});
+
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+ `${task.label}: srcTest.frequency.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: srcTest.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let mod = {};
+ const name = 'detune';
+
+            mod[name] = new ConstantSourceNode(context, {offset: 0});
+ should(
+ () => mod[name].offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+
+ should(
+ () => mod[name].offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return mod;
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate detune input with a-rate frequency',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'Test 5',
+ description: 'k-rate inputs for frequency and detune'
+ },
+ async (task, should) => {
+ // Test OscillatorNode with k-rate frequency and detune with inputs
+ // on both.
+
+ // Fairly arbitrary start and end values for the frequency and
+ // detune automations.
+ const freqStart = 100;
+ const freqEnd = 2000;
+ const detuneStart = 0;
+ const detuneEnd = -2000;
+
+ let refSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0, detune: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcRef.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.frequency.setValueAtTime(freqStart, 0),
+                  `${task.label}: srcRef.frequency.setValueAtTime(${
+                      freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.frequency.linearRampToValueAtTime(
+ freqEnd, testDuration),
+                  `${task.label}: srcRef.frequency.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcRef.detune.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.setValueAtTime(detuneStart, 0),
+ `${task.label}: srcRef.detune.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => node.detune.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: srcRef.detune.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return node;
+ };
+
+ let testSetup = (context) => {
+ let node = new OscillatorNode(context, {frequency: 0, detune: 0});
+
+ should(
+ () => node.frequency.automationRate = 'k-rate',
+ `${task.label}: srcTest.frequency.automationRate = 'k-rate'`)
+ .notThrow();
+ should(
+ () => node.detune.automationRate = 'k-rate',
+ `${task.label}: srcTest.detune.automationRate = 'k-rate'`)
+ .notThrow();
+
+ return node;
+ };
+
+ let modSetup = (context) => {
+ let modF = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => modF.offset.setValueAtTime(freqStart, 0),
+ `${task.label}: modFreq.offset.setValueAtTime(${
+ freqStart}, 0)`)
+ .notThrow();
+ should(
+ () => modF.offset.linearRampToValueAtTime(
+ freqEnd, testDuration),
+ `${task.label}: modFreq.offset.linearRampToValueAtTime(${
+ freqEnd}, ${testDuration})`)
+ .notThrow();
+
+ let modD = new ConstantSourceNode(context, {offset: 0});
+
+ should(
+ () => modD.offset.setValueAtTime(detuneStart, 0),
+ `${task.label}: modDetune.offset.setValueAtTime(${
+ detuneStart}, 0)`)
+ .notThrow();
+ should(
+ () => modD.offset.linearRampToValueAtTime(
+ detuneEnd, testDuration),
+ `${task.label}: modDetune.offset.linearRampToValueAtTime(${
+ detuneEnd}, ${testDuration})`)
+ .notThrow();
+
+ return {frequency: modF, detune: modD};
+ };
+
+ await testParams(should, {
+ prefix: task.label,
+ summary: 'k-rate inputs for both frequency and detune',
+ setupRefOsc: refSetup,
+ setupTestOsc: testSetup,
+ setupMod: modSetup
+ });
+
+ task.done();
+ });
+
+ audit.run();
+
+ async function testParams(should, options) {
+ // Test a-rate and k-rate AudioParams of an OscillatorNode.
+ //
+ // |options| should be a dictionary with these members:
+ // prefix - prefix to use for messages
+ // summary - message to be printed with the final results
+ // setupRefOsc - function returning the reference oscillator
+ // setupTestOsc - function returning the test oscillator
+ // setupMod - function returning nodes to be connected to the
+ // AudioParams.
+ //
+ // |setupRefOsc| and |setupTestOsc| are given the context and each
+ // method is expected to create an OscillatorNode with the appropriate
+ // automations for testing. The constructed OscillatorNode is returned.
+ //
+        // The reference oscillator should automate the desired AudioParams
+        // at the appropriate automation rate; its output is the expected
+        // result.
+        //
+        // The test oscillator should set up the AudioParams, but the
+        // AudioParam(s) under test are expected to have an input that
+        // matches the automation used for the reference oscillator.
+ //
+        // |setupMod| must create one or two ConstantSourceNodes with exactly
+        // the same automations as used for the reference oscillator. These
+        // nodes are used as the inputs to the AudioParams of the test
+        // oscillator. This function returns a dictionary whose members are
+        // named 'frequency' and/or 'detune'; the name indicates which
+        // AudioParam the constant source node should be connected to.
+
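+        // For example, a minimal |setupMod| that drives just the frequency
+        // AudioParam (as in Test 1 above) looks like this sketch:
+        //
+        //   let modSetup = (context) => {
+        //     let mod = new ConstantSourceNode(context, {offset: 0});
+        //     mod.offset.setValueAtTime(freqStart, 0);
+        //     mod.offset.linearRampToValueAtTime(freqEnd, testDuration);
+        //     return {frequency: mod};
+        //   };
+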
+ // Two channels: 0 = reference signal, 1 = test signal
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // The reference oscillator.
+ let srcRef = options.setupRefOsc(context);
+
+ // The test oscillator.
+ let srcTest = options.setupTestOsc(context);
+
+ // Inputs to AudioParam.
+ let mod = options.setupMod(context);
+
+ if (mod['frequency']) {
+ should(
+ () => mod['frequency'].connect(srcTest.frequency),
+ `${options.prefix}: modFreq.connect(srcTest.frequency)`)
+ .notThrow();
+          mod['frequency'].start();
+ }
+
+ if (mod['detune']) {
+ should(
+ () => mod['detune'].connect(srcTest.detune),
+ `${options.prefix}: modDetune.connect(srcTest.detune)`)
+ .notThrow();
+          mod['detune'].start();
+ }
+
+ srcRef.connect(merger, 0, 0);
+ srcTest.connect(merger, 0, 1);
+
+ srcRef.start();
+ srcTest.start();
+
+ let buffer = await context.startRendering();
+ let expected = buffer.getChannelData(0);
+ let actual = buffer.getChannelData(1);
+
+ // The output of the reference and test oscillator should be
+ // exactly equal because the AudioParam values should be exactly
+ // equal.
+ should(actual, options.summary).beCloseToArray(expected, {
+ absoluteThreshold: 0
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html
new file mode 100644
index 0000000000..6803f55eab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-oscillator.html
@@ -0,0 +1,88 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of OscillatorNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+
+      // Only need a few render quanta to verify things are working.
+ let testDuration = 4 * 128 / sampleRate;
+
+ [{name: 'detune', initial: 0, final: 1200}, {
+ name: 'frequency',
+ initial: 440,
+ final: sampleRate / 2
+ }].forEach(paramProperty => {
+ audit.define(
+ 'Oscillator k-rate ' + paramProperty.name, (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+ let inverter = new GainNode(context, {gain: -1});
+ inverter.connect(merger, 0, 2);
+
+ let kRateNode = new OscillatorNode(context);
+ let aRateNode = new OscillatorNode(context);
+
+ kRateNode.connect(merger, 0, 0);
+ aRateNode.connect(merger, 0, 1);
+
+ kRateNode.connect(merger, 0, 2);
+ aRateNode.connect(inverter);
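+
+                // Channel 2 now receives kRateNode plus the inverted
+                // aRateNode, i.e. the sample-wise difference between the
+                // k-rate and a-rate outputs.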
+
+ // Set the rate
+ kRateNode[paramProperty.name].automationRate = 'k-rate';
+
+                // Automate the AudioParam under test
+ kRateNode[paramProperty.name].setValueAtTime(
+ paramProperty.initial, 0);
+ kRateNode[paramProperty.name].linearRampToValueAtTime(
+ paramProperty.final, testDuration);
+
+ aRateNode[paramProperty.name].setValueAtTime(
+ paramProperty.initial, 0);
+ aRateNode[paramProperty.name].linearRampToValueAtTime(
+ paramProperty.final, testDuration);
+
+ kRateNode.start();
+ aRateNode.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ let kRateOut = audioBuffer.getChannelData(0);
+ let aRateOut = audioBuffer.getChannelData(1);
+ let diff = audioBuffer.getChannelData(2);
+
+ // Verify that the outputs are different.
+ should(
+ diff,
+ 'k-rate ' + paramProperty.name +
+ ': Difference between a-rate and k-rate outputs')
+ .notBeConstantValueOf(0);
+
+ })
+ .then(() => task.done());
+ });
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html
new file mode 100644
index 0000000000..001cf63bd3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner-connections.html
@@ -0,0 +1,238 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ k-rate AudioParams with inputs for PannerNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'Panner x', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionX'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Panner y', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionY'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Panner z', description: 'k-rate input'},
+ async (task, should) => {
+ await testPannerParams(should, {param: 'positionZ'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener x', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionX'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener y', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionY'});
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Listener z', description: 'k-rate input'},
+ async (task, should) => {
+ await testListenerParams(should, {param: 'positionZ'});
+ task.done();
+ });
+
+ audit.run();
+
+ async function testPannerParams(should, options) {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ let testDuration = testFrames / sampleRate;
+ // Four channels needed because the first two are for the output of
+ // the reference panner, and the next two are for the test panner.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 4,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Create a stereo source out of two mono sources
+ let src0 = new ConstantSourceNode(context, {offset: 1});
+ let src1 = new ConstantSourceNode(context, {offset: 2});
+ let src = new ChannelMergerNode(context, {numberOfInputs: 2});
+ src0.connect(src, 0, 0);
+ src1.connect(src, 0, 1);
+
+ let finalPosition = 100;
+
+ // Reference panner node with k-rate AudioParam automations. The
+ // output of this panner is the reference output.
+ let refNode = new PannerNode(context);
+ // Initialize the panner location to somewhat arbitrary values.
+ refNode.positionX.value = 1;
+ refNode.positionY.value = 50;
+ refNode.positionZ.value = -25;
+
+ // Set the AudioParam under test with the appropriate automations.
+ refNode[options.param].automationRate = 'k-rate';
+ refNode[options.param].setValueAtTime(1, 0);
+ refNode[options.param].linearRampToValueAtTime(
+ finalPosition, testDuration);
+ let refSplit = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ // Test panner node with k-rate AudioParam with inputs.
+ let tstNode = new PannerNode(context);
+ tstNode.positionX.value = 1;
+ tstNode.positionY.value = 50;
+ tstNode.positionZ.value = -25;
+ tstNode[options.param].value = 0;
+ tstNode[options.param].automationRate = 'k-rate';
+ let tstSplit = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+        // The input to the AudioParam. It must have the same automation
+        // sequence as used by refNode, and it must be a-rate to demonstrate
+        // the k-rate effect of the AudioParam.
+ let mod = new ConstantSourceNode(context, {offset: 0});
+ mod.offset.setValueAtTime(1, 0);
+ mod.offset.linearRampToValueAtTime(finalPosition, testDuration);
+
+ mod.connect(tstNode[options.param]);
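+
+        // The input signal sums with the AudioParam's intrinsic value
+        // (set to 0 above), so the test panner sees the same effective
+        // parameter values that refNode computes from its automations.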
+
+ src.connect(refNode).connect(refSplit);
+ src.connect(tstNode).connect(tstSplit);
+
+ refSplit.connect(merger, 0, 0);
+ refSplit.connect(merger, 1, 1);
+ tstSplit.connect(merger, 0, 2);
+ tstSplit.connect(merger, 1, 3);
+
+ mod.start();
+ src0.start();
+ src1.start();
+
+ const buffer = await context.startRendering();
+ let expected0 = buffer.getChannelData(0);
+ let expected1 = buffer.getChannelData(1);
+ let actual0 = buffer.getChannelData(2);
+ let actual1 = buffer.getChannelData(3);
+
+ should(expected0, `Panner: ${options.param}: Expected output channel 0`)
+ .notBeConstantValueOf(expected0[0]);
+        should(expected1, `Panner: ${options.param}: Expected output channel 1`)
+ .notBeConstantValueOf(expected1[0]);
+
+        // Verify the output is a stair step: the tested AudioParam is
+        // k-rate, so it holds a single value over each render quantum,
+        // and no other AudioParam is changing.
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual0.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Panner: ${options.param}: Channel 0 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(actual0[k]);
+ }
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ actual1.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Panner: ${options.param}: Channel 1 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(actual1[k]);
+ }
+
+ should(actual0, `Panner: ${options.param}: Actual output channel 0`)
+ .beCloseToArray(expected0, {absoluteThreshold: 0});
+ should(actual1, `Panner: ${options.param}: Actual output channel 1`)
+ .beCloseToArray(expected1, {absoluteThreshold: 0});
+ }
+
+ async function testListenerParams(should, options) {
+ // Arbitrary sample rate and duration.
+ const sampleRate = 8000;
+ const testFrames = 5 * RENDER_QUANTUM_FRAMES;
+ let testDuration = testFrames / sampleRate;
+        // Two channels suffice here; they carry the stereo output of the
+        // single panner under test.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ // Create a stereo source out of two mono sources
+ let src0 = new ConstantSourceNode(context, {offset: 1});
+ let src1 = new ConstantSourceNode(context, {offset: 2});
+ let src = new ChannelMergerNode(context, {numberOfInputs: 2});
+ src0.connect(src, 0, 0);
+ src1.connect(src, 0, 1);
+
+ let finalPosition = 100;
+
+        // The panner under test. Its position is fixed; the listener's
+        // AudioParam is the one automated via an input below.
+ let panner = new PannerNode(context);
+ panner.positionX.value = 10;
+ panner.positionY.value = 50;
+ panner.positionZ.value = -25;
+
+ src.connect(panner);
+
+ let mod = new ConstantSourceNode(context, {offset: 0});
+ mod.offset.setValueAtTime(1, 0);
+ mod.offset.linearRampToValueAtTime(finalPosition, testDuration);
+
+ context.listener[options.param].automationRate = 'k-rate';
+ mod.connect(context.listener[options.param]);
+
+ panner.connect(context.destination);
+
+ src0.start();
+ src1.start();
+ mod.start();
+
+ const buffer = await context.startRendering();
+ let c0 = buffer.getChannelData(0);
+ let c1 = buffer.getChannelData(1);
+
+        // Verify the output is a stair step: the listener AudioParam under
+        // test is k-rate, so it holds a single value over each render
+        // quantum, and no other AudioParam is changing.
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ c0.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Listener: ${options.param}: Channel 0 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(c0[k]);
+ }
+
+ for (let k = 0; k < testFrames; k += RENDER_QUANTUM_FRAMES) {
+ should(
+ c1.slice(k, k + RENDER_QUANTUM_FRAMES),
+ `Listener: ${options.param}: Channel 1 output[${k}, ${
+ k + RENDER_QUANTUM_FRAMES - 1}]`)
+ .beConstantValueOf(c1[k]);
+ }
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html
new file mode 100644
index 0000000000..60200b2471
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html
@@ -0,0 +1,178 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParams of PannerNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // Define a test where we verify that a k-rate audio param produces
+ // different results from an a-rate audio param for each of the audio
+      // params of a panner.
+ //
+ // Each entry gives the name of the AudioParam, an initial value to be
+ // used with setValueAtTime, and a final value to be used with
+ // linearRampToValueAtTime. (See |doTest| for details as well.)
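+      //
+      // For example, the 'positionX' entry below is expected to produce
+      // automation roughly equivalent to this sketch:
+      //
+      //   panner.positionX.automationRate = 'k-rate';
+      //   panner.positionX.setValueAtTime(0, 0);
+      //   panner.positionX.linearRampToValueAtTime(1000, testDuration);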
+
+ [{name: 'positionX', initial: 0, final: 1000},
+ {name: 'positionY', initial: 0, final: 1000},
+ {name: 'orientationX', initial: 1, final: 10},
+ {name: 'orientationY', initial: 1, final: 10},
+ {name: 'orientationZ', initial: 1, final: 10},
+ ].forEach(paramProperty => {
+ audit.define('Panner k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 5 * 128 / sampleRate;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ sourceNodeName: 'ConstantSourceNode',
+ verifyPieceWiseConstant: true,
+ nodeName: 'PannerNode',
+ // Make the source directional so orientation matters, and set some
+ // defaults for the position and orientation so that we're not on an
+ // axis where the azimuth and elevation might be constant when
+ // moving one of the AudioParams.
+ nodeOptions: {
+ distanceModel: 'inverse',
+ coneOuterAngle: 360,
+ coneInnerAngle: 0,
+ positionX: 1,
+ positionY: 1,
+ positionZ: 1,
+ orientationX: 0,
+ orientationY: 1,
+ orientationZ: 1
+ },
+ prefix: `k-rate ${paramProperty.name}`,
+            // Just set the given AudioParam to k-rate
+ rateSettings: [
+ {name: paramProperty.name, value: 'k-rate'},
+ ],
+ // Automate just the given AudioParam
+ automations: [{
+ name: paramProperty.name,
+ methods: [
+ {name: 'setValueAtTime', options: [paramProperty.initial, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [paramProperty.final, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+ });
+
+      // Test k-rate automation of the listener. The initial and final
+      // automation values are pretty arbitrary, except that they should be
+      // such that the panner and listener produce non-constant output.
+ [{name: 'positionX', initial: [1, 0], final: [1000, 1]},
+ {name: 'positionY', initial: [1, 0], final: [1000, 1]},
+ {name: 'positionZ', initial: [1, 0], final: [1000, 1]},
+ {name: 'forwardX', initial: [-1, 0], final: [1, 1]},
+ {name: 'forwardY', initial: [-1, 0], final: [1, 1]},
+ {name: 'forwardZ', initial: [-1, 0], final: [1, 1]},
+ {name: 'upX', initial: [-1, 0], final: [1000, 1]},
+ {name: 'upY', initial: [-1, 0], final: [1000, 1]},
+ {name: 'upZ', initial: [-1, 0], final: [1000, 1]},
+ ].forEach(paramProperty => {
+ audit.define(
+ 'Listener k-rate ' + paramProperty.name, (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 5 * 128 / sampleRate;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doListenerTest(context, should, {
+ param: paramProperty.name,
+ initial: paramProperty.initial,
+ final: paramProperty.final
+ }).then(() => task.done());
+ });
+ });
+
+ audit.run();
+
+ function doListenerTest(context, should, options) {
+ let src = new ConstantSourceNode(context);
+ let panner = new PannerNode(context, {
+ distanceModel: 'inverse',
+ coneOuterAngle: 360,
+ coneInnerAngle: 10,
+ positionX: 10,
+ positionY: 10,
+ positionZ: 10,
+ orientationX: 1,
+ orientationY: 1,
+ orientationZ: 1
+ });
+
+ src.connect(panner).connect(context.destination);
+
+ src.start();
+
+ let listener = context.listener;
+
+        // Set listener properties to "random" values so that motion on one
+        // of the attributes actually changes things relative to the panner
+        // location. The up and forward directions should have a simple
+        // relationship between them.
+ listener.positionX.value = -1;
+ listener.positionY.value = 1;
+ listener.positionZ.value = -1;
+ listener.forwardX.value = -1;
+ listener.forwardY.value = 1;
+ listener.forwardZ.value = -1;
+ // Make the up vector not parallel or perpendicular to the forward and
+ // position vectors so that automations of the up vector produce
+ // noticeable differences.
+ listener.upX.value = 1;
+ listener.upY.value = 1;
+ listener.upZ.value = 2;
+
+ let audioParam = listener[options.param];
+ audioParam.automationRate = 'k-rate';
+
+ let prefix = `Listener ${options.param}`;
+ should(audioParam.automationRate, prefix + '.automationRate')
+ .beEqualTo('k-rate');
+ should(() => {
+ audioParam.setValueAtTime(...options.initial);
+ }, prefix + `.setValueAtTime(${options.initial})`).notThrow();
+ should(() => {
+ audioParam.linearRampToValueAtTime(...options.final);
+ }, prefix + `.linearRampToValueAtTime(${options.final})`).notThrow();
+
+ return context.startRendering().then(renderedBuffer => {
+ let prefix = `Listener k-rate ${options.param}: `;
+ let output = renderedBuffer.getChannelData(0);
+ // Sanity check that the output isn't constant.
+ should(output, prefix + `Output`).notBeConstantValueOf(output[0]);
+
+ // Verify that the output is constant over each render quantum
+ for (let k = 0; k < output.length; k += 128) {
+ should(
+ output.slice(k, k + 128), prefix + `Output [${k}, ${k + 127}]`)
+ .beConstantValueOf(output[k]);
+ }
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html
new file mode 100644
index 0000000000..06905b89c3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/k-rate-stereo-panner.html
@@ -0,0 +1,48 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test k-rate AudioParam of StereoPannerNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="automation-rate-testing.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define('Test k-rate StereoPannerNode', (task, should) => {
+ // Arbitrary sample rate and duration.
+ let sampleRate = 8000;
+ let testDuration = 1;
+ let context = new OfflineAudioContext({
+ numberOfChannels: 3,
+ sampleRate: sampleRate,
+ length: testDuration * sampleRate
+ });
+
+ doTest(context, should, {
+ nodeName: 'StereoPannerNode',
+ nodeOptions: null,
+ prefix: 'StereoPannerNode',
+ // Set all AudioParams to k-rate.
+ rateSettings: [{name: 'pan', value: 'k-rate'}],
+          // Automate just the pan.
+ automations: [{
+ name: 'pan',
+ methods: [
+ {name: 'setValueAtTime', options: [0, 0]}, {
+ name: 'linearRampToValueAtTime',
+ options: [.5, testDuration]
+ }
+ ]
+ }]
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html
new file mode 100644
index 0000000000..e9b8f0accb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/nan-param.html
@@ -0,0 +1,92 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test Flushing of NaN to Zero in AudioParams</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ // See
+ // https://webaudio.github.io/web-audio-api/#computation-of-value.
+ //
+ // The computed value must replace NaN values in the output with
+ // the default value of the param.
+ audit.define('AudioParam NaN', async (task, should) => {
+ // For testing, we only need a small number of frames; and
+ // a low sample rate is perfectly fine. Use two channels.
+ // The first channel is for the AudioParam output. The
+ // second channel is for the AudioParam input.
+ let context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: 256, sampleRate: 8192});
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // A constant source with a huge value.
+ let mod = new ConstantSourceNode(context, {offset: 1e30});
+
+ // Gain nodes with a huge positive gain and huge negative
+ // gain. Combined with the huge offset in |mod|, the
+          // outputs of the gain nodes are +Infinity and -Infinity.
+ let gainPos = new GainNode(context, {gain: 1e30});
+ let gainNeg = new GainNode(context, {gain: -1e30});
+
+ mod.connect(gainPos);
+ mod.connect(gainNeg);
+
+          // Connect these to the second merger channel. Summing +Infinity
+          // and -Infinity produces NaN, so this channel is a sanity check
+          // that the AudioParam input really is NaN.
+ gainPos.connect(merger, 0, 1);
+ gainNeg.connect(merger, 0, 1);
+
+          // Source whose AudioParam is connected to the graph
+          // that produces NaN values. Use a non-default offset value
+          // so that the test does not accidentally pass if we get the
+          // default value for some other reason.
+ let src = new ConstantSourceNode(context, {offset: 100});
+
+ gainPos.connect(src.offset);
+ gainNeg.connect(src.offset);
+
+          // AudioParam output goes to channel 0 of the destination.
+ src.connect(merger, 0, 0);
+
+ // Let's go!
+ mod.start();
+ src.start();
+
+ let buffer = await context.startRendering();
+
+ let input = buffer.getChannelData(1);
+ let output = buffer.getChannelData(0);
+
+ // Have to test manually for NaN values in the input because
+ // NaN fails all comparisons.
+ let isNaN = true;
+ for (let k = 0; k < input.length; ++k) {
+ if (!Number.isNaN(input[k])) {
+ isNaN = false;
+ break;
+ }
+ }
+
+ should(isNaN, 'AudioParam input contains only NaN').beTrue();
+
+ // Output of the AudioParam should have all NaN values
+ // replaced by the default.
+ should(output, 'AudioParam output')
+ .beConstantValueOf(src.offset.defaultValue);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html
new file mode 100644
index 0000000000..c81c3ad23e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html
@@ -0,0 +1,70 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test exponentialRampToValue with end time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test exponentialRampToValue with end time in the past'
+ },
+ (task, should) => {
+ let {context, source, test, reference} = setupRetrospectiveGraph();
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+                // Call exponentialRampToValueAtTime with an end time in the
+                // past
+ test.gain.exponentialRampToValueAtTime(
+ 0.1, 0.5 * context.currentTime);
+ test.gain.exponentialRampToValueAtTime(0.9, 1.0);
+
+ reference.gain.exponentialRampToValueAtTime(
+ 0.1, context.currentTime);
+ reference.gain.exponentialRampToValueAtTime(0.9, 1.0);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html
new file mode 100644
index 0000000000..9f5e55fe55
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html
@@ -0,0 +1,70 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test linearRampToValue with end time in the past</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test linearRampToValue with end time in the past'
+ },
+ (task, should) => {
+ let {context, source, test, reference} = setupRetrospectiveGraph();
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+                // Call linearRampToValueAtTime with an end time in the past
+ test.gain.linearRampToValueAtTime(
+ 0.1, 0.5 * context.currentTime);
+ test.gain.linearRampToValueAtTime(0.9, 1.0);
+
+ reference.gain.linearRampToValueAtTime(
+ 0.1, context.currentTime);
+ reference.gain.linearRampToValueAtTime(0.9, 1.0);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html
new file mode 100644
index 0000000000..41a37bdb91
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html
@@ -0,0 +1,80 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+ <head>
+ <title>Test setTargetAtTime with start time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test setTargetAtTime with start time in the past'
+ },
+ (task, should) => {
+ // Use a sample rate that is a power of two to eliminate round-off
+ // in computing the currentTime.
+ let context = new OfflineAudioContext(2, 16384, 16384);
+ let source = new ConstantSourceNode(context);
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ let test = new GainNode(context);
+ let reference = new GainNode(context);
+
+ source.connect(test);
+ source.connect(reference);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+ // Call setTargetAtTime with a time in the past
+                test.gain.setTargetAtTime(0.1, 0.5 * context.currentTime, 0.1);
+ reference.gain.setTargetAtTime(0.1, context.currentTime, 0.1);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
new file mode 100644
index 0000000000..32cdc6307f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
@@ -0,0 +1,74 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Test setValueAtTime with startTime in the past</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test setValueAtTime with startTime in the past'
+ },
+ (task, should) => {
+ let {context, source, test, reference} = setupRetrospectiveGraph();
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ // Use a ramp of slope 1 per frame to measure time.
+ // The end value is the extent of exact precision in single
+ // precision float.
+ const rampEnd = context.length - suspendFrame;
+ const rampEndSeconds = context.length / context.sampleRate;
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+ // Call setValueAtTime with a time in the past
+ test.gain.setValueAtTime(0.0, 0.5 * context.currentTime);
+ test.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ reference.gain.setValueAtTime(0.0, context.currentTime);
+ reference.gain.linearRampToValueAtTime(
+ rampEnd, rampEndSeconds);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html
new file mode 100644
index 0000000000..451b6ea829
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html
@@ -0,0 +1,67 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test SetValueCurve with start time in the past</title>
+ <script src=/resources/testharness.js></script>
+ <script src=/resources/testharnessreport.js></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="retrospective-test.js"></script>
+ </head>
+  <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Test SetValueCurve with start time in the past'
+ },
+ (task, should) => {
+ let {context, source, test, reference} = setupRetrospectiveGraph();
+
+ // Suspend the context at this frame so we can synchronously set up
+ // automations.
+ const suspendFrame = 128;
+
+ context.suspend(suspendFrame / context.sampleRate)
+ .then(() => {
+              // Call setValueCurveAtTime with a start time in the past
+ test.gain.setValueCurveAtTime(
+ new Float32Array([1.0, 0.1]), 0.5 * context.currentTime,
+ 1.0);
+ reference.gain.setValueCurveAtTime(
+ new Float32Array([1.0, 0.1]), context.currentTime, 1.0);
+ })
+ .then(() => context.resume());
+
+ source.start();
+
+ context.startRendering()
+ .then(resultBuffer => {
+ let testValue = resultBuffer.getChannelData(0);
+ let referenceValue = resultBuffer.getChannelData(1);
+
+ // Until the suspendFrame, both should be exactly equal to 1.
+ should(
+ testValue.slice(0, suspendFrame),
+ `Test[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+ should(
+ referenceValue.slice(0, suspendFrame),
+ `Reference[0:${suspendFrame - 1}]`)
+ .beConstantValueOf(1);
+
+ // After the suspendFrame, both should be equal (and not
+ // constant)
+ should(
+ testValue.slice(suspendFrame), `Test[${suspendFrame}:]`)
+ .beEqualToArray(referenceValue.slice(suspendFrame));
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js
new file mode 100644
index 0000000000..bbda190f09
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-test.js
@@ -0,0 +1,29 @@
+// Create an audio graph on an offline context that consists of a
+// constant source and two gain nodes. One of the nodes is the node to
+// be tested and the other is the reference node. The output from the
+// test node is in channel 0 of the offline context; the output from
+// the reference node is in channel 1.
+//
+// Returns a dictionary with the context, source node, the test node,
+// and the reference node.
+function setupRetrospectiveGraph() {
+ // Use a sample rate that is a power of two to eliminate round-off
+ // in computing the currentTime.
+ let context = new OfflineAudioContext(2, 16384, 16384);
+ let source = new ConstantSourceNode(context);
+
+ let test = new GainNode(context);
+ let reference = new GainNode(context);
+
+ source.connect(test);
+ source.connect(reference);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ return {context: context, source: source, test: test, reference: reference};
+}
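+
+// Typical usage in the retrospective-* tests above (illustrative sketch):
+//
+//   let {context, source, test, reference} = setupRetrospectiveGraph();
+//   // Schedule an automation on test.gain with a time in the past, and
+//   // the same automation on reference.gain clamped to currentTime.
+//   source.start();
+//   context.startRendering().then(buffer => {
+//     // Channel 0 (test) is expected to match channel 1 (reference).
+//   });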
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html
new file mode 100644
index 0000000000..2ed076cccf
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/set-target-conv.html
@@ -0,0 +1,93 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test convergence of setTargetAtTime</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src='/webaudio/resources/audio-param.js'></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+          {label: 'setTargetAtTime', description: 'convergence handled correctly'},
+ (task, should) => {
+ // Two channels:
+ // 0 - actual result
+ // 1 - expected result
+ const context = new OfflineAudioContext(
+ {numberOfChannels: 2, sampleRate: 8000, length: 8000});
+
+ const merger = new ChannelMergerNode(
+            context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+          // Construct the test source whose AudioParams will be automated
+          // to verify that the AudioParams are working correctly.
+ let src;
+
+ should(
+ () => src = new ConstantSourceNode(context),
+ 'src = new ConstantSourceNode(context)')
+ .notThrow();
+
+ src.connect(merger, 0, 0);
+ src.offset.setValueAtTime(1, 0);
+
+ const timeConstant = 0.01;
+
+          // testTime must be at least 10*timeConstant so that the
+          // exponential approach to the target has effectively converged
+          // (exp(-10) is roughly 4.5e-5 of the remaining gap). Also,
+          // testTime must not lie on a render boundary.
+ const testTime = 0.15;
+ const rampEnd = testTime + 0.001;
+
+ should(
+ () => src.offset.setTargetAtTime(0.5, 0.01, timeConstant),
+ `src.offset.setTargetAtTime(0.5, 0.01, ${timeConstant})`)
+ .notThrow();
+ should(
+ () => src.offset.setValueAtTime(0.5, testTime),
+ `src.offset.setValueAtTime(0.5, ${testTime})`)
+ .notThrow();
+ should(
+ () => src.offset.linearRampToValueAtTime(1, rampEnd),
+ `src.offset.linearRampToValueAtTime(1, ${rampEnd})`)
+ .notThrow();
+
+ // The reference node that will generate the expected output. We do
+ // the same automations, except we don't apply the setTarget
+ // automation.
+ const refSrc = new ConstantSourceNode(context);
+ refSrc.connect(merger, 0, 1);
+
+ refSrc.offset.setValueAtTime(0.5, 0);
+ refSrc.offset.setValueAtTime(0.5, testTime);
+ refSrc.offset.linearRampToValueAtTime(1, rampEnd);
+
+ src.start();
+ refSrc.start();
+
+ context.startRendering()
+ .then(audio => {
+ const actual = audio.getChannelData(0);
+ const expected = audio.getChannelData(1);
+
+                // Just verify that the actual output matches the expected
+                // output starting a little before testTime.
+ let testFrame =
+ Math.floor(testTime * context.sampleRate) - 128;
+ should(actual.slice(testFrame), `output[${testFrame}:]`)
+ .beCloseToArray(
+ expected.slice(testFrame),
+ {relativeThreshold: 4.1724e-6});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
new file mode 100644
index 0000000000..827aeeabd4
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<title>Test setTargetAtTime after an event in the same processing block</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 179;
+ const valueStartOffset = 42;
+ const targetStartOffset = 53;
+ const sampleRate = 48000;
+ const scheduledValue = -0.5;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, valueStartOffset/sampleRate);
+ gain.gain.setTargetAtTime(scheduledValue, targetStartOffset/sampleRate,
+ 128/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < valueStartOffset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If the next event (having time T1) after this SetValue event is
+ // not of type LinearRampToValue or ExponentialRampToValue, then, for
+ // T0≤t<T1: v(t)=V".
+ // "Start exponentially approaching the target value at the given time
+ // with a rate having the given time constant."
+ // The target is the same value, and so the SetValue value continues.
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
new file mode 100644
index 0000000000..36fde2b996
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<title>Test setValueAtTime with start time not on a block boundary</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 200;
+ const offset = 65;
+ const sampleRate = 48000;
+ const scheduledValue = -2.0;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, offset/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < offset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If there are no more events after this SetValue event, then for
+ // t≥T0, v(t)=V, where T0 is the startTime parameter and V is the
+ // value parameter."
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html
new file mode 100644
index 0000000000..dc324b22d6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test the invocation order of AudioWorklet.addModule() and BaseAudioContext
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let realtimeContext = new AudioContext();
+ let offlineContext = new OfflineAudioContext(1, sampleRate, sampleRate);
+
+ let filePath = 'processors/dummy-processor.js';
+
+      // Test that the browser does not crash when addModule() is called
+      // after the realtime context construction.
+ audit.define(
+ {label: 'module-loading-after-realtime-context-creation'},
+ (task, should) => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(realtimeContext, 'dummy');
+ dummyWorkletNode.connect(realtimeContext.destination);
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode ' +
+ 'from realtime context')
+ .beTrue();
+ task.done();
+ });
+
+      // Test that the browser does not crash when addModule() is called
+      // after the offline context construction.
+ audit.define(
+ {label: 'module-loading-after-offline-context-creation'},
+ (task, should) => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(offlineContext, 'dummy');
+ dummyWorkletNode.connect(offlineContext.destination);
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode ' +
+ 'from offline context')
+ .beTrue();
+ task.done();
+ });
+
+ Promise.all([
+ realtimeContext.audioWorklet.addModule(filePath),
+ offlineContext.audioWorklet.addModule(filePath)
+ ]).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html
new file mode 100644
index 0000000000..9e93f48ab8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-iterable.https.html
@@ -0,0 +1,205 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>
+      Test get parameterDescriptors as various iterables
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ </head>
+
+ <body>
+ <script id="params">
+ // A series of AudioParamDescriptors, copied one by one into various iterable
+ // data structures. This is used by both the processor side and the main
+    // thread side, so it lives in its own script tag.
+ const PARAMS = [
+ {
+ name: "a control-rate parameter",
+ defaultValue: 0.5,
+ minValue: 0,
+ maxValue: 1,
+ automationRate: "a-rate",
+ },
+ {
+ name: "你好",
+ defaultValue: 2.5,
+ minValue: 0,
+ maxValue: 7,
+ automationRate: "a-rate",
+ },
+ {
+ name: "🎶",
+ defaultValue: 8.5,
+ minValue: 0,
+ maxValue: 11115,
+ automationRate: "k-rate",
+ },
+ ];
+ </script>
+ <script id="processors" type="worklet">
+ registerProcessor("set",
+ class SetParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ var s = new Set();
+ s.add(PARAMS[0]);
+ s.add(PARAMS[1]);
+ s.add(PARAMS[2]);
+ return s;
+ }
+ constructor() { super(); }
+ process() {
+ }
+ });
+
+ registerProcessor("array",
+ class ArrayParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return PARAMS;
+ }
+ constructor() { super(); }
+ process() { }
+ });
+
+ function* gen() {
+ yield PARAMS[0];
+ yield PARAMS[1];
+ yield PARAMS[2];
+ }
+ registerProcessor("generator",
+ class GeneratorParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return gen();
+ }
+ constructor() { super(); }
+ process() { }
+ });
+      // Test a processor whose parameterDescriptors getter returns something
+      // that is not iterable.
+ try {
+ registerProcessor("invalid",
+ class InvalidParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return 4;
+ }
+ constructor() { super(); }
+ process() { }
+ });
+ throw "This should not have been reached.";
+ } catch (e) {
+ // unclear how to signal success here, but we can signal failure in the
+ // developer console
+ if (e.name != "TypeError") {
+ throw "This should be TypeError";
+ }
+ }
+      // Test a processor whose parameterDescriptors getter returns two
+      // descriptors with the same param name.
+ try {
+ registerProcessor("duplicate-param-name",
+ class DuplicateParamProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ var p = {
+ name: "a",
+ defaultValue: 1,
+ minValue: 0,
+ maxValue: 1,
+ automationRate: "k-rate",
+ };
+ return [p,p];
+ }
+ constructor() { super(); }
+ process() { }
+ });
+ throw "This should not have been reached.";
+ } catch (e) {
+ // unclear how to signal success here, but we can signal failure in the
+ // developer console
+ if (e.name != "NotSupportedError") {
+ throw "This should be NotSupportedError";
+ }
+ }
+      // Test a processor that has no parameterDescriptors getter at all.
+ try {
+ registerProcessor("no-params",
+ class NoParamProcessor extends AudioWorkletProcessor {
+ constructor() { super(); }
+ process() { }
+ });
+ } catch (e) {
+ throw "Construction should have worked.";
+ }
+ </script>
+ <script>
+ setup({ explicit_done: true });
+      // Mangle the PARAMS object into a map that has the same shape as an
+      // AudioWorkletNode.parameters property would have.
+ var PARAMS_MAP = new Map();
+ for (var param of PARAMS) {
+ var o = param;
+ var name = o.name;
+ delete o.name;
+ PARAMS_MAP.set(name, o);
+ }
+
+      // This compares `lhs` and `rhs`, two maplikes with the same shape as
+      // PARAMS_MAP.
+ function compare(testname, lhs, rhs) {
+        assert_equals(lhs.size, rhs.size, "Map match in size for " + testname);
+        for (var [k, v] of lhs) {
+          assert_true(rhs.has(k), testname + ": " + k + " exists in both maps");
+ var vrhs = rhs.get(k);
+ ["defaultValue", "minValue", "maxValue", "automationRate"].forEach(
+ paramKey => {
+            assert_equals(
+ v[paramKey],
+ vrhs[paramKey],
+ `Values for ${k}.${paramKey} match for ${testname}`
+ );
+ }
+ );
+ }
+ }
+ var ac = new AudioContext();
+ var url = URLFromScriptsElements(["params", "processors"]);
+ ac.audioWorklet
+ .addModule(url)
+ .then(() => {
+ ["set", "array", "generator"].forEach(iterable => {
+ test(() => {
+ var node = new AudioWorkletNode(ac, iterable);
+ compare(iterable, node.parameters, PARAMS_MAP);
+ }, `Creating an AudioWorkletNode with a ${iterable} for
+ parameter descriptor worked`);
+ });
+ })
+ .then(function() {
+ test(function() {
+ assert_throws_dom("InvalidStateError", function() {
+ new AudioWorkletNode(ac, "invalid");
+ });
+        }, `Attempting to create an AudioWorkletNode with a non-iterable
+        for parameter descriptors should not work`);
+ })
+ .then(function() {
+ test(() => {
+ new AudioWorkletNode(ac, "no-params");
+ }, `Attempting to create an AudioWorkletNode from a processor
+ that does not have a parameterDescriptors getter should work`);
+ })
+ .then(function() {
+ test(function() {
+ assert_throws_dom("InvalidStateError", function() {
+ new AudioWorkletNode(ac, "duplicate-param-name");
+ });
+        }, `Attempting to create an AudioWorkletNode with two parameter
+        descriptors with the same name should not work`);
+ }).then(function() {
+ done();
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html
new file mode 100644
index 0000000000..9578b26881
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html
@@ -0,0 +1,96 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test AudioParam Array Size
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+ let filePath = 'processors/param-size-processor.js';
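+      // |param-size-processor.js| ships separately in processors/. Judging by
+      // the assertions below, it exposes a single 'param' AudioParam and
+      // writes the length of the parameter array seen in process() into its
+      // output: 1 for a constant-valued quantum, 128 for a full-size array.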
+ let context;
+
+      // Use a power of two so there's no roundoff when computing times from
+      // frames.
+ let sampleRate = 16384;
+
+ // Sets up AudioWorklet and OfflineAudioContext.
+ audit.define('Initializing AudioWorklet and Context', (task, should) => {
+ should(() => {
+ context = new OfflineAudioContext(
+ 1, 10 * RENDER_QUANTUM_FRAMES, sampleRate);
+ }, 'Creating offline context for testing').notThrow();
+
+ should(
+ context.audioWorklet.addModule(filePath), 'Creating test worklet')
+ .beResolved()
+ .then(() => {
+ task.done();
+ });
+ });
+
+ audit.define('Verify Size of AudioParam Arrays', (task, should) => {
+ let node = new AudioWorkletNode(context, 'param-size');
+ let nodeParam = node.parameters.get('param');
+
+ node.connect(context.destination);
+
+ let renderQuantumDuration = RENDER_QUANTUM_FRAMES / context.sampleRate;
+
+ // Set up some automations, after one render quantum. We want the first
+ // render not to have any automations, just to be sure we handle that
+ // case correctly.
+ context.suspend(renderQuantumDuration)
+ .then(() => {
+ let now = context.currentTime;
+
+ // Establish the first automation event.
+ nodeParam.setValueAtTime(1, now);
+ // The second render should be constant
+ nodeParam.setValueAtTime(0, now + renderQuantumDuration);
+ // The third render and part of the fourth is a linear ramp
+ nodeParam.linearRampToValueAtTime(
+ 1, now + 2.5 * renderQuantumDuration);
+ // Everything afterwards should be constant.
+ })
+ .then(() => context.resume());
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let data = renderedBuffer.getChannelData(0);
+
+ // The very first render quantum should be constant, so the array
+ // has length 1.
+ should(
+ data.slice(0, RENDER_QUANTUM_FRAMES),
+ 'Render quantum 0: array size')
+ .beConstantValueOf(1);
+
+ should(
+ data.slice(RENDER_QUANTUM_FRAMES, 2 * RENDER_QUANTUM_FRAMES),
+ 'Render quantum 1: array size')
+ .beConstantValueOf(1);
+
+ should(
+ data.slice(
+ 2 * RENDER_QUANTUM_FRAMES, 4 * RENDER_QUANTUM_FRAMES),
+ 'Render quantum 2-3: array size')
+ .beConstantValueOf(RENDER_QUANTUM_FRAMES);
+
+ should(
+ data.slice(4 * RENDER_QUANTUM_FRAMES),
+ 'Remaining renders: array size')
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html
new file mode 100644
index 0000000000..8e51470f64
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html
@@ -0,0 +1,85 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's basic AudioParam features
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 48000;
+ let renderLength = 48000 * 0.6;
+ let context;
+
+ let filePath = 'processors/gain-processor.js';
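+      // |gain-processor.js| ships separately in processors/. Presumably it
+      // registers a 'gain' processor whose single 'gain' AudioParam defaults
+      // to 0.707 (as asserted below) and multiplies its input by the
+      // per-sample parameter values, mirroring a native GainNode.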
+
+ // Sets up AudioWorklet and OfflineAudioContext.
+ audit.define('Initializing AudioWorklet and Context', (task, should) => {
+ context = new OfflineAudioContext(1, renderLength, sampleRate);
+ context.audioWorklet.addModule(filePath).then(() => {
+ task.done();
+ });
+ });
+
+      // Verifies the functionality of AudioParam in AudioWorkletNode by
+      // running identical automation on a native GainNode and on the
+      // worklet's gain parameter; the inverted native path should cancel the
+      // worklet path out to silence.
+ audit.define(
+ 'Verifying AudioParam in AudioWorkletNode',
+ (task, should) => {
+ let constantSourceNode = new ConstantSourceNode(context);
+ let gainNode = new GainNode(context);
+ let inverterNode = new GainNode(context, {gain: -1});
+ let gainWorkletNode = new AudioWorkletNode(context, 'gain');
+ let gainWorkletParam = gainWorkletNode.parameters.get('gain');
+
+ // Test default value and setter/getter functionality.
+ should(gainWorkletParam.value,
+ 'Default gain value of gainWorkletNode')
+ .beEqualTo(Math.fround(0.707));
+ gainWorkletParam.value = 0.1;
+ should(gainWorkletParam.value,
+ 'Value of gainWorkletParam after setter = 0.1')
+ .beEqualTo(Math.fround(0.1));
+
+ constantSourceNode.connect(gainNode)
+ .connect(inverterNode)
+ .connect(context.destination);
+ constantSourceNode.connect(gainWorkletNode)
+ .connect(context.destination);
+
+ // With arbitrary times and values, test all possible AudioParam
+ // automations.
+ [gainNode.gain, gainWorkletParam].forEach((param) => {
+ param.setValueAtTime(0, 0);
+ param.linearRampToValueAtTime(1, 0.1);
+ param.exponentialRampToValueAtTime(0.5, 0.2);
+ param.setValueCurveAtTime([0, 2, 0.3], 0.2, 0.1);
+ param.setTargetAtTime(0.01, 0.4, 0.5);
+ });
+
+ // Test if the setter works correctly in the middle of rendering.
+ context.suspend(0.5).then(() => {
+ gainNode.gain.value = 1.5;
+ gainWorkletParam.value = 1.5;
+ context.resume();
+ });
+
+ constantSourceNode.start();
+ context.startRendering().then((renderedBuffer) => {
+ should(renderedBuffer.getChannelData(0),
+ 'The rendered buffer')
+ .beConstantValueOf(0);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html
new file mode 100644
index 0000000000..546bd1d0d0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html
@@ -0,0 +1,66 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test MessagePort in AudioWorkletNode and AudioWorkletProcessor
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context = new AudioContext();
+
+ let filePath = 'processors/port-processor.js';
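+      // |port-processor.js| ships separately in processors/. As the tasks
+      // below assume, it posts {state: 'created'} from its constructor and
+      // echoes any message it receives back as {message: ...}.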
+
+      // Creates an AudioWorkletNode and sets an event handler on its
+      // MessagePort. The associated PortProcessor will post a message upon
+      // its construction. Test if the message is received correctly.
+ audit.define(
+ 'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
+ (task, should) => {
+ let porterWorkletNode =
+ new AudioWorkletNode(context, 'port-processor');
+
+ // Upon the creation of PortProcessor, it will post a message to the
+ // node with 'created' status.
+ porterWorkletNode.port.onmessage = (event) => {
+ should(event.data.state,
+ 'The initial message from PortProcessor')
+ .beEqualTo('created');
+ task.done();
+ };
+ });
+
+ // PortProcessor is supposed to echo the message back to the
+ // AudioWorkletNode.
+ audit.define(
+ 'Test postMessage from AudioWorkletNode to AudioWorkletProcessor',
+ (task, should) => {
+ let porterWorkletNode =
+ new AudioWorkletNode(context, 'port-processor');
+
+ porterWorkletNode.port.onmessage = (event) => {
+ // Ignore if the delivered message has |state|. This is already
+ // tested in the previous task.
+ if (event.data.state)
+ return;
+
+ should(event.data.message,
+ 'The response from PortProcessor')
+ .beEqualTo('hello');
+ task.done();
+ };
+
+ porterWorkletNode.port.postMessage('hello');
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
new file mode 100644
index 0000000000..a5dd004981
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
@@ -0,0 +1,76 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test passing SharedArrayBuffer to an AudioWorklet
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context = new AudioContext();
+
+ let filePath = 'processors/sharedarraybuffer-processor.js';
+
+ audit.define(
+ 'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
+ (task, should) => {
+ let workletNode =
+ new AudioWorkletNode(context, 'sharedarraybuffer-processor');
+
+ // After it is created, the worklet will send a new
+ // SharedArrayBuffer to the main thread.
+ //
+ // The worklet will then wait to receive a message from the main
+ // thread.
+ //
+ // When it receives the message, it will check whether it is a
+ // SharedArrayBuffer, and send this information back to the main
+ // thread.
+
+ workletNode.port.onmessage = (event) => {
+ let data = event.data;
+ switch (data.state) {
+ case 'created':
+ should(
+ data.sab instanceof SharedArrayBuffer,
+ 'event.data.sab from worklet is an instance of SharedArrayBuffer')
+ .beTrue();
+
+ // Send a SharedArrayBuffer back to the worklet.
+ let sab = new SharedArrayBuffer(8);
+ workletNode.port.postMessage(sab);
+ break;
+
+ case 'received message':
+ should(data.isSab, 'event.data from main thread is an instance of SharedArrayBuffer')
+ .beTrue();
+ task.done();
+ break;
+
+ default:
+ should(false,
+ `Got unexpected message from worklet: ${data.state}`)
+ .beTrue();
+ task.done();
+ break;
+ }
+ };
+
+ workletNode.port.onmessageerror = (event) => {
+ should(false, 'Got messageerror from worklet').beTrue();
+ task.done();
+ };
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ </script>
+ </body>
+</html>
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
new file mode 100644
index 0000000000..63b60e490f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
@@ -0,0 +1,2 @@
+Cross-Origin-Opener-Policy: same-origin
+Cross-Origin-Embedder-Policy: require-corp
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html
new file mode 100644
index 0000000000..718cadffc7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-called-on-globalthis.https.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletGlobalScope's registerProcessor() called on globalThis
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const realtimeContext = new AudioContext();
+ const filePath = 'processors/dummy-processor-globalthis.js';
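+      // |dummy-processor-globalthis.js| ships separately in processors/; per
+      // this test's purpose, it presumably registers its processor via
+      // globalThis.registerProcessor('dummy-globalthis', ...).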
+
+ audit.define('registerprocessor-called-on-globalthis', (task, should) => {
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ const dummyWorkletNode = new AudioWorkletNode(realtimeContext, 'dummy-globalthis');
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode').beTrue();
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html
new file mode 100644
index 0000000000..de31f71427
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-registerprocessor-dynamic.https.html
@@ -0,0 +1,36 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test dynamic registerProcessor() calls in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ const t = async_test('Dynamic registration in AudioWorkletGlobalScope');
+
+ const realtimeContext = new AudioContext();
+ const filePath = 'processors/dynamic-register-processor.js';
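+    // |dynamic-register-processor.js| ships separately in processors/. As the
+    // comment below explains, it registers ProcessorB up front, while
+    // ProcessorA's definition is only registered during ProcessorB's
+    // construction; the postMessage round trip synchronizes with that.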
+
+ // Test if registering an AudioWorkletProcessor dynamically (after the
+ // initial module script loading) works correctly. In the construction of
+ // nodeB (along with ProcessorB), it registers ProcessorA's definition.
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ const nodeB = new AudioWorkletNode(realtimeContext, 'ProcessorB');
+ assert_true(nodeB instanceof AudioWorkletNode,
+ 'nodeB should be instance of AudioWorkletNode');
+ nodeB.port.postMessage({});
+ nodeB.port.onmessage = () => {
+ const nodeA = new AudioWorkletNode(realtimeContext, 'ProcessorA');
+ t.step(() => {
+ assert_true(nodeA instanceof AudioWorkletNode,
+ 'nodeA should be instance of AudioWorkletNode');
+ });
+ t.done();
+ };
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
new file mode 100644
index 0000000000..685546aeb5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
@@ -0,0 +1,39 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test that activation of the worklet thread does not resume context rendering
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+ const filePath = 'processors/dummy-processor.js';
+
+ context.suspend();
+
+      // Suspend the context right away and then activate the worklet. The
+      // current time must not advance since the context is suspended.
+ audit.define(
+ {label: 'load-worklet-and-suspend'},
+ async (task, should) => {
+ await context.audioWorklet.addModule(filePath);
+ const suspendTime = context.currentTime;
+ const dummy = new AudioWorkletNode(context, 'dummy');
+ dummy.connect(context.destination);
+ return task.timeout(() => {
+ should(context.currentTime === suspendTime,
+ 'context.currentTime did not change after worklet started')
+ .beTrue();
+ should(context.state, 'context.state').beEqualTo('suspended');
+ }, 500);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html
new file mode 100644
index 0000000000..3a480464e9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-throw-onmessage.https.html
@@ -0,0 +1,62 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="utf-8" />
+ <title>
+ Test the behaviour of AudioWorkletProcessor when an `onmessage` handler
+ throws.
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ </head>
+
+ <body>
+ <script id="processor" type="worklet">
+ registerProcessor("test-throw", class param extends AudioWorkletProcessor {
+ constructor() {
+ super()
+ this.i = 0;
+ this.port.onmessage = function(arg) {
+ throw "asdasd";
+ }
+ }
+ process(input, output, parameters) {
+ this.i++;
+ this.port.postMessage(this.i);
+ return true;
+ }
+ });
+ </script>
+ <script>
+ var latestIndexReceived = 0;
+ var node = null;
+ var ac = null;
+ promise_setup(function() {
+ ac = new AudioContext();
+ var url = URLFromScriptsElements(["processor"]);
+ return ac.audioWorklet.addModule(url).then(function() {
+ node = new AudioWorkletNode(ac, "test-throw");
+ node.port.onmessage = function(e) {
+ latestIndexReceived = parseInt(e.data);
+ };
+ });
+ });
+ promise_test(async t => {
+ var currentIndex = latestIndexReceived;
+ await t.step_wait(() => {
+ return latestIndexReceived > currentIndex;
+ }, "Process is still being called");
+
+ node.port.postMessage("asdasd"); // This throws on the processor side.
+ node.onprocessorerror = function() {
+ assert_true(false, "onprocessorerror must not be called.");
+ };
+ currentIndex = latestIndexReceived;
+ await t.step_wait(() => {
+ return latestIndexReceived > currentIndex + 2;
+ }, "Process is still being called");
+ }, `Throwing in an onmessage handler in the AudioWorkletGlobalScope shouldn't stop AudioWorkletProcessor`);
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
new file mode 100644
index 0000000000..84458d0aaa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test sampleRate in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let renderLength = 512;
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ let filePath = 'processors/one-pole-processor.js';
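+        // |one-pole-processor.js| ships separately in processors/. The
+        // assertion below relies on its 'frequency' descriptor being defined
+        // in terms of the global scope's sampleRate, presumably along these
+        // lines:
+        //
+        //   static get parameterDescriptors() {
+        //     return [{name: 'frequency', maxValue: 0.5 * sampleRate,
+        //              /* other fields omitted */}];
+        //   }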
+
+ // Without rendering the context, attempt to access |sampleRate| in the
+ // global scope as soon as it is created.
+ audit.define(
+ 'Query |sampleRate| upon AudioWorkletGlobalScope construction',
+ (task, should) => {
+ let onePoleFilterNode =
+ new AudioWorkletNode(context, 'one-pole-filter');
+ let frequencyParam = onePoleFilterNode.parameters.get('frequency');
+
+ should(frequencyParam.maxValue,
+ 'frequencyParam.maxValue')
+ .beEqualTo(0.5 * context.sampleRate);
+
+ task.done();
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
new file mode 100644
index 0000000000..5f4bee7c53
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test currentTime and currentFrame in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ setup(() => {
+ let sampleRate = 48000;
+ let renderLength = 512;
+ let context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ let filePath = 'processors/timing-info-processor.js';
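+        // |timing-info-processor.js| ships separately in processors/. As used
+        // below, it replies to a 'query-timing-info' message by posting the
+        // currentFrame and currentTime values observed in its global scope.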
+
+ audit.define(
+ 'Check the timing information from AudioWorkletProcessor',
+ (task, should) => {
+ let portWorkletNode =
+ new AudioWorkletNode(context, 'timing-info-processor');
+ portWorkletNode.connect(context.destination);
+
+ // Suspend at render quantum boundary and check the timing
+ // information between the main thread and the rendering thread.
+          [0, 128, 256, 384].forEach((suspendFrame) => {
+            context.suspend(suspendFrame / sampleRate).then(() => {
+ portWorkletNode.port.onmessage = (event) => {
+ should(event.data.currentFrame,
+ 'currentFrame from the processor at ' + suspendFrame)
+ .beEqualTo(suspendFrame);
+ should(event.data.currentTime,
+ 'currentTime from the processor at '
+ + context.currentTime)
+ .beEqualTo(context.currentTime);
+ context.resume();
+ };
+
+ portWorkletNode.port.postMessage('query-timing-info');
+ });
+ });
+
+ context.startRendering().then(() => {
+ task.done();
+ });
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html
new file mode 100644
index 0000000000..330b359f7d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's automatic pull feature
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate. Anything should work.
+ const sampleRate = 48000;
+ const renderLength = RENDER_QUANTUM_FRAMES * 2;
+ const channelCount = 1;
+ const filePath = 'processors/zero-output-processor.js';
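+      // |zero-output-processor.js| ships separately in processors/. Per its
+      // use below, it has no outputs; it captures |bufferLength| frames of
+      // input and then posts them back as event.data.capturedBuffer.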
+
+ const sourceOffset = 0.5;
+
+ // Connect a constant source node to the zero-output AudioWorkletNode.
+ // Then verify if it captures the data correctly.
+ audit.define('setup-worklet', (task, should) => {
+ const context =
+ new OfflineAudioContext(channelCount, renderLength, sampleRate);
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let testSource =
+ new ConstantSourceNode(context, { offset: sourceOffset });
+ let zeroOutputWorkletNode =
+ new AudioWorkletNode(context, 'zero-output-processor', {
+ numberOfInputs: 1,
+ numberOfOutputs: 0,
+ processorOptions: {
+ bufferLength: renderLength,
+                channelCount: channelCount
+ }
+ });
+
+ // Start the source and stop at the first render quantum.
+ testSource.connect(zeroOutputWorkletNode);
+ testSource.start();
+ testSource.stop(RENDER_QUANTUM_FRAMES/sampleRate);
+
+ zeroOutputWorkletNode.port.onmessage = (event) => {
+ // The |capturedBuffer| can be multichannel. Iterate through it.
+ for (let i = 0; i < event.data.capturedBuffer.length; ++i) {
+ let buffer = event.data.capturedBuffer[i];
+              // Split the captured buffer in half for easier testing.
+ should(buffer.subarray(0, RENDER_QUANTUM_FRAMES),
+ 'The first half of the captured buffer')
+ .beConstantValueOf(sourceOffset);
+ should(buffer.subarray(RENDER_QUANTUM_FRAMES, renderLength),
+ 'The second half of the captured buffer')
+ .beConstantValueOf(0);
+ }
+ task.done();
+ };
+
+ // Starts the rendering, but we don't need the rendered buffer from
+ // the context.
+ context.startRendering();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html
new file mode 100644
index 0000000000..11c237f19d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's dynamic channel count feature
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary numbers used to align the test with render quantum boundary.
+ let sampleRate = RENDER_QUANTUM_FRAMES * 100;
+ let renderLength = RENDER_QUANTUM_FRAMES * 2;
+ let context;
+
+ let filePath = 'processors/gain-processor.js';
+
+ let testChannelValues = [1, 2, 3];
+
+      // Creates a 3-channel buffer and plays it with an AudioBufferSourceNode.
+      // The source goes through a bypass AudioWorkletNode (gain value of 1).
+ audit.define('setup-buffer-and-worklet', (task, should) => {
+ context = new OfflineAudioContext(testChannelValues.length,
+ renderLength,
+ sampleRate);
+
+        // Explicitly sets the destination's channelCountMode and
+        // channelInterpretation to make sure the result does no mixing.
+        context.destination.channelCountMode = 'explicit';
+        context.destination.channelInterpretation = 'discrete';
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let testBuffer = createConstantBuffer(context, 1, testChannelValues);
+ let sourceNode = new AudioBufferSourceNode(context);
+ let gainWorkletNode = new AudioWorkletNode(context, 'gain');
+
+ gainWorkletNode.parameters.get('gain').value = 1.0;
+ sourceNode.connect(gainWorkletNode).connect(context.destination);
+
+ // Suspend the context at 128 sample frames and play the source with
+ // the assigned buffer.
+ context.suspend(RENDER_QUANTUM_FRAMES/sampleRate).then(() => {
+ sourceNode.buffer = testBuffer;
+ sourceNode.loop = true;
+ sourceNode.start();
+ context.resume();
+ });
+ task.done();
+ });
+ });
+
+      // Verifies that the rendered buffer is all zeros for the first half
+      // (before 128 sample frames) and has the expected values for the
+      // second half.
+ audit.define('verify-rendered-buffer', (task, should) => {
+ context.startRendering().then(renderedBuffer => {
+ testChannelValues.forEach((value, index) => {
+ let channelData = renderedBuffer.getChannelData(index);
+ should(channelData.subarray(0, RENDER_QUANTUM_FRAMES),
+ 'First half of Channel #' + index)
+ .beConstantValueOf(0);
+ should(channelData.subarray(RENDER_QUANTUM_FRAMES, renderLength),
+ 'Second half of Channel #' + index)
+ .beConstantValueOf(value);
+ });
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html
new file mode 100644
index 0000000000..8b7704a781
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html
@@ -0,0 +1,53 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test the construction of AudioWorkletNode with real-time context
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let realtimeContext = new AudioContext();
+
+ let filePath = 'processors/dummy-processor.js';
+
+      // Test if an exception is thrown correctly when the AWN constructor is
+      // invoked before the |.addModule()| promise resolves.
+ audit.define(
+ {label: 'construction-before-module-loading'},
+ (task, should) => {
+ should(() => new AudioWorkletNode(realtimeContext, 'dummy'),
+ 'Creating a node before loading a module should throw.')
+ .throw(DOMException, 'InvalidStateError');
+
+ task.done();
+ });
+
+      // Test the construction of AudioWorkletNode after the resolution of
+      // |.addModule()|. Also the constructor must throw an exception when
+      // an unregistered node name is given.
+ audit.define(
+ {label: 'construction-after-module-loading'},
+ (task, should) => {
+ realtimeContext.audioWorklet.addModule(filePath).then(() => {
+ let dummyWorkletNode =
+ new AudioWorkletNode(realtimeContext, 'dummy');
+ should(dummyWorkletNode instanceof AudioWorkletNode,
+ '"dummyWorkletNode" is an instance of AudioWorkletNode')
+ .beTrue();
+ should(() => new AudioWorkletNode(realtimeContext, 'foobar'),
+ 'Unregistered name "foobar" must throw an exception.')
+ .throw();
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html
new file mode 100644
index 0000000000..d3347d265e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html
@@ -0,0 +1,149 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test of AudioWorkletNodeOptions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const sampleRate = 48000;
+
+ const audit = Audit.createTaskRunner();
+ let context;
+
+ let filePath = 'processors/dummy-processor.js';
+
+      // Load the script file and create an OfflineAudioContext.
+ audit.define('setup', (task, should) => {
+ context = new OfflineAudioContext(1, 1, sampleRate);
+ context.audioWorklet.addModule(filePath).then(() => {
+ task.done();
+ });
+ });
+
+ // Test AudioWorkletNode construction without AudioWorkletNodeOptions.
+ audit.define('without-audio-node-options', (task, should) => {
+ let testNode;
+ should(
+ () => testNode = new AudioWorkletNode(context, 'dummy'),
+            'Creating AudioWorkletNode without options')
+ .notThrow();
+ should(testNode instanceof AudioWorkletNode,
+ 'testNode is instance of AudioWorkletNode').beEqualTo(true);
+ should(testNode.numberOfInputs,
+ 'testNode.numberOfInputs (default)').beEqualTo(1);
+ should(testNode.numberOfOutputs,
+ 'testNode.numberOfOutputs (default)').beEqualTo(1);
+ should(testNode.channelCount,
+ 'testNode.channelCount (default)').beEqualTo(2);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode (default)').beEqualTo('max');
+ should(testNode.channelInterpretation,
+ 'testNode.channelInterpretation (default)')
+ .beEqualTo('speakers');
+ task.done();
+ });
+
+ // Test AudioWorkletNode constructor with AudioNodeOptions.
+ audit.define('audio-node-options', (task, should) => {
+ const options = {
+ numberOfInputs: 7,
+ numberOfOutputs: 18,
+ channelCount: 4,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'discrete'
+ };
+ const optionsString = JSON.stringify(options);
+
+ let testNode;
+ should(
+ () => testNode = new AudioWorkletNode(context, 'dummy', options),
+            'Creating AudioWorkletNode with options: ' + optionsString)
+ .notThrow();
+ should(testNode.numberOfInputs,
+ 'testNode.numberOfInputs').beEqualTo(options.numberOfInputs);
+ should(testNode.numberOfOutputs,
+ 'testNode.numberOfOutputs').beEqualTo(options.numberOfOutputs);
+ should(testNode.channelCount,
+ 'testNode.channelCount').beEqualTo(options.channelCount);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode').beEqualTo(options.channelCountMode);
+ should(testNode.channelInterpretation,
+ 'testNode.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelCount.
+ audit.define('channel-count', (task, should) => {
+ const options1 = {channelCount: 17};
+ let testNode = new AudioWorkletNode(context, 'dummy', options1);
+ should(testNode.channelCount, 'testNode.channelCount')
+ .beEqualTo(options1.channelCount);
+
+ const options2 = {channelCount: 0};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options2),
+ 'Creating AudioWorkletNode with channelCount 0')
+ .throw(DOMException, 'NotSupportedError');
+
+ const options3 = {channelCount: 33};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options3),
+ 'Creating AudioWorkletNode with channelCount 33')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelCountMode.
+ audit.define('channel-count-mode', (task, should) => {
+ const channelCountModes = ['max', 'clamped-max', 'explicit'];
+ channelCountModes.forEach((mode) => {
+ const options = {channelCountMode: mode};
+ let testNode = new AudioWorkletNode(context, 'dummy', options);
+ should(testNode.channelCountMode,
+ 'testNode.channelCountMode (set via options.' + mode + ')')
+ .beEqualTo(options.channelCountMode);
+ });
+
+ const options1 = {channelCountMode: 'foobar'};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options1),
+ 'Creating AudioWorkletNode with channelCountMode "foobar"')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // Test AudioWorkletNode.channelInterpretation.
+ audit.define('channel-interpretation', (task, should) => {
+ const channelInterpretations = ['speakers', 'discrete'];
+ channelInterpretations.forEach((interpretation) => {
+ const options = {channelInterpretation: interpretation};
+ let testNode = new AudioWorkletNode(context, 'dummy', options);
+ should(
+ testNode.channelInterpretation,
+ 'testNode.channelInterpretation (set via options.' +
+ interpretation + ')')
+ .beEqualTo(options.channelInterpretation);
+ });
+
+ const options1 = {channelInterpretation: 'foobar'};
+ should(
+ () => new AudioWorkletNode(context, 'dummy', options1),
+ 'Creating AudioWorkletNode with channelInterpretation "foobar"')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html
new file mode 100644
index 0000000000..c58502af01
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html
@@ -0,0 +1,100 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test AudioWorkletNode's Disconnected Input Array Length
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary numbers used to align the test with render quantum boundary.
+ // The sample rate is a power of two to eliminate roundoff in computing
+ // the suspend time needed for the test.
+ let sampleRate = 16384;
+ let renderLength = 8 * RENDER_QUANTUM_FRAMES;
+ let context;
+
+ let filePath = 'processors/input-length-processor.js';
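+      // |input-length-processor.js| ships separately in processors/. Judging
+      // by the assertions below, it fills its output with the length of the
+      // first input channel array: 0 while the input is disconnected and 128
+      // (one render quantum) while it is connected.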
+
+      // Connect a ConstantSourceNode to an AudioWorkletNode partway through
+      // rendering, then disconnect it again; the processor reports the input
+      // array length it sees in each region.
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Input array length should be zero for disconnected input'
+ },
+ (task, should) => {
+ context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: renderLength,
+ sampleRate: sampleRate
+ });
+
+ context.audioWorklet.addModule(filePath).then(() => {
+ let sourceNode = new ConstantSourceNode(context);
+ let workletNode =
+ new AudioWorkletNode(context, 'input-length-processor');
+
+ workletNode.connect(context.destination);
+
+ // Connect the source now.
+ let connectFrame = RENDER_QUANTUM_FRAMES;
+
+ context.suspend(connectFrame / sampleRate)
+ .then(() => {
+ sourceNode.connect(workletNode);
+ })
+ .then(() => context.resume());
+
+ // Then disconnect the source after a few renders
+ let disconnectFrame = 3 * RENDER_QUANTUM_FRAMES;
+ context.suspend(disconnectFrame / sampleRate)
+ .then(() => {
+ sourceNode.disconnect(workletNode);
+ })
+ .then(() => context.resume());
+
+ sourceNode.start();
+ context.startRendering()
+ .then(resultBuffer => {
+ let data = resultBuffer.getChannelData(0);
+
+ should(
+ data.slice(0, connectFrame),
+ 'Before connecting the source: Input array length')
+ .beConstantValueOf(0);
+
+ // Find where the output is no longer 0.
+ let nonZeroIndex = data.findIndex(x => x > 0);
+ should(nonZeroIndex, 'First non-zero output')
+ .beEqualTo(connectFrame);
+
+ should(
+ data.slice(
+ nonZeroIndex,
+ nonZeroIndex + (disconnectFrame - connectFrame)),
+ 'While source is connected: Input array length')
+ .beConstantValueOf(RENDER_QUANTUM_FRAMES);
+ should(
+ data.slice(disconnectFrame),
+ 'After disconnecting the source: Input array length')
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
new file mode 100644
index 0000000000..5bbb7304d9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
@@ -0,0 +1,60 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test onprocessorerror handler in AudioWorkletNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ const filePath = 'processors/error-processor.js';
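+      // |error-processor.js| ships separately in processors/; as noted at the
+      // bottom of this file, it registers 'constructor-error' and
+      // 'process-error' processors that throw in the constructor and in
+      // process(), respectively.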
+ const sampleRate = 48000;
+ const renderLength = sampleRate * 0.1;
+ const context = new OfflineAudioContext(1, renderLength, sampleRate);
+
+ // Test |onprocessorerror| called upon failure of processor constructor.
+ audit.define('constructor-error', (task, should) => {
+ const constructorErrorWorkletNode =
+ new AudioWorkletNode(context, 'constructor-error');
+ constructorErrorWorkletNode.onprocessorerror = (error) => {
+ should(error instanceof ErrorEvent,
+ `onprocessorerror argument should be an ErrorEvent when
+ the constructor of AudioWorkletProcessor has an error.`)
+ .beTrue();
+
+          // Without the 'processorerror' event callback, this test will
+          // time out.
+ task.done();
+ };
+ });
+
+ // Test |onprocessorerror| called upon failure of process() method.
+ audit.define('process-error', (task, should) => {
+ const processErrorWorkletNode =
+ new AudioWorkletNode(context, 'process-error');
+ processErrorWorkletNode.onprocessorerror = (error) => {
+ should(error instanceof ErrorEvent,
+ `onprocessorerror argument should be an ErrorEvent when
+                  the process() method of the AudioWorkletProcessor
+ has an error.`)
+ .beTrue();
+
+          // Without the 'processorerror' event callback, this test will
+          // time out.
+ task.done();
+ };
+
+ context.startRendering();
+ });
+
+      // 'error-processor.js' contains two class definitions that trigger an
+      // error in the constructor and in the process() method, respectively.
+ context.audioWorklet.addModule(filePath).then(() => audit.run());
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
new file mode 100644
index 0000000000..8dafa2f811
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
@@ -0,0 +1,80 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test the output channel count of AudioWorkletNode with a real-time context
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+
+ setup(function () {
+ context.audioWorklet.addModule(
+ 'processors/channel-count-processor.js').then(() => audit.run());
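+      // |channel-count-processor.js| ships separately in processors/. As the
+      // handlers below assume, it posts the channel count observed on its
+      // output back to the node as event.data.outputChannel.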
+
+      // Test if the output channel count dynamically changes when the
+      // numbers of inputs and outputs are 1 and outputChannelCount is
+      // unspecified.
+ audit.define(
+          {label: 'Dynamically change the channel count if unspecified.'},
+ (task, should) => {
+ // Use arbitrary parameters for the test.
+ const buffer = new AudioBuffer({
+ numberOfChannels: 17,
+ length: 1,
+ sampleRate: context.sampleRate,
+ });
+ const source = new AudioBufferSourceNode(context);
+ source.buffer = buffer;
+
+ const node = new AudioWorkletNode(context, 'channel-count', {
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ });
+
+ node.port.onmessage = (message) => {
+ const expected = message.data;
+ should(expected.outputChannel,
+ 'The expected output channel count').beEqualTo(17);
+ task.done();
+ };
+
+          // We need to make an actual connection because the channel count
+          // change happens when the rendering starts. This tests whether the
+          // channel count adapts to the upstream node correctly.
+ source.connect(node).connect(context.destination);
+ source.start();
+ });
+
+      // Test if outputChannelCount is honored as expected even when the
+      // numbers of inputs and outputs are 1.
+ audit.define(
+          {label: 'Given outputChannelCount must be honored.'},
+ (task, should) => {
+ const node = new AudioWorkletNode(
+ context, 'channel-count', {
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ outputChannelCount: [2],
+ });
+
+ node.port.onmessage = (message) => {
+ const expected = message.data;
+ should(expected.outputChannel,
+ 'The expected output channel count').beEqualTo(2);
+ task.done();
+ };
+
+          // We need to make an actual connection because the channel count
+          // change might happen when the rendering starts. This tests
+          // whether the specified channel count is kept correctly.
+ node.connect(context.destination);
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html
new file mode 100644
index 0000000000..ea840ed11a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html
@@ -0,0 +1,77 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test cross-thread passing of AudioWorkletNodeOptions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+ const context = new AudioContext();
+
+ let filePath = 'processors/option-test-processor.js';
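+      // |option-test-processor.js| ships separately in processors/. As the
+      // two tasks below assume, it echoes its construction options back
+      // through the port: the processorOptions dictionary when one is given,
+      // and {numberOfInputs, numberOfOutputs} otherwise.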
+
+      // Create an OptionTestProcessor and feed |processorOptions| to it. The
+      // processor should echo the received data to the node's |onmessage|
+      // handler.
+ audit.define('valid-processor-data', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ let processorOptions = {
+ description: 'foo',
+ payload: [0, 1, 2, 3]
+ };
+
+ let optionTestNode =
+ new AudioWorkletNode(context, 'option-test-processor', {
+ processorOptions: processorOptions
+ });
+
+ optionTestNode.port.onmessage = (event) => {
+ should(event.data.processorOptions.description,
+ '|description| field in processorOptions from processor("' +
+ event.data.processorOptions.description + '")')
+ .beEqualTo(processorOptions.description,
+ 'the field in node constructor options ("' +
+ processorOptions.description + '")');
+ should(event.data.processorOptions.payload,
+ '|payload| array in processorOptions from processor([' +
+ event.data.processorOptions.payload + '])')
+ .beEqualToArray([0, 1, 2, 3],
+ 'the array in node constructor options ([' +
+ event.data.processorOptions.payload + '])');
+ task.done();
+ };
+ });
+ });
+
+
+      // Passing an empty options dictionary should work without a problem.
+ audit.define('empty-option', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ let optionTestNode =
+ new AudioWorkletNode(context, 'option-test-processor');
+
+ optionTestNode.port.onmessage = (event) => {
+ should(Object.keys(event.data).length,
+ 'Number of properties in data from processor')
+ .beEqualTo(2);
+ should(event.data.numberOfInputs,
+ '|numberOfInputs| field in data from processor')
+ .beEqualTo(1);
+          should(event.data.numberOfOutputs,
+                 '|numberOfOutputs| field in data from processor')
+              .beEqualTo(1);
+ task.done();
+ };
+ });
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html
new file mode 100644
index 0000000000..e3fb6e533d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-param-getter-overridden.https.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test AudioWorkletProcessor with an invalid parameters array getter
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrarily determined. Any numbers should work.
+ let sampleRate = 16000;
+ let renderLength = 1280;
+ let context;
+ let filePath = 'processors/invalid-param-array-processor.js';
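+      // |invalid-param-array-processor.js| ships separately in processors/.
+      // It presumably registers 'invalid-param-array-1' and
+      // 'invalid-param-array-2', whose parameter array getters are overridden
+      // so that invoking one (as done below) leaves the node rendering
+      // silence.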
+
+ audit.define('Initializing AudioWorklet and Context', async (task) => {
+ context = new OfflineAudioContext(1, renderLength, sampleRate);
+ await context.audioWorklet.addModule(filePath);
+ task.done();
+ });
+
+ audit.define('Verifying AudioParam in AudioWorkletNode',
+ async (task, should) => {
+ let buffer = context.createBuffer(1, 2, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+
+ let source = new AudioBufferSourceNode(context);
+ source.buffer = buffer;
+ source.loop = true;
+ source.start();
+
+ let workletNode1 =
+ new AudioWorkletNode(context, 'invalid-param-array-1');
+ let workletNode2 =
+ new AudioWorkletNode(context, 'invalid-param-array-2');
+ workletNode1.connect(workletNode2).connect(context.destination);
+
+ // Manually invoke the param getter.
+ source.connect(workletNode2.parameters.get('invalidParam'));
+
+ const renderedBuffer = await context.startRendering();
+
+        // |workletNode2| should be a no-op after the parameter getter is
+        // invoked. Therefore, the rendered result should be silent.
+ should(renderedBuffer.getChannelData(0), 'The rendered buffer')
+ .beConstantValueOf(0);
+ task.done();
+ }
+ );
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html
new file mode 100644
index 0000000000..33627204a6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-frozen-array.https.html
@@ -0,0 +1,53 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test given arrays within AudioWorkletProcessor.process() method
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+ const filePath = 'processors/array-check-processor.js';
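+      // |array-check-processor.js| ships separately in processors/. As the
+      // two tasks below assume, it registers 'array-frozen-processor', which
+      // reports whether |inputs| and |outputs| are frozen, and
+      // 'array-transfer-processor', which posts assertion results about
+      // transferring the arrays' underlying ArrayBuffers.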
+ const context = new AudioContext();
+
+ // Test if the incoming arrays are frozen as expected.
+ audit.define('check-frozen-array', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ const workletNode =
+ new AudioWorkletNode(context, 'array-frozen-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ should(actual.isInputFrozen, '|inputs| is frozen').beTrue();
+ should(actual.isOutputFrozen, '|outputs| is frozen').beTrue();
+ task.done();
+ };
+ });
+ });
+
+ // The incoming arrays should not be transferred, but the associated
+ // ArrayBuffers can be transferred. See the `array-transfer-processor`
+ // definition for the details.
+ audit.define('transfer-frozen-array', (task, should) => {
+ const sourceNode = new ConstantSourceNode(context);
+ const workletNode =
+ new AudioWorkletNode(context, 'array-transfer-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ if (actual.type === 'assertion')
+ should(actual.success, actual.message).beTrue();
+ if (actual.done)
+ task.done();
+ };
+ sourceNode.connect(workletNode);
+ sourceNode.start();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html
new file mode 100644
index 0000000000..e1c19f0d75
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-process-zero-outputs.https.html
@@ -0,0 +1,36 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test if |outputs| argument is all zero in AudioWorkletProcessor.process()
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+ const filePath = 'processors/zero-outputs-check-processor.js';
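+      // |zero-outputs-check-processor.js| ships separately in processors/. As
+      // assumed below, it checks from within process() that every channel in
+      // |outputs| arrives zero-filled and posts an assertion result.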
+ const context = new AudioContext();
+
+      // Test if the |outputs| argument given to process() is all zeros.
+ audit.define('check-zero-outputs', (task, should) => {
+ context.audioWorklet.addModule(filePath).then(() => {
+ const workletNode =
+ new AudioWorkletNode(context, 'zero-outputs-check-processor');
+ workletNode.port.onmessage = (message) => {
+ const actual = message.data;
+ if (actual.type === 'assertion') {
+ should(actual.success, actual.message).beTrue();
+ task.done();
+ }
+ };
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html
new file mode 100644
index 0000000000..079b57b959
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-promises.https.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test microtask checkpoints in AudioWorkletGlobalScope
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <meta charset=utf-8>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ promise_test(async () => {
+ const context = new AudioContext();
+
+ let filePath = 'processors/promise-processor.js';
+
+ await context.audioWorklet.addModule(filePath);
+ await context.suspend();
+ let node1 = new AudioWorkletNode(context, 'promise-processor');
+ let node2 = new AudioWorkletNode(context, 'promise-processor');
+
+ // Connecting to the destination is not strictly necessary in theory,
+ // but see
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=1045926
+ // for why it is in practice.
+ node1.connect(node2).connect(context.destination);
+
+ await context.resume();
+
+      // The second node is the one that is going to receive the message,
+      // per spec: it is the second node that is processed each time.
+ const e = await new Promise((resolve) => {
+ node2.port.onmessage = resolve;
+ });
+ context.close();
+ assert_equals(e.data, "ok",
+ `Microtask checkpoints are performed
+         in between render quanta`);
+ }, "test");
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html
new file mode 100644
index 0000000000..4281f56379
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Checking BaseAudioContext.audioWorklet
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let realtimeContext = new AudioContext();
+ let offlineContext = new OfflineAudioContext(1, 1, 44100);
+
+ // Test if AudioWorklet exists.
+ audit.define('Test if AudioWorklet exists', (task, should) => {
+ should(realtimeContext.audioWorklet instanceof AudioWorklet &&
+ offlineContext.audioWorklet instanceof AudioWorklet,
+ 'BaseAudioContext.audioWorklet is an instance of AudioWorklet')
+ .beTrue();
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html
new file mode 100644
index 0000000000..75f4aa4020
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/extended-audioworkletnode-with-parameters.https.html
@@ -0,0 +1,16 @@
+<!doctype html>
+<title>Test AudioWorkletNode subclass with parameters</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+class Extended extends AudioWorkletNode {}
+
+const modulePath = 'processors/gain-processor.js';
+
+promise_test(async () => {
+ const context = new AudioContext();
+ await context.audioWorklet.addModule(modulePath);
+ const node = new Extended(context, 'gain');
+ assert_equals(Object.getPrototypeOf(node), Extended.prototype);
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
new file mode 100644
index 0000000000..a4c59123a1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
@@ -0,0 +1,23 @@
+<!doctype html>
+<title>Test use of 'process' getter for AudioWorkletProcessor callback</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+const do_test = async (node_name) => {
+ const context = new AudioContext();
+ const filePath = `processors/${node_name}-processor.js`;
+ await context.audioWorklet.addModule(filePath);
+ const node = new AudioWorkletNode(context, node_name);
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ assert_equals(event.data.message, "done");
+};
+
+// Includes testing for https://github.com/WebAudio/web-audio-api/pull/2104
+promise_test(async () => do_test('process-getter-test-prototype'),
+ "'process' getter on prototype");
+
+promise_test(async () => do_test('process-getter-test-instance'),
+ "'process' getter on instance");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
new file mode 100644
index 0000000000..4c6a10dfab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
@@ -0,0 +1,87 @@
+<!doctype html>
+<title>Test parameters of process() AudioWorkletProcessor callback</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+var context;
+promise_setup(async (t) => {
+ context = new AudioContext();
+ const filePath = 'processors/process-parameter-test-processor.js';
+ await context.audioWorklet.addModule(filePath);
+});
+
+const get_parameters = async (node, options) => {
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ const inputs = event.data.inputs;
+ assert_equals(inputs.length, options.numberOfInputs, 'inputs length');
+ const outputs = event.data.outputs;
+ assert_equals(outputs.length, options.numberOfOutputs, 'outputs length');
+ for (let port = 0; port < inputs.length; ++port) {
+ for (let channel = 0; channel < inputs[port].length; ++channel) {
+ assert_equals(inputs[port][channel].length, 128,
+ `inputs[${port}][${channel}].length`);
+ }
+ }
+ for (let port = 0; port < outputs.length; ++port) {
+ for (let channel = 0; channel < outputs[port].length; ++channel) {
+ assert_equals(outputs[port][channel].length, 128,
+ `outputs[${port}][${channel}].length`);
+ }
+ }
+ return event.data;
+};
+
+promise_test(async (t) => {
+ const options = {
+ numberOfInputs: 3,
+ numberOfOutputs: 0
+ };
+ // Connect a source so that one channel of one input is active.
+ context.suspend();
+ const source = new ConstantSourceNode(context);
+ source.start();
+ const merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ const active_channel_index = merger.numberOfInputs - 1;
+ source.connect(merger, 0, active_channel_index);
+ const node = new AudioWorkletNode(context, 'process-parameter-test', options);
+ const active_port_index = options.numberOfInputs - 1;
+ merger.connect(node, 0, active_port_index);
+ context.resume();
+ const {inputs} = await get_parameters(node, options);
+  for (let port = 0; port < inputs.length; ++port) {
+ if (port != active_port_index) {
+ assert_equals(inputs[port].length, 0, `inputs[${port}].length`);
+ }
+ }
+ const active_input = inputs[active_port_index];
+ assert_equals(active_input.length, merger.numberOfInputs,
+ 'active_input.length');
+ for (let channel = 0; channel < active_input.length; ++channel) {
+ let expected = channel == active_channel_index ? 1.0 : 0.0;
+    for (let sample = 0; sample < active_input[channel].length; ++sample) {
+ assert_equals(active_input[channel][sample], expected,
+ `active_input[${channel}][${sample}]`);
+ }
+ }
+}, '3 inputs; 0 outputs');
+
+promise_test(async (t) => {
+ const options = {
+ numberOfInputs: 0,
+ numberOfOutputs: 3
+ };
+ const node = new AudioWorkletNode(context, 'process-parameter-test', options);
+ const {outputs} = await get_parameters(node, options);
+ for (let port = 0; port < outputs.length; ++port) {
+ assert_equals(outputs[port].length, 1, `outputs[${port}].length`);
+ for (let channel = 0; channel < outputs[port].length; ++channel) {
+      for (let sample = 0; sample < outputs[port][channel].length; ++sample) {
+ assert_equals(outputs[port][channel][sample], 0.0,
+ `outputs[${port}][${channel}][${sample}]`);
+ }
+ }
+ }
+}, '0 inputs; 3 outputs');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
new file mode 100644
index 0000000000..6f1aa59225
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
@@ -0,0 +1,61 @@
+<!doctype html>
+<title>Test processor port assignment on processor callback function construction</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+// https://webaudio.github.io/web-audio-api/#AudioWorkletProcessor-instantiation
+
+const get_context_for_node_name = async (node_name) => {
+ const context = new AudioContext();
+ const filePath = `processors/construction-port-${node_name}.js`;
+ await context.audioWorklet.addModule(filePath);
+ return context;
+}
+
+const test_throws = async ({node_name, thrower} = {}) => {
+ const context = await get_context_for_node_name(node_name);
+ const node = new AudioWorkletNode(context, node_name);
+ const event = await new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ });
+ assert_true(event.data.threw, `${thrower} should throw`);
+ assert_equals(event.data.errorName, "TypeError");
+ assert_true(event.data.isTypeError, "exception should be TypeError");
+};
+
+const throw_tests = [
+ {
+ test_name: 'super() after new AudioWorkletProcessor()',
+ node_name: 'super-after-new',
+ thrower: 'super()'
+ },
+ {
+ test_name: 'new AudioWorkletProcessor() after super()',
+ node_name: 'new-after-super',
+ thrower: 'new AudioWorkletProcessor()'
+ },
+ {
+ test_name: 'new AudioWorkletProcessor() after new AudioWorkletProcessor()',
+ node_name: 'new-after-new',
+ thrower: 'new AudioWorkletProcessor()'
+ }
+];
+for (const test_info of throw_tests) {
+ promise_test(async () => test_throws(test_info), test_info.test_name);
+}
+
+promise_test(async (t) => {
+ const node_name = 'singleton';
+ const context = await get_context_for_node_name(node_name);
+ const node1 = new AudioWorkletNode(context, node_name);
+ const node2 = new AudioWorkletNode(context, node_name);
+  node2.port.onmessage = t.unreached_func("node2 should not receive a message");
+ let count = 0;
+ await new Promise((resolve) => {
+ node1.port.onmessage = t.step_func((event) => {
+ assert_less_than(count, 2, "message count");
+      if (++count == 2) { resolve(); }
+ });
+ });
+}, 'Singleton AudioWorkletProcessor');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js
new file mode 100644
index 0000000000..ef497733ca
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/active-processing.js
@@ -0,0 +1,54 @@
+/**
+ * @class ActiveProcessingTester
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class sends a message to its AudioWorkletNode whenever the
+ * number of channels on the input changes. The message includes the actual
+ * number of channels, the context time at which this occurred, and whether
+ * we're done processing or not.
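+ *
+ * An example message (values are illustrative only):
+ *   {channelCount: 2, finished: false, time: 0.0106}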
+ */
+class ActiveProcessingTester extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ this._lastChannelCount = 0;
+
+    // See if the user specified a value for the test duration.
+ if (options.hasOwnProperty('processorOptions') &&
+ options.processorOptions.hasOwnProperty('testDuration')) {
+ this._testDuration = options.processorOptions.testDuration;
+ } else {
+ this._testDuration = 5;
+ }
+
+ // Time at which we'll signal we're done, based on the requested
+ // |testDuration|
+ this._endTime = currentTime + this._testDuration;
+ }
+
+ process(inputs, outputs) {
+ const input = inputs[0];
+ const output = outputs[0];
+ const inputChannelCount = input.length;
+ const isFinished = currentTime > this._endTime;
+
+ // Send a message if we're done or the count changed.
+ if (isFinished || (inputChannelCount != this._lastChannelCount)) {
+ this.port.postMessage({
+ channelCount: inputChannelCount,
+ finished: isFinished,
+ time: currentTime
+ });
+ this._lastChannelCount = inputChannelCount;
+ }
+
+ // Just copy the input to the output for no particular reason.
+ for (let channel = 0; channel < input.length; ++channel) {
+ output[channel].set(input[channel]);
+ }
+
+ // When we're finished, this method no longer needs to be called.
+ return !isFinished;
+ }
+}
+
+registerProcessor('active-processing-tester', ActiveProcessingTester);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js
new file mode 100644
index 0000000000..d05056bd84
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/add-offset.js
@@ -0,0 +1,34 @@
+/*
+ * @class AddOffsetProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * Just adds a fixed value to the input
+ */
+class AddOffsetProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super();
+
+ this._offset = options.processorOptions.offset;
+ }
+
+ process(inputs, outputs) {
+ // This processor assumes the node has at least 1 input and 1 output.
+ let input = inputs[0];
+ let output = outputs[0];
+ let outputChannel = output[0];
+
+ if (input.length > 0) {
+ let inputChannel = input[0];
+ for (let k = 0; k < outputChannel.length; ++k)
+ outputChannel[k] = inputChannel[k] + this._offset;
+ } else {
+ // No input connected, so pretend it's silence and just fill the
+ // output with the offset value.
+ outputChannel.fill(this._offset);
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('add-offset-processor', AddOffsetProcessor);
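+
+// A minimal sketch of the main-thread side (simple-input-output.https.html in
+// this directory drives this processor essentially this way):
+//
+//   await context.audioWorklet.addModule('processors/add-offset.js');
+//   const node = new AudioWorkletNode(context, 'add-offset-processor',
+//       {processorOptions: {offset: 1}});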
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js
new file mode 100644
index 0000000000..d6eeff3d15
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/array-check-processor.js
@@ -0,0 +1,94 @@
+/**
+ * @class ArrayFrozenProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ArrayFrozenProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this._messageSent = false;
+ }
+
+ process(inputs, outputs, parameters) {
+ const input = inputs[0];
+ const output = outputs[0];
+
+ if (!this._messageSent) {
+ this.port.postMessage({
+ inputLength: input.length,
+ isInputFrozen: Object.isFrozen(inputs) && Object.isFrozen(input),
+ outputLength: output.length,
+ isOutputFrozen: Object.isFrozen(outputs) && Object.isFrozen(output)
+ });
+ this._messageSent = true;
+ }
+
+ return false;
+ }
+}
+
+/**
+ * @class ArrayTransferProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ArrayTransferProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this._messageSent = false;
+ }
+
+ process(inputs, outputs, parameters) {
+ const input = inputs[0];
+ const output = outputs[0];
+
+ if (!this._messageSent) {
+ try {
+ // Transferring Array objects should NOT work.
+ this.port.postMessage({
+ inputs, input, inputChannel: input[0],
+ outputs, output, outputChannel: output[0]
+ }, [inputs, input, inputs[0], outputs, output, output[0]]);
+ // Hence, the following must NOT be reached.
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Transferring inputs/outputs, an individual input/output ' +
+ 'array, or a channel Float32Array MUST fail, but succeeded.'
+ });
+ } catch (error) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: 'Transferring inputs/outputs, an individual input/output ' +
+ 'array, or a channel Float32Array is not allowed as expected.'
+ });
+ }
+
+ try {
+ // Transferring ArrayBuffers should work.
+ this.port.postMessage(
+ {inputChannel: input[0], outputChannel: output[0]},
+ [input[0].buffer, output[0].buffer]);
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: 'Transferring ArrayBuffers was successful as expected.'
+ });
+ } catch (error) {
+ // This must NOT be reached.
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Transferring ArrayBuffers unexpectedly failed.'
+ });
+ }
+
+ this.port.postMessage({done: true});
+ this._messageSent = true;
+ }
+
+ return false;
+ }
+}
+
+registerProcessor('array-frozen-processor', ArrayFrozenProcessor);
+registerProcessor('array-transfer-processor', ArrayTransferProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js
new file mode 100644
index 0000000000..556459f46b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/channel-count-processor.js
@@ -0,0 +1,19 @@
+/**
+ * @class ChannelCountProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ChannelCountProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ }
+
+ process(inputs, outputs) {
+ this.port.postMessage({
+ inputChannel: inputs[0].length,
+ outputChannel: outputs[0].length
+ });
+ return false;
+ }
+}
+
+registerProcessor('channel-count', ChannelCountProcessor); \ No newline at end of file
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
new file mode 100644
index 0000000000..d4c63f7775
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
@@ -0,0 +1,16 @@
+class NewAfterNew extends AudioWorkletProcessor {
+ constructor() {
+ const processor = new AudioWorkletProcessor()
+ let message = {threw: false};
+ try {
+ new AudioWorkletProcessor();
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ processor.port.postMessage(message);
+ return processor;
+ }
+}
+registerProcessor("new-after-new", NewAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
new file mode 100644
index 0000000000..a6d4f0e2e8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
@@ -0,0 +1,15 @@
+class NewAfterSuper extends AudioWorkletProcessor {
+ constructor() {
+ super()
+ let message = {threw: false};
+ try {
+ new AudioWorkletProcessor()
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ this.port.postMessage(message);
+ }
+}
+registerProcessor("new-after-super", NewAfterSuper);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
new file mode 100644
index 0000000000..c40b5a7179
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
@@ -0,0 +1,16 @@
+let singleton;
+class Singleton extends AudioWorkletProcessor {
+ constructor() {
+ if (!singleton) {
+ singleton = new AudioWorkletProcessor();
+ singleton.process = function() {
+ this.port.postMessage({message: "process called"});
+ // This function will be called at most once for each AudioWorkletNode
+ // if the node has no input connections.
+ return false;
+ }
+ }
+ return singleton;
+ }
+}
+registerProcessor("singleton", Singleton);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
new file mode 100644
index 0000000000..e447830c5f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
@@ -0,0 +1,16 @@
+class SuperAfterNew extends AudioWorkletProcessor {
+ constructor() {
+ const processor = new AudioWorkletProcessor()
+ let message = {threw: false};
+ try {
+ super();
+ } catch (e) {
+ message.threw = true;
+ message.errorName = e.name;
+ message.isTypeError = e instanceof TypeError;
+ }
+ processor.port.postMessage(message);
+ return processor;
+ }
+}
+registerProcessor("super-after-new", SuperAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js
new file mode 100644
index 0000000000..d1b16cc9aa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor-globalthis.js
@@ -0,0 +1,12 @@
+class DummyProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ // Doesn't do anything here.
+ return true;
+ }
+}
+
+globalThis.registerProcessor('dummy-globalthis', DummyProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js
new file mode 100644
index 0000000000..11155d508c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dummy-processor.js
@@ -0,0 +1,18 @@
+/**
+ * @class DummyProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the bare-bones structure of a processor.
+ */
+class DummyProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ // Doesn't do anything here.
+ return true;
+ }
+}
+
+registerProcessor('dummy', DummyProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js
new file mode 100644
index 0000000000..5e825aebb4
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/dynamic-register-processor.js
@@ -0,0 +1,22 @@
+class ProcessorA extends AudioWorkletProcessor {
+ process() {
+ return true;
+ }
+}
+
+// ProcessorB registers ProcessorA when it receives a message on its port.
+class ProcessorB extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = () => {
+ registerProcessor('ProcessorA', ProcessorA);
+ this.port.postMessage({});
+ };
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('ProcessorB', ProcessorB);
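+
+// Hypothetical main-thread flow (sketch): a message to a ProcessorB node
+// triggers the registration of ProcessorA, after which ProcessorA nodes can
+// be constructed.
+//
+//   const nodeB = new AudioWorkletNode(context, 'ProcessorB');
+//   nodeB.port.onmessage = () => {
+//     const nodeA = new AudioWorkletNode(context, 'ProcessorA');
+//   };
+//   nodeB.port.postMessage({});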
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js
new file mode 100644
index 0000000000..3b010db4f2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/error-processor.js
@@ -0,0 +1,33 @@
+/**
+ * @class ConstructorErrorProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ConstructorErrorProcessor extends AudioWorkletProcessor {
+ constructor() {
+ throw 'ConstructorErrorProcessor: an error thrown from constructor.';
+ }
+
+ process() {
+ return true;
+ }
+}
+
+
+/**
+ * @class ProcessErrorProcessor
+ * @extends AudioWorkletProcessor
+ */
+class ProcessErrorProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process() {
+    throw 'ProcessErrorProcessor: an error thrown from the process method.';
+ return true;
+ }
+}
+
+
+registerProcessor('constructor-error', ConstructorErrorProcessor);
+registerProcessor('process-error', ProcessErrorProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js
new file mode 100644
index 0000000000..e9e130e374
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/gain-processor.js
@@ -0,0 +1,38 @@
+/**
+ * @class GainProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class implements a simple gain with a |gain| AudioParam.
+ */
+class GainProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return [
+ {name: 'gain', defaultValue: 0.707}
+ ];
+ }
+
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ let gain = parameters.gain;
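+    // Per spec, |gain| holds a single element when the parameter is constant
+    // over this render quantum, and one element per frame otherwise.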
+ for (let channel = 0; channel < input.length; ++channel) {
+ let inputChannel = input[channel];
+ let outputChannel = output[channel];
+ if (gain.length === 1) {
+ for (let i = 0; i < inputChannel.length; ++i)
+ outputChannel[i] = inputChannel[i] * gain[0];
+ } else {
+ for (let i = 0; i < inputChannel.length; ++i)
+ outputChannel[i] = inputChannel[i] * gain[i];
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('gain', GainProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js
new file mode 100644
index 0000000000..6d53ba84c7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-count-processor.js
@@ -0,0 +1,22 @@
+/**
+ * @class CountProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class just looks at the number of input channels on the first
+ * input and fills the first output channel with that value.
+ */
+class CountProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ output[0].fill(input.length);
+
+ return true;
+ }
+}
+
+registerProcessor('counter', CountProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js
new file mode 100644
index 0000000000..be485f03e8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/input-length-processor.js
@@ -0,0 +1,27 @@
+/**
+ * @class InputLengthProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class just sets the output to the length of the
+ * input array for verifying that the input length changes when the
+ * input is disconnected.
+ */
+class InputLengthProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+
+ // Set output channel to the length of the input channel array.
+ // If the input is unconnected, set the value to zero.
+ const fillValue = input.length > 0 ? input[0].length : 0;
+ output[0].fill(fillValue);
+
+ return true;
+ }
+}
+
+registerProcessor('input-length-processor', InputLengthProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js
new file mode 100644
index 0000000000..e4a5dc39ba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/invalid-param-array-processor.js
@@ -0,0 +1,47 @@
+/**
+ * @class InvalidParamArrayProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor intentionally returns a parameter array with an invalid size
+ * (256 instead of 128) when its getter is queried for the second time.
+ */
+let singleton = undefined;
+let secondFetch = false;
+let useDescriptor = false;
+let processCounter = 0;
+
+class InvalidParamArrayProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ if (useDescriptor)
+ return [{name: 'invalidParam'}];
+ useDescriptor = true;
+ return [];
+ }
+
+ constructor() {
+ super();
+ if (singleton === undefined)
+ singleton = this;
+ return singleton;
+ }
+
+ process(inputs, outputs, parameters) {
+ const output = outputs[0];
+ for (let channel = 0; channel < output.length; ++channel)
+ output[channel].fill(1);
+ return false;
+ }
+}
+
+// This overridden getter is invoked under the hood before process() gets
+// called. Once it returns an array of the wrong size, the process() method
+// above is invalidated and the worklet node is marked non-functional (i.e. it
+// enters an error state).
+Object.defineProperty(Object.prototype, 'invalidParam', {'get': () => {
+ if (secondFetch)
+ return new Float32Array(256);
+ secondFetch = true;
+ return new Float32Array(128);
+}});
+
+registerProcessor('invalid-param-array-1', InvalidParamArrayProcessor);
+registerProcessor('invalid-param-array-2', InvalidParamArrayProcessor);
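+
+// On the node side, the mismatched array length is expected to surface as a
+// processor error (sketch, assuming a node using 'invalid-param-array-2'):
+//
+//   node.onprocessorerror = () => { /* the node is now non-functional */ };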
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js
new file mode 100644
index 0000000000..0bcc43f6f0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/one-pole-processor.js
@@ -0,0 +1,49 @@
+/**
+ * @class OnePoleFilter
+ * @extends AudioWorkletProcessor
+ *
+ * A simple One-pole filter.
+ */
+
+class OnePoleFilter extends AudioWorkletProcessor {
+
+ // This gets evaluated as soon as the global scope is created.
+ static get parameterDescriptors() {
+ return [{
+ name: 'frequency',
+ defaultValue: 250,
+ minValue: 0,
+ maxValue: 0.5 * sampleRate
+ }];
+ }
+
+ constructor() {
+ super();
+ this.updateCoefficientsWithFrequency_(250);
+ }
+
+ updateCoefficientsWithFrequency_(frequency) {
+ this.b1_ = Math.exp(-2 * Math.PI * frequency / sampleRate);
+ this.a0_ = 1.0 - this.b1_;
+ this.z1_ = 0;
+ }
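+
+  // With these coefficients, the per-sample update in process() implements the
+  // one-pole lowpass difference equation: y[n] = a0 * x[n] + b1 * y[n - 1].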
+
+ process(inputs, outputs, parameters) {
+ let input = inputs[0];
+ let output = outputs[0];
+ let frequency = parameters.frequency;
+ for (let channel = 0; channel < output.length; ++channel) {
+ let inputChannel = input[channel];
+ let outputChannel = output[channel];
+ for (let i = 0; i < outputChannel.length; ++i) {
+ this.updateCoefficientsWithFrequency_(frequency[i]);
+ this.z1_ = inputChannel[i] * this.a0_ + this.z1_ * this.b1_;
+ outputChannel[i] = this.z1_;
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('one-pole-filter', OnePoleFilter);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js
new file mode 100644
index 0000000000..27e1da6325
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/option-test-processor.js
@@ -0,0 +1,19 @@
+/**
+ * @class OptionTestProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the option passing feature by echoing the
+ * received |nodeOptions| back to the node.
+ */
+class OptionTestProcessor extends AudioWorkletProcessor {
+ constructor(nodeOptions) {
+ super();
+ this.port.postMessage(nodeOptions);
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('option-test-processor', OptionTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js
new file mode 100644
index 0000000000..d7ce836500
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/param-size-processor.js
@@ -0,0 +1,30 @@
+/**
+ * @class ParamSizeProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor is a source node that outputs the size of its AudioParam
+ * array for each render quantum.
+ */
+
+class ParamSizeProcessor extends AudioWorkletProcessor {
+ static get parameterDescriptors() {
+ return [{name: 'param'}];
+ }
+
+ constructor() {
+ super();
+ }
+
+ process(inputs, outputs, parameters) {
+ let output = outputs[0];
+ let param = parameters.param;
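+
+    // Per spec, |param| has a single element when the value is constant over
+    // the render quantum, and 128 elements otherwise.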
+
+ for (let channel = 0; channel < output.length; ++channel) {
+ output[channel].fill(param.length);
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('param-size', ParamSizeProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js
new file mode 100644
index 0000000000..8def5a61d7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/port-processor.js
@@ -0,0 +1,34 @@
+/**
+ * @class PortProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates the message port functionality.
+ */
+class PortProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.handleMessage.bind(this);
+ this.port.postMessage({
+ state: 'created',
+ timeStamp: currentTime,
+ currentFrame: currentFrame
+ });
+ this.processCallCount = 0;
+ }
+
+ handleMessage(event) {
+ this.port.postMessage({
+ message: event.data,
+ timeStamp: currentTime,
+ currentFrame: currentFrame,
+ processCallCount: this.processCallCount
+ });
+ }
+
+ process() {
+ ++this.processCallCount;
+ return true;
+ }
+}
+
+registerProcessor('port-processor', PortProcessor);
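+
+// Sketch of the round trip as driven from the main thread (see
+// suspended-context-messageport.https.html, which uses this processor):
+//
+//   node.port.onmessage = (event) => { /* echoed data with timestamps */ };
+//   node.port.postMessage('ping');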
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
new file mode 100644
index 0000000000..b1434f54ba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
@@ -0,0 +1,44 @@
+/**
+ * @class ProcessGetterTestInstanceProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on an
+ * AudioWorkletProcessorConstructor instance is called at the right times.
+ */
+
+class ProcessGetterTestInstanceProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.getterCallCount = 0;
+ this.totalProcessCallCount = 0;
+ Object.defineProperty(this, 'process', { get: function() {
+ if (!(this instanceof ProcessGetterTestInstanceProcessor)) {
+ throw new Error('`process` getter called with bad `this`.');
+ }
+ ++this.getterCallCount;
+ let functionCallCount = 0;
+ return () => {
+ if (++functionCallCount > 1) {
+ const message = 'Closure of function returned from `process` getter' +
+ ' should be used for only one call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ if (++this.totalProcessCallCount < 2) {
+ return true; // Expect another getter call.
+ }
+ if (this.totalProcessCallCount != this.getterCallCount) {
+ const message =
+ 'Getter should be called only once for each process() call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ this.port.postMessage({message: 'done'});
+ return false; // No more calls required.
+ };
+ }});
+ }
+}
+
+registerProcessor('process-getter-test-instance',
+ ProcessGetterTestInstanceProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
new file mode 100644
index 0000000000..cef5fa8b52
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
@@ -0,0 +1,55 @@
+/**
+ * @class ProcessGetterTestPrototypeProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on
+ * AudioWorkletProcessorConstructor is called at the right times.
+ */
+
+// Reporting errors during registerProcessor() is awkward.
+// The occurrence of an error is flagged, so that a trial registration can be
+// performed first; registration under the expected AudioWorkletNode name is
+// then performed only if no errors were flagged during the trial registration.
+let error_flag = false;
+
+class ProcessGetterTestPrototypeProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.getterCallCount = 0;
+ this.totalProcessCallCount = 0;
+ }
+ get process() {
+ if (!(this instanceof ProcessGetterTestPrototypeProcessor)) {
+ error_flag = true;
+ throw new Error('`process` getter called with bad `this`.');
+ }
+ ++this.getterCallCount;
+ let functionCallCount = 0;
+ return () => {
+ if (++functionCallCount > 1) {
+ const message = 'Closure of function returned from `process` getter' +
+ ' should be used for only one call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ if (++this.totalProcessCallCount < 2) {
+ return true; // Expect another getter call.
+ }
+ if (this.totalProcessCallCount != this.getterCallCount) {
+ const message =
+ 'Getter should be called only once for each process() call.'
+ this.port.postMessage({message: message});
+ throw new Error(message);
+ }
+ this.port.postMessage({message: 'done'});
+ return false; // No more calls required.
+ };
+ }
+}
+
+registerProcessor('trial-process-getter-test-prototype',
+ ProcessGetterTestPrototypeProcessor);
+if (!error_flag) {
+ registerProcessor('process-getter-test-prototype',
+ ProcessGetterTestPrototypeProcessor);
+}
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
new file mode 100644
index 0000000000..a300d3cdec
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
@@ -0,0 +1,18 @@
+/**
+ * @class ProcessParameterTestProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class forwards input and output parameters to its
+ * AudioWorkletNode.
+ */
+class ProcessParameterTestProcessor extends AudioWorkletProcessor {
+ process(inputs, outputs) {
+ this.port.postMessage({
+ inputs: inputs,
+ outputs: outputs
+ });
+ return false;
+ }
+}
+
+registerProcessor('process-parameter-test', ProcessParameterTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js
new file mode 100644
index 0000000000..6a8144b3cc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/promise-processor.js
@@ -0,0 +1,40 @@
+/**
+ * @class PromiseProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor creates and resolves a promise in its `process` method. When
+ * the handler passed to `then()` is called, a counter global to the worklet
+ * global scope is incremented. There are two copies of this
+ * AudioWorkletNode/Processor pair, so the counter should always be even on
+ * entry to each process() call, since promise completion handlers are
+ * resolved in between render quanta.
+ *
+ * After a few iterations of the test, one of the worklets posts the string
+ * "ok" back to the main thread, and the test is considered a success.
+ */
+var idx = 0;
+
+class PromiseProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super(options);
+ }
+
+ process(inputs, outputs) {
+ if (idx % 2 != 0) {
+ this.port.postMessage("ko");
+      // Don't bother to keep calling process() in this case; the test has
+      // already failed.
+ return false;
+ }
+ Promise.resolve().then(() => {
+ idx++;
+ if (idx == 100) {
+ this.port.postMessage("ok");
+ }
+ });
+ // Ensure process is called again.
+ return true;
+ }
+}
+
+registerProcessor('promise-processor', PromiseProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js
new file mode 100644
index 0000000000..2ccacccd4b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/sharedarraybuffer-processor.js
@@ -0,0 +1,35 @@
+/**
+ * @class SharedArrayBufferProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class demonstrates passing SharedArrayBuffers to and from
+ * workers.
+ */
+class SharedArrayBufferProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.handleMessage.bind(this);
+ this.port.onmessageerror = this.handleMessageError.bind(this);
+ let sab = new SharedArrayBuffer(8);
+ this.port.postMessage({state: 'created', sab});
+ }
+
+ handleMessage(event) {
+ this.port.postMessage({
+ state: 'received message',
+ isSab: event.data instanceof SharedArrayBuffer
+ });
+ }
+
+ handleMessageError(event) {
+ this.port.postMessage({
+ state: 'received messageerror'
+ });
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('sharedarraybuffer-processor', SharedArrayBufferProcessor);
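+
+// Node-side sketch (assumes SharedArrayBuffer is available, i.e. the page is
+// cross-origin isolated):
+//
+//   node.port.onmessage = (event) => {
+//     if (event.data.state === 'created')
+//       node.port.postMessage(event.data.sab);  // echo the SAB back
+//   };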
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js
new file mode 100644
index 0000000000..714e32dbb5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/timing-info-processor.js
@@ -0,0 +1,25 @@
+/**
+ * @class TimingInfoProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests the timing information in an AudioWorkletGlobalScope.
+ */
+class TimingInfoProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.port.onmessage = this.echoMessage.bind(this);
+ }
+
+ echoMessage(event) {
+ this.port.postMessage({
+ currentTime: currentTime,
+ currentFrame: currentFrame
+ });
+ }
+
+ process() {
+ return true;
+ }
+}
+
+registerProcessor('timing-info-processor', TimingInfoProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js
new file mode 100644
index 0000000000..2d7399ca3b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-output-processor.js
@@ -0,0 +1,42 @@
+/**
+ * @class ZeroOutputProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor accumulates incoming buffers and sends the buffered data to
+ * the main thread once it reaches the specified frame length. The processor
+ * only supports a single input.
+ */
+
+const kRenderQuantumFrames = 128;
+
+class ZeroOutputProcessor extends AudioWorkletProcessor {
+ constructor(options) {
+ super();
+
+ this._framesRequested = options.processorOptions.bufferLength;
+ this._framesCaptured = 0;
+ this._buffer = [];
+ for (let i = 0; i < options.processorOptions.channeCount; ++i) {
+ this._buffer[i] = new Float32Array(this._framesRequested);
+ }
+ }
+
+ process(inputs) {
+ let input = inputs[0];
+ let startIndex = this._framesCaptured;
+ let endIndex = startIndex + kRenderQuantumFrames;
+ for (let i = 0; i < this._buffer.length; ++i) {
+ this._buffer[i].subarray(startIndex, endIndex).set(input[i]);
+ }
+ this._framesCaptured = endIndex;
+
+ if (this._framesCaptured >= this._framesRequested) {
+ this.port.postMessage({ capturedBuffer: this._buffer });
+ return false;
+ } else {
+ return true;
+ }
+ }
+}
+
+registerProcessor('zero-output-processor', ZeroOutputProcessor);
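+
+// Construction sketch (buffer length is arbitrary); note the processor reads
+// the 'channeCount' key, spelled exactly as in the constructor above:
+//
+//   const node = new AudioWorkletNode(context, 'zero-output-processor', {
+//     processorOptions: {bufferLength: 512, channeCount: 1}
+//   });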
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js
new file mode 100644
index 0000000000..f816e918a2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/zero-outputs-check-processor.js
@@ -0,0 +1,78 @@
+/**
+ * Returns true if a given AudioPort is completely filled with zero samples.
+ * "AudioPort" is a short-hand for FrozenArray<FrozenArray<Float32Array>>.
+ *
+ * @param {FrozenArray<FrozenArray<Float32Array>>} audioPort
+ * @returns {boolean}
+ */
+function IsAllZero(audioPort) {
+ for (let busIndex = 0; busIndex < audioPort.length; ++busIndex) {
+ const audioBus = audioPort[busIndex];
+ for (let channelIndex = 0; channelIndex < audioBus.length; ++channelIndex) {
+ const audioChannel = audioBus[channelIndex];
+ for (let sample = 0; sample < audioChannel.length; ++sample) {
+ if (audioChannel[sample] != 0)
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+const kRenderQuantumFrames = 128;
+const kTestLengthInSec = 1.0;
+const kPulseDuration = 100;
+
+/**
+ * Checks the |outputs| argument of AudioWorkletProcessor.process() and sends
+ * a message to the associated AudioWorkletNode. |outputs| must be all zeros
+ * at all times.
+ *
+ * @class ZeroOutputsCheckProcessor
+ * @extends {AudioWorkletProcessor}
+ */
+class ZeroOutputsCheckProcessor extends AudioWorkletProcessor {
+ constructor() {
+ super();
+ this.startTime = currentTime;
+ this.counter = 0;
+ }
+
+ process(inputs, outputs) {
+ if (!IsAllZero(outputs)) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: false,
+ message: 'Unexpected Non-zero sample found in |outputs|.'
+ });
+ return false;
+ }
+
+ if (currentTime - this.startTime >= kTestLengthInSec) {
+ this.port.postMessage({
+ type: 'assertion',
+ success: true,
+ message: `|outputs| has been all zeros for ${kTestLengthInSec} ` +
+ 'seconds as expected.'
+ });
+ return false;
+ }
+
+ // Every ~0.25 second (100 render quanta), switch between outputting white
+ // noise and just exiting without doing anything. (from crbug.com/1099756)
+ this.counter++;
+ if (Math.floor(this.counter / kPulseDuration) % 2 == 0)
+ return true;
+
+ let output = outputs[0];
+ for (let channel = 0; channel < output.length; ++channel) {
+      for (let sample = 0; sample < kRenderQuantumFrames; sample++) {
+ output[channel][sample] = 0.1 * (Math.random() - 0.5);
+ }
+ }
+
+ return true;
+ }
+}
+
+registerProcessor('zero-outputs-check-processor', ZeroOutputsCheckProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html
new file mode 100644
index 0000000000..7b9e7f0ac3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html
@@ -0,0 +1,90 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Test Simple AudioWorklet I/O</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Arbitrary sample rate
+ const sampleRate = 48000;
+
+ // The offset to be applied by the worklet to its inputs.
+ const offset = 1;
+
+ // Location of the worklet's code
+ const filePath = 'processors/add-offset.js';
+
+ let audit = Audit.createTaskRunner();
+
+ // Context to be used for the tests.
+ let context;
+
+ audit.define('Initialize worklet', (task, should) => {
+ // Two channels for testing. Channel 0 is the output of the
+ // AudioWorklet. Channel 1 is the oscillator so we can compare
+ // the outputs.
+ context = new OfflineAudioContext(
+ {numberOfChannels: 2, length: sampleRate, sampleRate: sampleRate});
+
+ // Load up the code for the worklet.
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'Creation of AudioWorklet')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'test', description: 'Simple AudioWorklet I/O'},
+ (task, should) => {
+ let merger = new ChannelMergerNode(
+ context, {numberOfChannels: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ let worklet = new AudioWorkletNode(
+ context, 'add-offset-processor',
+ {processorOptions: {offset: offset}});
+
+ src.connect(worklet).connect(merger, 0, 0);
+ src.connect(merger, 0, 1);
+
+ // Start and stop the source. The stop time is fairly arbitrary,
+ // but use a render quantum boundary for simplicity.
+ const stopFrame = RENDER_QUANTUM_FRAMES;
+ src.start(0);
+ src.stop(stopFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(buffer => {
+ let ch0 = buffer.getChannelData(0);
+ let ch1 = buffer.getChannelData(1);
+
+ let shifted = ch1.slice(0, stopFrame).map(x => x + offset);
+
+ // The initial part of the output should be the oscillator
+ // shifted by |offset|.
+ should(
+ ch0.slice(0, stopFrame),
+ `AudioWorklet output[0:${stopFrame - 1}]`)
+ .beCloseToArray(shifted, {absoluteThreshold: 0});
+
+ // Output should be constant after the source has stopped.
+ should(
+ ch0.slice(stopFrame),
+ `AudioWorklet output[${stopFrame}:]`)
+ .beConstantValueOf(offset);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html
new file mode 100644
index 0000000000..f6fa6ddd98
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/suspended-context-messageport.https.html
@@ -0,0 +1,51 @@
+<!doctype html>
+<title>Test MessagePort while AudioContext is not running</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+const get_node_and_message = (context) => {
+ const node = new AudioWorkletNode(context, 'port-processor');
+ return new Promise((resolve) => {
+ node.port.onmessage = (event) => resolve({node: node, event: event});
+ });
+};
+const ping_for_message = (node) => {
+ return new Promise((resolve) => {
+ node.port.onmessage = resolve;
+ node.port.postMessage('ping');
+ });
+};
+const modulePath = 'processors/port-processor.js';
+
+promise_test(async () => {
+ const realtime = new AudioContext();
+ await realtime.audioWorklet.addModule(modulePath);
+ await realtime.suspend();
+ const currentTime = realtime.currentTime;
+ let {node, event} = await get_node_and_message(realtime);
+ assert_equals(event.data.timeStamp, currentTime, 'created message time');
+ event = await ping_for_message(node);
+ assert_equals(event.data.timeStamp, currentTime, 'pong time');
+}, 'realtime suspended');
+
+let offline;
+promise_test(async () => {
+ offline = new OfflineAudioContext({length: 128 + 1, sampleRate: 16384});
+ await offline.audioWorklet.addModule(modulePath);
+ assert_equals(offline.currentTime, 0, 'time before start');
+ let {node, event} = await get_node_and_message(offline);
+ assert_equals(event.data.timeStamp, 0, 'created time before start');
+ event = await ping_for_message(node);
+ assert_equals(event.data.timeStamp, 0, 'pong time before start');
+}, 'offline before start');
+
+promise_test(async () => {
+ await offline.startRendering();
+ const expected = 2 * 128 / offline.sampleRate;
+ assert_equals(offline.currentTime, expected, 'time on complete');
+ let {node, event} = await get_node_and_message(offline);
+ assert_equals(event.data.timeStamp, expected, "created time on complete");
+ event = await ping_for_message(node);
+ assert_equals(event.data.timeStamp, expected, "pong time on complete");
+}, 'offline on complete');
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-allpass.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-allpass.html
new file mode 100644
index 0000000000..86618f9e46
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-allpass.html
@@ -0,0 +1,42 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-allpass.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad allpass filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ let filterParameters = [
+ {cutoff: 0, q: 10, gain: 1},
+ {cutoff: 1, q: 10, gain: 1},
+ {cutoff: .5, q: 0, gain: 1},
+ {cutoff: 0.25, q: 10, gain: 1},
+ ];
+ createTestAndRun(context, 'allpass', {
+ should: should,
+ threshold: 3.9337e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-automation.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-automation.html
new file mode 100644
index 0000000000..d459d16fb1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-automation.html
@@ -0,0 +1,406 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Biquad Automation Test
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Don't need to run these tests at high sampling rate, so just use a low
+ // one to reduce memory usage and complexity.
+ let sampleRate = 16000;
+
+ // How long to render for each test.
+ let renderDuration = 0.25;
+ // Where to end the automations. Fairly arbitrary, but must end before
+ // the renderDuration.
+ let automationEndTime = renderDuration / 2;
+
+ let audit = Audit.createTaskRunner();
+
+ // The definition of the linear ramp automation function.
+ function linearRamp(t, v0, v1, t0, t1) {
+ return v0 + (v1 - v0) * (t - t0) / (t1 - t0);
+ }
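+      // Worked example (illustrative only): halfway through a ramp from 100
+      // to 500 over [0, 0.25] sec, linearRamp(0.125, 100, 500, 0, 0.25)
+      // returns 300.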
+
+ // Generate the filter coefficients for the specified filter using the
+ // given parameters for the given duration. |filterTypeFunction| is a
+ // function that returns the filter coefficients for one set of
+ // parameters. |parameters| is a property bag that contains the start and
+ // end values (as an array) for each of the biquad attributes. The
+ // properties are |freq|, |Q|, |gain|, and |detune|. |duration| is the
+ // number of seconds for which the coefficients are generated.
+ //
+      // Returns a property bag with properties |b0|, |b1|, |b2|, |a1|, |a2|.
+      // Each property is an array consisting of the coefficients for the
+      // time-varying biquad filter.
+ function generateFilterCoefficients(
+ filterTypeFunction, parameters, duration) {
+ let renderEndFrame = Math.ceil(renderDuration * sampleRate);
+ let endFrame = Math.ceil(duration * sampleRate);
+ let nCoef = renderEndFrame;
+ let b0 = new Float64Array(nCoef);
+ let b1 = new Float64Array(nCoef);
+ let b2 = new Float64Array(nCoef);
+ let a1 = new Float64Array(nCoef);
+ let a2 = new Float64Array(nCoef);
+
+ let k = 0;
+ // If the property is not given, use the defaults.
+ let freqs = parameters.freq || [350, 350];
+ let qs = parameters.Q || [1, 1];
+ let gains = parameters.gain || [0, 0];
+ let detunes = parameters.detune || [0, 0];
+
+ for (let frame = 0; frame <= endFrame; ++frame) {
+ // Apply linear ramp at frame |frame|.
+ let f =
+ linearRamp(frame / sampleRate, freqs[0], freqs[1], 0, duration);
+ let q = linearRamp(frame / sampleRate, qs[0], qs[1], 0, duration);
+ let g =
+ linearRamp(frame / sampleRate, gains[0], gains[1], 0, duration);
+ let d = linearRamp(
+ frame / sampleRate, detunes[0], detunes[1], 0, duration);
+
+ // Compute actual frequency parameter
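+          // (For example, d = +1200 cents doubles f and d = -1200 cents
+          // halves it.)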
+ f = f * Math.pow(2, d / 1200);
+
+ // Compute filter coefficients
+ let coef = filterTypeFunction(f / (sampleRate / 2), q, g);
+ b0[k] = coef.b0;
+ b1[k] = coef.b1;
+ b2[k] = coef.b2;
+ a1[k] = coef.a1;
+ a2[k] = coef.a2;
+ ++k;
+ }
+
+ // Fill the rest of the arrays with the constant value to the end of
+ // the rendering duration.
+ b0.fill(b0[endFrame], endFrame + 1);
+ b1.fill(b1[endFrame], endFrame + 1);
+ b2.fill(b2[endFrame], endFrame + 1);
+ a1.fill(a1[endFrame], endFrame + 1);
+ a2.fill(a2[endFrame], endFrame + 1);
+
+ return {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2};
+ }
+
+ // Apply the given time-varying biquad filter to the given signal,
+ // |signal|. |coef| should be the time-varying coefficients of the
+ // filter, as returned by |generateFilterCoefficients|.
+ function timeVaryingFilter(signal, coef) {
+ let length = signal.length;
+ // Use double precision for the internal computations.
+ let y = new Float64Array(length);
+
+ // Prime the pump. (Assumes the signal has length >= 2!)
+ y[0] = coef.b0[0] * signal[0];
+ y[1] =
+ coef.b0[1] * signal[1] + coef.b1[1] * signal[0] - coef.a1[1] * y[0];
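+        // (The b2 and a2 terms are omitted above because x[-1], x[-2],
+        // y[-1], and y[-2] are taken to be zero.)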
+
+ for (let n = 2; n < length; ++n) {
+ y[n] = coef.b0[n] * signal[n] + coef.b1[n] * signal[n - 1] +
+ coef.b2[n] * signal[n - 2];
+ y[n] -= coef.a1[n] * y[n - 1] + coef.a2[n] * y[n - 2];
+ }
+
+ // But convert the result to single precision for comparison.
+ return y.map(Math.fround);
+ }
+
+ // Configure the audio graph using |context|. Returns the biquad filter
+ // node and the AudioBuffer used for the source.
+ function configureGraph(context, toneFrequency) {
+ // The source is just a simple sine wave.
+ let src = context.createBufferSource();
+ let b =
+ context.createBuffer(1, renderDuration * sampleRate, sampleRate);
+ let data = b.getChannelData(0);
+ let omega = 2 * Math.PI * toneFrequency / sampleRate;
+ for (let k = 0; k < data.length; ++k) {
+ data[k] = Math.sin(omega * k);
+ }
+ src.buffer = b;
+ let f = context.createBiquadFilter();
+ src.connect(f);
+ f.connect(context.destination);
+
+ src.start();
+
+ return {filter: f, source: b};
+ }
+
+ function createFilterVerifier(
+ should, filterCreator, threshold, parameters, input, message) {
+ return function(resultBuffer) {
+ let actual = resultBuffer.getChannelData(0);
+ let coefs = generateFilterCoefficients(
+ filterCreator, parameters, automationEndTime);
+
+          let reference = timeVaryingFilter(input, coefs);
+
+ should(actual, message).beCloseToArray(reference, {
+ absoluteThreshold: threshold
+ });
+ };
+ }
+
+ // Automate just the frequency parameter. A bandpass filter is used where
+ // the center frequency is swept across the source (which is a simple
+ // tone).
+ audit.define('automate-freq', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Center frequency of bandpass filter and also the frequency of the
+ // test tone.
+ let centerFreq = 10 * 440;
+
+ // Sweep the frequency +/- 5*440 Hz from the center. This should cause
+ // the output to be low at the beginning and end of the test where the
+ // tone is outside the pass band of the filter, but high in the middle
+ // of the automation time where the tone is near the center of the pass
+ // band. Make sure the frequency sweep stays inside the Nyquist
+ // frequency.
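+        // (With sampleRate = 16000, Nyquist is 8000 Hz and the sweep spans
+        // 2200 Hz to 6600 Hz, safely below it.)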
+ let parameters = {freq: [centerFreq - 5 * 440, centerFreq + 5 * 440]};
+ let graph = configureGraph(context, centerFreq);
+ let f = graph.filter;
+ let b = graph.source;
+
+ f.type = 'bandpass';
+ f.frequency.setValueAtTime(parameters.freq[0], 0);
+ f.frequency.linearRampToValueAtTime(
+ parameters.freq[1], automationEndTime);
+
+ context.startRendering()
+ .then(createFilterVerifier(
+ should, createBandpassFilter, 4.6455e-6, parameters,
+ b.getChannelData(0),
+ 'Output of bandpass filter with frequency automation'))
+ .then(() => task.done());
+ });
+
+ // Automate just the Q parameter. A bandpass filter is used where the Q
+ // of the filter is swept.
+ audit.define('automate-q', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // The frequency of the test tone.
+ let centerFreq = 440;
+
+        // Sweep the Q parameter between 1 and 200. This causes the filter to
+        // pass most of the tone at the beginning but progressively less of
+        // it toward the end, because the center frequency of the bandpass
+        // filter is set slightly off from the actual tone frequency.
+ let parameters = {
+ Q: [1, 200],
+ // Center frequency of the bandpass filter is just 25 Hz above the
+ // tone frequency.
+ freq: [centerFreq + 25, centerFreq + 25]
+ };
+ let graph = configureGraph(context, centerFreq);
+ let f = graph.filter;
+ let b = graph.source;
+
+ f.type = 'bandpass';
+ f.frequency.value = parameters.freq[0];
+ f.Q.setValueAtTime(parameters.Q[0], 0);
+ f.Q.linearRampToValueAtTime(parameters.Q[1], automationEndTime);
+
+ context.startRendering()
+ .then(createFilterVerifier(
+ should, createBandpassFilter, 1.0133e-6, parameters,
+ b.getChannelData(0),
+ 'Output of bandpass filter with Q automation'))
+ .then(() => task.done());
+ });
+
+ // Automate just the gain of the lowshelf filter. A test tone will be in
+ // the lowshelf part of the filter. The output will vary as the gain of
+ // the lowshelf is changed.
+ audit.define('automate-gain', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Frequency of the test tone.
+ let centerFreq = 440;
+
+ // Set the cutoff frequency of the lowshelf to be significantly higher
+ // than the test tone. Sweep the gain from 20 dB to -20 dB. (We go from
+ // 20 to -20 to easily verify that the filter didn't go unstable.)
+ let parameters = {freq: [3500, 3500], gain: [20, -20]};
+ let graph = configureGraph(context, centerFreq);
+ let f = graph.filter;
+ let b = graph.source;
+
+ f.type = 'lowshelf';
+ f.frequency.value = parameters.freq[0];
+ f.gain.setValueAtTime(parameters.gain[0], 0);
+ f.gain.linearRampToValueAtTime(parameters.gain[1], automationEndTime);
+
+ context.startRendering()
+ .then(createFilterVerifier(
+ should, createLowShelfFilter, 2.7657e-5, parameters,
+ b.getChannelData(0),
+ 'Output of lowshelf filter with gain automation'))
+ .then(() => task.done());
+ });
+
+ // Automate just the detune parameter. Basically the same test as for the
+      // frequency parameter, but we use the detune parameter to modulate the
+ // frequency parameter.
+ audit.define('automate-detune', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ let centerFreq = 10 * 440;
+ let parameters = {
+ freq: [centerFreq, centerFreq],
+ detune: [-10 * 1200, 10 * 1200]
+ };
+ let graph = configureGraph(context, centerFreq);
+ let f = graph.filter;
+ let b = graph.source;
+
+ f.type = 'bandpass';
+ f.frequency.value = parameters.freq[0];
+ f.detune.setValueAtTime(parameters.detune[0], 0);
+ f.detune.linearRampToValueAtTime(
+ parameters.detune[1], automationEndTime);
+
+ context.startRendering()
+ .then(createFilterVerifier(
+ should, createBandpassFilter, 3.1471e-5, parameters,
+ b.getChannelData(0),
+ 'Output of bandpass filter with detune automation'))
+ .then(() => task.done());
+ });
+
+ // Automate all of the filter parameters at once. This is a basic check
+ // that everything is working. A peaking filter is used because it uses
+ // all of the parameters.
+ audit.define('automate-all', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+ let graph = configureGraph(context, 10 * 440);
+ let f = graph.filter;
+ let b = graph.source;
+
+ // Sweep all of the filter parameters. These are pretty much arbitrary.
+ let parameters = {
+ freq: [8000, 100],
+ Q: [f.Q.value, .0001],
+ gain: [f.gain.value, 20],
+ detune: [2400, -2400]
+ };
+
+ f.type = 'peaking';
+ // Set starting points for all parameters of the filter. Start at 10
+ // kHz for the center frequency, and the defaults for Q and gain.
+ f.frequency.setValueAtTime(parameters.freq[0], 0);
+ f.Q.setValueAtTime(parameters.Q[0], 0);
+ f.gain.setValueAtTime(parameters.gain[0], 0);
+ f.detune.setValueAtTime(parameters.detune[0], 0);
+
+ // Linear ramp each parameter
+ f.frequency.linearRampToValueAtTime(
+ parameters.freq[1], automationEndTime);
+ f.Q.linearRampToValueAtTime(parameters.Q[1], automationEndTime);
+ f.gain.linearRampToValueAtTime(parameters.gain[1], automationEndTime);
+ f.detune.linearRampToValueAtTime(
+ parameters.detune[1], automationEndTime);
+
+ context.startRendering()
+ .then(createFilterVerifier(
+ should, createPeakingFilter, 6.2907e-4, parameters,
+ b.getChannelData(0),
+ 'Output of peaking filter with automation of all parameters'))
+ .then(() => task.done());
+ });
+
+ // Test that modulation of the frequency parameter of the filter works. A
+ // sinusoid of 440 Hz is the test signal that is applied to a bandpass
+ // biquad filter. The frequency parameter of the filter is modulated by a
+ // sinusoid at 103 Hz, and the frequency modulation varies from 116 to 412
+ // Hz. (This test was taken from the description in
+ // https://github.com/WebAudio/web-audio-api/issues/509#issuecomment-94731355)
+ audit.define('modulation', (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate);
+
+ // Create a graph with the sinusoidal source at 440 Hz as the input to a
+ // biquad filter.
+ let graph = configureGraph(context, 440);
+ let f = graph.filter;
+ let b = graph.source;
+
+ f.type = 'bandpass';
+ f.Q.value = 5;
+ f.frequency.value = 264;
+
+ // Create the modulation source, a sinusoid with frequency 103 Hz and
+ // amplitude 148. (The amplitude of 148 is added to the filter's
+ // frequency value of 264 to produce a sinusoidal modulation of the
+ // frequency parameter from 116 to 412 Hz.)
+ let mod = context.createBufferSource();
+ let mbuffer =
+ context.createBuffer(1, renderDuration * sampleRate, sampleRate);
+ let d = mbuffer.getChannelData(0);
+ let omega = 2 * Math.PI * 103 / sampleRate;
+ for (let k = 0; k < d.length; ++k) {
+ d[k] = 148 * Math.sin(omega * k);
+ }
+ mod.buffer = mbuffer;
+
+ mod.connect(f.frequency);
+
+ mod.start();
+ context.startRendering()
+ .then(function(resultBuffer) {
+ let actual = resultBuffer.getChannelData(0);
+ // Compute the filter coefficients using the mod sine wave
+
+ let endFrame = Math.ceil(renderDuration * sampleRate);
+ let nCoef = endFrame;
+ let b0 = new Float64Array(nCoef);
+ let b1 = new Float64Array(nCoef);
+ let b2 = new Float64Array(nCoef);
+ let a1 = new Float64Array(nCoef);
+ let a2 = new Float64Array(nCoef);
+
+ // Generate the filter coefficients when the frequency varies from
+              // 116 to 412 Hz using the 103 Hz sinusoid.
+ for (let k = 0; k < nCoef; ++k) {
+ let freq = f.frequency.value + d[k];
+ let c = createBandpassFilter(
+ freq / (sampleRate / 2), f.Q.value, f.gain.value);
+ b0[k] = c.b0;
+ b1[k] = c.b1;
+ b2[k] = c.b2;
+ a1[k] = c.a1;
+ a2[k] = c.a2;
+ }
+              let reference = timeVaryingFilter(
+ b.getChannelData(0),
+ {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2});
+
+ should(
+ actual,
+ 'Output of bandpass filter with sinusoidal modulation of bandpass center frequency')
+ .beCloseToArray(reference, {absoluteThreshold: 3.9787e-5});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-bandpass.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-bandpass.html
new file mode 100644
index 0000000000..166aa9b3cb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-bandpass.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-bandpass.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad bandpass filter.'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 0, gain: 1},
+ {cutoff: 1, q: 0, gain: 1},
+ {cutoff: 0.5, q: 0, gain: 1},
+ {cutoff: 0.25, q: 1, gain: 1},
+ ];
+
+ createTestAndRun(context, 'bandpass', {
+ should: should,
+ threshold: 2.2501e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-basic.html
new file mode 100644
index 0000000000..441e98a251
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-basic.html
@@ -0,0 +1,134 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Basic BiquadFilterNode Properties
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let testFrames = 100;
+
+ // Global context that can be used by the individual tasks. It must be
+ // defined by the initialize task.
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ should(() => {
+ context = new OfflineAudioContext(1, testFrames, sampleRate);
+ }, 'Initialize context for testing').notThrow();
+ task.done();
+ });
+
+ audit.define('existence', (task, should) => {
+ should(context.createBiquadFilter, 'context.createBiquadFilter')
+ .exist();
+ task.done();
+ });
+
+ audit.define('parameters', (task, should) => {
+        // Create a biquad filter with default parameters; the specific
+        // configuration doesn't matter for checking these attributes.
+        let f = context.createBiquadFilter();
+
+ should(f.numberOfInputs, 'numberOfInputs').beEqualTo(1);
+ should(f.numberOfOutputs, 'numberOfOutputs').beEqualTo(1);
+ should(f.channelCountMode, 'channelCountMode').beEqualTo('max');
+ should(f.channelInterpretation, 'channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ audit.define('exceptions-createBiquadFilter', (task, should) => {
+ should(function() {
+          // No arguments are required.
+ context.createBiquadFilter();
+ }, 'createBiquadFilter()').notThrow();
+
+ task.done();
+ });
+
+      audit.define('exceptions-getFrequencyResponse', (task, should) => {
+        // Create a biquad filter with default parameters; the specific
+        // filter doesn't matter for these argument checks.
+        let f = context.createBiquadFilter();
+
+ should(
+ function() {
+ // frequencyHz can't be null.
+ f.getFrequencyResponse(
+ null, new Float32Array(1), new Float32Array(1));
+ },
+ 'getFrequencyResponse(' +
+ 'null, ' +
+ 'new Float32Array(1), ' +
+ 'new Float32Array(1))')
+ .throw(TypeError);
+
+ should(
+ function() {
+ // magResponse can't be null.
+ f.getFrequencyResponse(
+ new Float32Array(1), null, new Float32Array(1));
+ },
+ 'getFrequencyResponse(' +
+ 'new Float32Array(1), ' +
+ 'null, ' +
+ 'new Float32Array(1))')
+ .throw(TypeError);
+
+ should(
+ function() {
+ // phaseResponse can't be null.
+ f.getFrequencyResponse(
+ new Float32Array(1), new Float32Array(1), null);
+ },
+ 'getFrequencyResponse(' +
+ 'new Float32Array(1), ' +
+ 'new Float32Array(1), ' +
+ 'null)')
+ .throw(TypeError);
+
+ should(
+ function() {
+                // magResponse array must be the same length as frequencyHz.
+ f.getFrequencyResponse(
+ new Float32Array(10), new Float32Array(1),
+ new Float32Array(20));
+ },
+ 'getFrequencyResponse(' +
+ 'new Float32Array(10), ' +
+ 'new Float32Array(1), ' +
+ 'new Float32Array(20))')
+ .throw(DOMException, 'InvalidAccessError');
+
+ should(
+ function() {
+ // phaseResponse array must be the same length as frequencyHz
+ f.getFrequencyResponse(
+ new Float32Array(10), new Float32Array(20),
+ new Float32Array(1));
+ },
+ 'getFrequencyResponse(' +
+ 'new Float32Array(10), ' +
+ 'new Float32Array(20), ' +
+ 'new Float32Array(1))')
+ .throw(DOMException, 'InvalidAccessError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-getFrequencyResponse.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-getFrequencyResponse.html
new file mode 100644
index 0000000000..23222e4df9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-getFrequencyResponse.html
@@ -0,0 +1,394 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test BiquadFilter getFrequencyResponse() functionality
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Test the frequency response of a biquad filter. We compute the
+ // frequency response for a simple peaking biquad filter and compare it
+ // with the expected frequency response. The actual filter used doesn't
+ // matter since we're testing getFrequencyResponse and not the actual
+ // filter output. The filters are extensively tested in other biquad
+ // tests.
+
+ // The magnitude response of the biquad filter.
+ let magResponse;
+
+ // The phase response of the biquad filter.
+ let phaseResponse;
+
+ // Number of frequency samples to take.
+ let numberOfFrequencies = 1000;
+
+ // The filter parameters.
+ let filterCutoff = 1000; // Hz.
+ let filterQ = 1;
+ let filterGain = 5; // Decibels.
+
+ // The magnitudes and phases of the reference frequency response.
+ let expectedMagnitudes;
+ let expectedPhases;
+
+      // Convert frequency in Hz to a normalized frequency between 0 and 1,
+      // with 1 corresponding to the Nyquist frequency.
+ function normalizedFrequency(freqHz, sampleRate) {
+ let nyquist = sampleRate / 2;
+ return freqHz / nyquist;
+ }
+
+ // Get the filter response at a (normalized) frequency |f| for the filter
+ // with coefficients |coef|.
+ function getResponseAt(coef, f) {
+ let b0 = coef.b0;
+ let b1 = coef.b1;
+ let b2 = coef.b2;
+ let a1 = coef.a1;
+ let a2 = coef.a2;
+
+ // H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2)
+ //
+      // Compute H(exp(i * pi * f)). No native complex numbers in JavaScript,
+      // so break H(exp(i * pi * f)) into the real and imaginary parts of
+ // the numerator and denominator. Let omega = pi * f. Then the
+ // numerator is
+ //
+ // b0 + b1 * cos(omega) + b2 * cos(2 * omega) - i * (b1 * sin(omega) +
+ // b2 * sin(2 * omega))
+ //
+ // and the denominator is
+ //
+ // 1 + a1 * cos(omega) + a2 * cos(2 * omega) - i * (a1 * sin(omega) + a2
+ // * sin(2 * omega))
+ //
+ // Compute the magnitude and phase from the real and imaginary parts.
+
+ let omega = Math.PI * f;
+ let numeratorReal =
+ b0 + b1 * Math.cos(omega) + b2 * Math.cos(2 * omega);
+ let numeratorImag = -(b1 * Math.sin(omega) + b2 * Math.sin(2 * omega));
+ let denominatorReal =
+ 1 + a1 * Math.cos(omega) + a2 * Math.cos(2 * omega);
+ let denominatorImag =
+ -(a1 * Math.sin(omega) + a2 * Math.sin(2 * omega));
+
+ let magnitude = Math.sqrt(
+ (numeratorReal * numeratorReal + numeratorImag * numeratorImag) /
+ (denominatorReal * denominatorReal +
+ denominatorImag * denominatorImag));
+ let phase = Math.atan2(numeratorImag, numeratorReal) -
+ Math.atan2(denominatorImag, denominatorReal);
+
+ if (phase >= Math.PI) {
+ phase -= 2 * Math.PI;
+ } else if (phase <= -Math.PI) {
+ phase += 2 * Math.PI;
+ }
+
+ return {magnitude: magnitude, phase: phase};
+ }
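+      // A sanity check implied by the formula: at DC (f = 0), the response
+      // reduces to (b0 + b1 + b2) / (1 + a1 + a2), with zero phase when both
+      // sums are positive.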
+
+ // Compute the reference frequency response for the biquad filter |filter|
+ // at the frequency samples given by |frequencies|.
+ function frequencyResponseReference(filter, frequencies) {
+ let sampleRate = filter.context.sampleRate;
+ let normalizedFreq =
+ normalizedFrequency(filter.frequency.value, sampleRate);
+ let filterCoefficients = createFilter(
+ filter.type, normalizedFreq, filter.Q.value, filter.gain.value);
+
+ let magnitudes = [];
+ let phases = [];
+
+ for (let k = 0; k < frequencies.length; ++k) {
+ let response = getResponseAt(
+ filterCoefficients,
+ normalizedFrequency(frequencies[k], sampleRate));
+ magnitudes.push(response.magnitude);
+ phases.push(response.phase);
+ }
+
+ return {magnitudes: magnitudes, phases: phases};
+ }
+
+ // Compute a set of linearly spaced frequencies.
+ function createFrequencies(nFrequencies, sampleRate) {
+ let frequencies = new Float32Array(nFrequencies);
+ let nyquist = sampleRate / 2;
+ let freqDelta = nyquist / nFrequencies;
+
+ for (let k = 0; k < nFrequencies; ++k) {
+ frequencies[k] = k * freqDelta;
+ }
+
+ return frequencies;
+ }
+
+ function linearToDecibels(x) {
+ if (x) {
+ return 20 * Math.log(x) / Math.LN10;
+ } else {
+ return -1000;
+ }
+ }
+
+ function decibelsToLinear(x) {
+ return Math.pow(10, x/20);
+ }
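+      // These two are inverses; for example, decibelsToLinear(20) === 10 and
+      // linearToDecibels(10) === 20. linearToDecibels(0) returns -1000 as a
+      // finite stand-in for -Infinity.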
+
+ // Look through the array and find any NaN or infinity. Returns the index
+      // of the first occurrence or -1 if none.
+ function findBadNumber(signal) {
+ for (let k = 0; k < signal.length; ++k) {
+ if (!isValidNumber(signal[k])) {
+ return k;
+ }
+ }
+ return -1;
+ }
+
+ // Compute absolute value of the difference between phase angles, taking
+ // into account the wrapping of phases.
+ function absolutePhaseDifference(x, y) {
+ let diff = Math.abs(x - y);
+
+ if (diff > Math.PI) {
+ diff = 2 * Math.PI - diff;
+ }
+ return diff;
+ }
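+      // For example, absolutePhaseDifference(Math.PI - 0.1, -Math.PI + 0.1)
+      // is 0.2, since the two angles coincide once wrapping at +/-pi is
+      // taken into account.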
+
+ // Compare the frequency response with our expected response.
+ //
+ // should - The |should| method provided by audit.define
+ // filter - The filter node used in the test
+ // frequencies - array of frequencies provided to |getFrequencyResponse|
+ // magResponse - mag response from |getFrequencyResponse|
+ // phaseResponse - phase response from |getFrequencyResponse|
+ // maxAllowedMagError - error threshold for mag response, in dB
+ // maxAllowedPhaseError - error threshold for phase response, in rad.
+ function compareResponses(
+ should, filter, frequencies, magResponse, phaseResponse,
+ maxAllowedMagError, maxAllowedPhaseError) {
+ let expectedResponse = frequencyResponseReference(filter, frequencies);
+
+ expectedMagnitudes = expectedResponse.magnitudes;
+ expectedPhases = expectedResponse.phases;
+
+ let n = magResponse.length;
+ let badResponse = false;
+
+ let maxMagError = -1;
+ let maxMagErrorIndex = -1;
+
+ let k;
+ let hasBadNumber;
+
+ hasBadNumber = findBadNumber(magResponse);
+ badResponse =
+ !should(
+ hasBadNumber >= 0 ? 1 : 0,
+ filter.type +
+ ': Number of non-finite values in magnitude response')
+ .beEqualTo(0);
+
+ hasBadNumber = findBadNumber(phaseResponse);
+ badResponse =
+ !should(
+ hasBadNumber >= 0 ? 1 : 0,
+                   filter.type + ': Number of non-finite values in phase response')
+ .beEqualTo(0);
+
+ // These aren't testing the implementation itself. Instead, these are
+ // sanity checks on the reference. Failure here does not imply an error
+ // in the implementation.
+ hasBadNumber = findBadNumber(expectedMagnitudes);
+ badResponse =
+ !should(
+ hasBadNumber >= 0 ? 1 : 0,
+ filter.type +
+ ': Number of non-finite values in the expected magnitude response')
+ .beEqualTo(0);
+
+ hasBadNumber = findBadNumber(expectedPhases);
+ badResponse =
+ !should(
+ hasBadNumber >= 0 ? 1 : 0,
+ filter.type +
+ ': Number of non-finite values in expected phase response')
+ .beEqualTo(0);
+
+ // If we found a NaN or infinity, the following tests aren't very
+ // helpful, especially for NaN. We run them anyway, after printing a
+ // warning message.
+ should(
+ !badResponse,
+ filter.type +
+ ': Actual and expected results contained only finite values')
+ .beTrue();
+
+ for (k = 0; k < n; ++k) {
+ let error = Math.abs(
+ linearToDecibels(magResponse[k]) -
+ linearToDecibels(expectedMagnitudes[k]));
+ if (error > maxMagError) {
+ maxMagError = error;
+ maxMagErrorIndex = k;
+ }
+ }
+
+ should(
+ linearToDecibels(maxMagError),
+ filter.type + ': Max error (' + linearToDecibels(maxMagError) +
+ ' dB) of magnitude response at frequency ' +
+ frequencies[maxMagErrorIndex] + ' Hz')
+ .beLessThanOrEqualTo(linearToDecibels(maxAllowedMagError));
+ let maxPhaseError = -1;
+ let maxPhaseErrorIndex = -1;
+
+ for (k = 0; k < n; ++k) {
+ let error =
+ absolutePhaseDifference(phaseResponse[k], expectedPhases[k]);
+ if (error > maxPhaseError) {
+ maxPhaseError = error;
+ maxPhaseErrorIndex = k;
+ }
+ }
+
+ should(
+ radToDegree(maxPhaseError),
+ filter.type + ': Max error (' + radToDegree(maxPhaseError) +
+ ' deg) in phase response at frequency ' +
+ frequencies[maxPhaseErrorIndex] + ' Hz')
+ .beLessThanOrEqualTo(radToDegree(maxAllowedPhaseError));
+ }
+
+ function radToDegree(rad) {
+ // Radians to degrees
+ return rad * 180 / Math.PI;
+ }
+
+ // Test the getFrequencyResponse for each of filter types. Each entry in
+ // this array is a dictionary with these elements:
+ //
+ // type: filter type to be tested
+ // maxErrorInMagnitude: Allowed error in computed magnitude response
+      //   maxErrorInPhase: Allowed error in computed phase response
+ [{
+ type: 'lowpass',
+ maxErrorInMagnitude: decibelsToLinear(-73.0178),
+ maxErrorInPhase: 8.04360e-6
+ },
+ {
+ type: 'highpass',
+ maxErrorInMagnitude: decibelsToLinear(-117.5461),
+ maxErrorInPhase: 6.9691e-6
+ },
+ {
+ type: 'bandpass',
+ maxErrorInMagnitude: decibelsToLinear(-79.0139),
+ maxErrorInPhase: 4.9371e-6
+ },
+ {
+ type: 'lowshelf',
+ maxErrorInMagnitude: decibelsToLinear(-120.4038),
+ maxErrorInPhase: 4.0724e-6
+ },
+ {
+ type: 'highshelf',
+        maxErrorInMagnitude: decibelsToLinear(-120.1303),
+ maxErrorInPhase: 4.0724e-6
+ },
+ {
+ type: 'peaking',
+ maxErrorInMagnitude: decibelsToLinear(-119.1176),
+ maxErrorInPhase: 6.4724e-8
+ },
+ {
+ type: 'notch',
+ maxErrorInMagnitude: decibelsToLinear(-87.0808),
+ maxErrorInPhase: 6.6300e-6
+ },
+ {
+ type: 'allpass',
+ maxErrorInMagnitude: decibelsToLinear(-265.3517),
+ maxErrorInPhase: 1.3260e-5
+ }].forEach(test => {
+ audit.define(
+ {label: test.type, description: 'Frequency response'},
+ (task, should) => {
+ let context = new AudioContext();
+
+ let filter = new BiquadFilterNode(context, {
+ type: test.type,
+ frequency: filterCutoff,
+ Q: filterQ,
+ gain: filterGain
+ });
+
+ let frequencies =
+ createFrequencies(numberOfFrequencies, context.sampleRate);
+ magResponse = new Float32Array(numberOfFrequencies);
+ phaseResponse = new Float32Array(numberOfFrequencies);
+
+ filter.getFrequencyResponse(
+ frequencies, magResponse, phaseResponse);
+ compareResponses(
+ should, filter, frequencies, magResponse, phaseResponse,
+ test.maxErrorInMagnitude, test.maxErrorInPhase);
+
+ task.done();
+ });
+ });
+
+ audit.define(
+ {
+ label: 'getFrequencyResponse',
+ description: 'Test out-of-bounds frequency values'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(1, 1, sampleRate);
+ let filter = new BiquadFilterNode(context);
+
+ // Frequencies to test. These are all outside the valid range of
+ // frequencies of 0 to Nyquist.
+ let freq = new Float32Array(2);
+ freq[0] = -1;
+ freq[1] = context.sampleRate / 2 + 1;
+
+ let mag = new Float32Array(freq.length);
+ let phase = new Float32Array(freq.length);
+
+ filter.getFrequencyResponse(freq, mag, phase);
+
+          // Verify that the returned magnitude and phase entries are all
+          // NaN, since the frequencies are outside the valid range.
+ for (let k = 0; k < mag.length; ++k) {
+ should(mag[k],
+ 'Magnitude response at frequency ' + freq[k])
+ .beNaN();
+ }
+
+ for (let k = 0; k < phase.length; ++k) {
+ should(phase[k],
+ 'Phase response at frequency ' + freq[k])
+ .beNaN();
+ }
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highpass.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highpass.html
new file mode 100644
index 0000000000..45c335bc4b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highpass.html
@@ -0,0 +1,42 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-highpass.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad highpass filter'},
+ function(task, should) {
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 1, gain: 1},
+ {cutoff: 1, q: 1, gain: 1},
+ {cutoff: 0.25, q: 1, gain: 1},
+ ];
+
+ createTestAndRun(context, 'highpass', {
+ should: should,
+ threshold: 1.5487e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highshelf.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highshelf.html
new file mode 100644
index 0000000000..345195f104
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-highshelf.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-highshelf.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad highshelf filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 10, gain: 10},
+ {cutoff: 1, q: 10, gain: 10},
+ {cutoff: 0.25, q: 10, gain: 10},
+ ];
+
+ createTestAndRun(context, 'highshelf', {
+ should: should,
+ threshold: 6.2577e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html
new file mode 100644
index 0000000000..d20786e36b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowpass.html
@@ -0,0 +1,45 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-lowpass.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad lowpass filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 1, gain: 1},
+ {cutoff: 1, q: 1, gain: 1},
+ {cutoff: 0.25, q: 1, gain: 1},
+ {cutoff: 0.25, q: 1, gain: 1, detune: 100},
+ {cutoff: 0.01, q: 1, gain: 1, detune: -200},
+ ];
+
+ createTestAndRun(context, 'lowpass', {
+ should: should,
+ threshold: 9.7869e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowshelf.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowshelf.html
new file mode 100644
index 0000000000..ab76cefd4b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-lowshelf.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-lowshelf.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad lowshelf filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 10, gain: 10},
+ {cutoff: 1, q: 10, gain: 10},
+ {cutoff: 0.25, q: 10, gain: 10},
+ ];
+
+ createTestAndRun(context, 'lowshelf', {
+ should: should,
+ threshold: 3.8349e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-notch.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-notch.html
new file mode 100644
index 0000000000..98e6e6e02c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-notch.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-notch.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad notch filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ let filterParameters = [
+ {cutoff: 0, q: 10, gain: 1},
+ {cutoff: 1, q: 10, gain: 1},
+ {cutoff: .5, q: 0, gain: 1},
+ {cutoff: 0.25, q: 10, gain: 1},
+ ];
+
+ createTestAndRun(context, 'notch', {
+ should: should,
+ threshold: 1.9669e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-peaking.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-peaking.html
new file mode 100644
index 0000000000..90b7c1546d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-peaking.html
@@ -0,0 +1,46 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquad-peaking.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ <script src="/webaudio/resources/biquad-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Biquad peaking filter'},
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ // The filters we want to test.
+ let filterParameters = [
+ {cutoff: 0, q: 10, gain: 10},
+ {cutoff: 1, q: 10, gain: 10},
+ {cutoff: .5, q: 0, gain: 10},
+ {cutoff: 0.25, q: 10, gain: 10},
+ ];
+
+ createTestAndRun(context, 'peaking', {
+ should: should,
+ threshold: 5.8234e-8,
+ filterParameters: filterParameters
+ }).then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-tail.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-tail.html
new file mode 100644
index 0000000000..3141bf7ff3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquad-tail.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Biquad Tail Output
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // A high sample rate shows the issue more clearly.
+ let sampleRate = 192000;
+ // Some short duration because we don't need to run the test for very
+ // long.
+ let testDurationSec = 0.5;
+ let testDurationFrames = testDurationSec * sampleRate;
+
+ // Amplitude experimentally determined to give a biquad output close to 1.
+ // (No attempt was made to produce exactly 1; it's not needed.)
+ let sourceAmplitude = 100;
+
+ // The output of the biquad filter should not change by more than this
+ // much between output samples. Threshold was determined experimentally.
+ let glitchThreshold = 0.012968;
+
+      // Test that a Biquad filter doesn't have its output terminated because
+ // the input has gone away. Generally, when a source node is finished, it
+ // disconnects itself from any downstream nodes. This is the correct
+ // behavior. Nodes that have no inputs (disconnected) are generally
+ // assumed to output zeroes. This is also desired behavior. However,
+ // biquad filters have memory so they should not suddenly output zeroes
+ // when the input is disconnected. This test checks to see if the output
+ // doesn't suddenly change to zero.
+ audit.define(
+ {label: 'test', description: 'Biquad Tail Output'},
+ function(task, should) {
+ let context =
+ new OfflineAudioContext(1, testDurationFrames, sampleRate);
+
+ // Create an impulse source.
+ let buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = sourceAmplitude;
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Create the biquad filter. It doesn't really matter what kind, so
+              // the default filter type and parameters are fine. Connect the
+ // source to it.
+ let biquad = context.createBiquadFilter();
+ source.connect(biquad);
+ biquad.connect(context.destination);
+
+ source.start();
+
+ context.startRendering().then(function(result) {
+ // There should be no large discontinuities in the output
+ should(result.getChannelData(0), 'Biquad output')
+ .notGlitch(glitchThreshold);
+ task.done();
+              });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquadfilternode-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquadfilternode-basic.html
new file mode 100644
index 0000000000..7e71d07302
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/biquadfilternode-basic.html
@@ -0,0 +1,64 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ biquadfilternode-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Basic tests for BiquadFilterNode'},
+ function(task, should) {
+
+ let context = new AudioContext();
+ let filter = context.createBiquadFilter();
+
+ should(filter.numberOfInputs, 'Number of inputs').beEqualTo(1);
+
+ should(filter.numberOfOutputs, 'Number of outputs').beEqualTo(1);
+
+ should(filter.type, 'Default filter type').beEqualTo('lowpass');
+
+ should(filter.frequency.value, 'Default frequency value')
+ .beEqualTo(350);
+
+ should(filter.Q.value, 'Default Q value').beEqualTo(1);
+
+ should(filter.gain.value, 'Default gain value').beEqualTo(0);
+
+ // Check that all legal filter types can be set.
+ let filterTypeArray = [
+ {type: 'lowpass'}, {type: 'highpass'}, {type: 'bandpass'},
+ {type: 'lowshelf'}, {type: 'highshelf'}, {type: 'peaking'},
+ {type: 'notch'}, {type: 'allpass'}
+ ];
+
+ for (let i = 0; i < filterTypeArray.length; ++i) {
+ should(
+ () => filter.type = filterTypeArray[i].type,
+ 'Setting filter.type to ' + filterTypeArray[i].type)
+ .notThrow();
+ should(filter.type, 'Filter type is')
+ .beEqualTo(filterTypeArray[i].type);
+ }
+
+
+ // Check that numerical values are no longer supported
+ filter.type = 99;
+ should(filter.type, 'Setting filter.type to (invalid) 99')
+ .notBeEqualTo(99);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/ctor-biquadfilter.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/ctor-biquadfilter.html
new file mode 100644
index 0000000000..e63479f985
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/ctor-biquadfilter.html
@@ -0,0 +1,86 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: BiquadFilter
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'BiquadFilterNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'BiquadFilterNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'type', value: 'lowpass'}, {name: 'Q', value: 1},
+ {name: 'detune', value: 0}, {name: 'frequency', value: 350},
+ {name: 'gain', value: 0.0}
+ ]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'BiquadFilterNode');
+ task.done();
+ });
+
+ audit.define('construct with options', (task, should) => {
+ let node;
+ let options = {
+ type: 'highpass',
+ frequency: 512,
+ detune: 1,
+ Q: 5,
+ gain: 3,
+ };
+
+ should(
+ () => {
+ node = new BiquadFilterNode(context, options);
+ },
+ 'node = new BiquadFilterNode(..., ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ // Test that attributes are set according to the option values.
+ should(node.type, 'node.type').beEqualTo(options.type);
+ should(node.frequency.value, 'node.frequency.value')
+ .beEqualTo(options.frequency);
+        should(node.detune.value, 'node.detune.value')
+ .beEqualTo(options.detune);
+ should(node.Q.value, 'node.Q.value').beEqualTo(options.Q);
+ should(node.gain.value, 'node.gain.value').beEqualTo(options.gain);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html
new file mode 100644
index 0000000000..79dc27035c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html
@@ -0,0 +1,288 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Test No Dezippering of BiquadFilterNode AudioParams
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/biquad-filters.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+      // In the tests below, the initial values are not important, except
+      // that we want them all to be different so that the output contains
+      // different values for the first few samples. A peaking filter is
+      // used because the frequency, Q, gain, and detune parameters are all
+      // used by this filter.
+ //
+ // Also, for the changeList option, the times and new values aren't really
+ // important. They just need to change so that we can verify that the
+ // outputs from the .value setter still matches the output from the
+ // corresponding setValueAtTime.
+ audit.define(
+ {label: 'Test 0', description: 'No dezippering for frequency'},
+ (task, should) => {
+ doTest(should, {
+ paramName: 'frequency',
+ initializer: {type: 'peaking', Q: 1, gain: 5},
+ changeList:
+ [{quantum: 2, newValue: 800}, {quantum: 7, newValue: 200}],
+ threshold: 3.0399e-6
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'Test 1', description: 'No dezippering for detune'},
+ (task, should) => {
+ doTest(should, {
+ paramName: 'detune',
+ initializer:
+ {type: 'peaking', frequency: 400, Q: 3, detune: 33, gain: 10},
+ changeList:
+ [{quantum: 2, newValue: 1000}, {quantum: 5, newValue: -400}],
+ threshold: 4.0532e-6
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'Test 2', description: 'No dezippering for Q'},
+ (task, should) => {
+ doTest(should, {
+ paramName: 'Q',
+ initializer: {type: 'peaking', Q: 5},
+ changeList:
+ [{quantum: 2, newValue: 10}, {quantum: 8, newValue: -10}]
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'Test 3', description: 'No dezippering for gain'},
+ (task, should) => {
+ doTest(should, {
+ paramName: 'gain',
+ initializer: {type: 'peaking', gain: 1},
+ changeList:
+ [{quantum: 2, newValue: 5}, {quantum: 6, newValue: -.3}],
+ threshold: 1.9074e-6
+ }).then(() => task.done());
+ });
+
+ // This test compares the filter output against a JS implementation of the
+ // filter. We're only testing a change in the frequency for a lowpass
+ // filter. This assumes we don't need to test other AudioParam changes
+ // with JS code because any mistakes would be exposed in the tests above.
+ audit.define(
+ {
+ label: 'Test 4',
+ description: 'No dezippering of frequency vs JS filter'
+ },
+ (task, should) => {
+ // Channel 0 is the source, channel 1 is the filtered output.
+ let context = new OfflineAudioContext(2, 2048, 16384);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+ let f = new BiquadFilterNode(context, {type: 'lowpass'});
+
+ // Remember the initial filter parameters.
+ let initialFilter = {
+ type: f.type,
+ frequency: f.frequency.value,
+ gain: f.gain.value,
+ detune: f.detune.value,
+ Q: f.Q.value
+ };
+
+ src.connect(merger, 0, 0);
+ src.connect(f).connect(merger, 0, 1);
+
+ // Apply the filter change at frame |changeFrame| with a new
+ // frequency value of |newValue|.
+ let changeFrame = 2 * RENDER_QUANTUM_FRAMES;
+ let newValue = 750;
+
+ context.suspend(changeFrame / context.sampleRate)
+ .then(() => f.frequency.value = newValue)
+ .then(() => context.resume());
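+          // Suspending on an exact render-quantum boundary lets the value
+          // setter take effect at |changeFrame| with no interpolation, which
+          // is precisely the "no dezippering" behavior under test.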
+
+ src.start();
+
+ context.startRendering()
+ .then(audio => {
+ let signal = audio.getChannelData(0);
+ let actual = audio.getChannelData(1);
+
+ // Get initial filter coefficients and updated coefficients
+ let nyquistFreq = context.sampleRate / 2;
+ let initialCoef = createFilter(
+ initialFilter.type, initialFilter.frequency / nyquistFreq,
+ initialFilter.Q, initialFilter.gain);
+
+ let finalCoef = createFilter(
+ f.type, f.frequency.value / nyquistFreq, f.Q.value,
+ f.gain.value);
+
+ let expected = new Float32Array(signal.length);
+
+ // Filter the initial part of the signal.
+ expected[0] =
+ filterSample(signal[0], initialCoef, 0, 0, 0, 0);
+ expected[1] = filterSample(
+ signal[1], initialCoef, expected[0], 0, signal[0], 0);
+
+ for (let k = 2; k < changeFrame; ++k) {
+ expected[k] = filterSample(
+ signal[k], initialCoef, expected[k - 1],
+ expected[k - 2], signal[k - 1], signal[k - 2]);
+ }
+
+ // Filter the rest of the input with the new coefficients
+ for (let k = changeFrame; k < signal.length; ++k) {
+ expected[k] = filterSample(
+ signal[k], finalCoef, expected[k - 1], expected[k - 2],
+ signal[k - 1], signal[k - 2]);
+ }
+
+ // The JS filter should match the actual output.
+ let match =
+ should(actual, 'Output from ' + f.type + ' filter')
+ .beCloseToArray(
+ expected, {absoluteThreshold: 6.8546e-7});
+ should(match, 'Output matches JS filter results').beTrue();
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'Test 5', description: 'Test with modulation'},
+ (task, should) => {
+ doTest(should, {
+ prefix: 'Modulation: ',
+ paramName: 'frequency',
+ initializer: {type: 'peaking', Q: 5, gain: 5},
+ modulation: true,
+ changeList:
+ [{quantum: 2, newValue: 10}, {quantum: 8, newValue: -10}]
+ }).then(() => task.done());
+
+ });
+
+ audit.run();
+
+ // Run test, returning the promise from startRendering. |options|
+ // specifies the parameters for the test. |options.paramName| is the name
+ // of the AudioParam of the filter that is being tested.
+ // |options.initializer| is the initial value to be used in constructing
+      // the filter. |options.changeList| is an array of dictionaries, each
+      // with two members: |quantum| is the rendering quantum at which we
+ // want to change the AudioParam value, and |newValue| is the value to be
+ // used.
+ function doTest(should, options) {
+ let paramName = options.paramName;
+ let newValue = options.newValue;
+ let prefix = options.prefix || '';
+
+ // Create offline audio context. The sample rate should be a power of
+ // two to eliminate any round-off errors in computing the time at which
+ // to suspend the context for the parameter change. The length is
+        // fairly arbitrary as long as it's big enough to cover the
+        // changeList values. There are two channels: channel 0 is the output
+        // of the filter under test, and channel 1 is the output of the
+        // reference filter.
+ let context = new OfflineAudioContext(2, 2048, 16384);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // |f0| is the filter under test that will have its AudioParam value
+ // changed. |f1| is the reference filter that uses setValueAtTime to
+ // update the AudioParam value.
+ let f0 = new BiquadFilterNode(context, options.initializer);
+ let f1 = new BiquadFilterNode(context, options.initializer);
+
+ src.connect(f0).connect(merger, 0, 0);
+ src.connect(f1).connect(merger, 0, 1);
+
+ // Modulate the AudioParam with an input signal, if requested.
+ if (options.modulation) {
+ // The modulation signal is a sine wave with amplitude 1/3 the cutoff
+ // frequency of the test filter. The amplitude is fairly arbitrary,
+ // but we want it to be a significant fraction of the cutoff so that
+ // the cutoff varies quite a bit in the test.
+ let mod =
+ new OscillatorNode(context, {type: 'sawtooth', frequency: 1000});
+ let modGain = new GainNode(context, {gain: f0.frequency.value / 3});
+ mod.connect(modGain);
+ modGain.connect(f0[paramName]);
+ modGain.connect(f1[paramName]);
+ mod.start();
+ }
+ // Output a message showing where we're starting from.
+ should(f0[paramName].value, prefix + `At time 0, ${paramName}`)
+ .beEqualTo(f0[paramName].value);
+
+ // Schedule all of the desired changes from |changeList|.
+ options.changeList.forEach(change => {
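+          // RENDER_QUANTUM_FRAMES (128, defined in audit-util.js) converts
+          // the quantum index into a suspend time on a quantum boundary.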
+ let changeTime =
+ change.quantum * RENDER_QUANTUM_FRAMES / context.sampleRate;
+ let value = change.newValue;
+
+ // Just output a message to show what we're doing.
+ should(value, prefix + `At time ${changeTime}, ${paramName}`)
+ .beEqualTo(value);
+
+ // Update the AudioParam value of each filter using setValueAtTime or
+ // the value setter.
+ f1[paramName].setValueAtTime(value, changeTime);
+ context.suspend(changeTime)
+ .then(() => f0[paramName].value = value)
+ .then(() => context.resume());
+ });
+
+ src.start();
+
+ return context.startRendering().then(audio => {
+ let actual = audio.getChannelData(0);
+ let expected = audio.getChannelData(1);
+
+ // The output from both filters MUST match exactly if dezippering has
+ // been properly removed.
+ let match = should(actual, `${prefix}Output from ${paramName} setter`)
+ .beCloseToArray(
+ expected, {absoluteThreshold: options.threshold});
+
+        // Just an extra message saying what we're comparing, to make the
+        // output clearer. (Not strictly necessary, but nice.)
+ should(
+ match,
+ `${prefix}Output from ${
+ paramName
+ } setter matches setValueAtTime output`)
+ .beTrue();
+ });
+ }
+
+ // Filter one sample:
+ //
+ // y[n] = b0 * x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]
+ //
+ // where |x| is x[n], |xn1| is x[n-1], |xn2| is x[n-2], |yn1| is y[n-1],
+      // and |yn2| is y[n-2]. |coef| is a dictionary of the filter coefficients
+ // |b0|, |b1|, |b2|, |a1|, and |a2|.
+ function filterSample(x, coef, yn1, yn2, xn1, xn2) {
+ return coef.b0 * x + coef.b1 * xn1 + coef.b2 * xn2 - coef.a1 * yn1 -
+ coef.a2 * yn2;
+ }
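+
+      // A minimal usage sketch (illustrative only; this test does not call
+      // it): apply filterSample() across an input array, carrying the two
+      // delayed inputs and outputs forward as the filter state.
+      function filterBlock(x, coef) {
+        let y = new Float32Array(x.length);
+        let yn1 = 0, yn2 = 0, xn1 = 0, xn2 = 0;
+        for (let n = 0; n < x.length; ++n) {
+          y[n] = filterSample(x[n], coef, yn1, yn2, xn1, xn2);
+          yn2 = yn1;
+          yn1 = y[n];
+          xn2 = xn1;
+          xn1 = x[n];
+        }
+        return y;
+      }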
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html
new file mode 100644
index 0000000000..6136583b90
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html
@@ -0,0 +1,93 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Active Processing for ChannelMergerNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script id="layout-test-code">
+      // An AudioWorkletProcessor that sends a message to its AudioWorkletNode
+      // whenever the number of channels on its input changes.
+ let filePath =
+ '../the-audioworklet-interface/processors/active-processing.js';
+
+ const audit = Audit.createTaskRunner();
+
+ let context;
+
+ audit.define('initialize', (task, should) => {
+ // Create context and load the module
+ context = new AudioContext();
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'AudioWorklet module loading')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('test', (task, should) => {
+ const src = new OscillatorNode(context);
+
+ // Number of inputs for the ChannelMergerNode. Pretty arbitrary, but
+ // should not be 1.
+ const numberOfInputs = 7;
+ const merger =
+ new ChannelMergerNode(context, {numberOfInputs: numberOfInputs});
+
+ const testerNode =
+ new AudioWorkletNode(context, 'active-processing-tester', {
+ // Use as short a duration as possible to keep the test from
+ // taking too much time.
+ processorOptions: {testDuration: .5},
+ });
+
+ // Expected number of output channels from the merger node. We should
+ // start with the number of inputs, because the source (oscillator) is
+ // actively processing. When the source stops, the number of channels
+ // should change to 1.
+ const expectedValues = [numberOfInputs, 1];
+ let index = 0;
+
+ testerNode.port.onmessage = event => {
+ let count = event.data.channelCount;
+ let finished = event.data.finished;
+
+ // If we're finished, end testing.
+ if (finished) {
+ // Verify that we got the expected number of changes.
+ should(index, 'Number of distinct values')
+ .beEqualTo(expectedValues.length);
+
+ task.done();
+ return;
+ }
+
+ if (index < expectedValues.length) {
+ // Verify that the number of channels matches the expected number of
+ // channels.
+            should(count, `Test ${index}: Number of merger output channels`)
+ .beEqualTo(expectedValues[index]);
+ }
+
+ ++index;
+ };
+
+ // Create the graph and go
+ src.connect(merger).connect(testerNode).connect(context.destination);
+ src.start();
+
+ // Stop the source after a short time so we can test that the channel
+ // merger changes to not actively processing and thus produces a single
+ // channel of silence.
+ src.stop(context.currentTime + .1);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-basic.html
new file mode 100644
index 0000000000..71a62f176f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-basic.html
@@ -0,0 +1,67 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiochannelmerger-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Task: Checking constraints in ChannelMergerNode.
+ audit.define('exceptions-channels', (task, should) => {
+ let context = new OfflineAudioContext(2, 128, 44100);
+ let merger;
+
+ should(function() {
+ merger = context.createChannelMerger();
+ }, 'context.createChannelMerger()').notThrow();
+
+ should(function() {
+ merger = context.createChannelMerger(0);
+ }, 'context.createChannelMerger(0)').throw(DOMException, 'IndexSizeError');
+
+ should(function() {
+ merger = context.createChannelMerger(32);
+ }, 'context.createChannelMerger(32)').notThrow();
+
+      // Can't create a channel merger with 33 channels because that exceeds
+      // the implementation's 32-channel limit (e.g. in Chrome).
+ should(function() {
+ merger = context.createChannelMerger(33);
+ }, 'context.createChannelMerger(33)').throw(DOMException, 'IndexSizeError');
+
+ task.done();
+ });
+
+ // Task: checking the channel-related properties have the correct value
+ // and can't be changed.
+ audit.define('exceptions-properties', (task, should) => {
+ let context = new OfflineAudioContext(2, 128, 44100);
+ let merger = context.createChannelMerger();
+
+ should(merger.channelCount, 'merger.channelCount').beEqualTo(1);
+
+ should(function() {
+ merger.channelCount = 3;
+ }, 'merger.channelCount = 3').throw(DOMException, 'InvalidStateError');
+
+ should(merger.channelCountMode, 'merger.channelCountMode')
+ .beEqualTo('explicit');
+
+ should(function() {
+ merger.channelCountMode = 'max';
+ }, 'merger.channelCountMode = "max"').throw(DOMException, 'InvalidStateError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html
new file mode 100644
index 0000000000..ad74d5e004
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html
@@ -0,0 +1,82 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiochannelmerger-disconnect.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let renderQuantum = 128;
+
+ let numberOfChannels = 2;
+ let sampleRate = 44100;
+ let renderDuration = 0.5;
+ let disconnectTime = 0.5 * renderDuration;
+
+ let audit = Audit.createTaskRunner();
+
+ // Task: Check if the merger outputs a silent channel when an input is
+ // disconnected.
+ audit.define('silent-disconnect', (task, should) => {
+ let context = new OfflineAudioContext(
+ numberOfChannels, renderDuration * sampleRate, sampleRate);
+ let merger = context.createChannelMerger();
+ let source1 = context.createBufferSource();
+ let source2 = context.createBufferSource();
+
+ // Create and assign a constant buffer.
+ let bufferDCOffset = createConstantBuffer(context, 1, 1);
+ source1.buffer = source2.buffer = bufferDCOffset;
+ source1.loop = source2.loop = true;
+
+        // Connect the sources to the first two inputs of the merger. The
+        // merger should produce 6-channel output.
+ source1.connect(merger, 0, 0);
+ source2.connect(merger, 0, 1);
+ merger.connect(context.destination);
+ source1.start();
+ source2.start();
+
+        // Schedule the disconnection of |source2| halfway through the
+        // render duration.
+ context.suspend(disconnectTime).then(function() {
+ source2.disconnect();
+ context.resume();
+ });
+
+ context.startRendering()
+ .then(function(buffer) {
+ // The entire first channel of the output should be 1.
+ should(buffer.getChannelData(0), 'Channel #0')
+ .beConstantValueOf(1);
+
+ // Calculate the first zero index in the second channel.
+ let channel1 = buffer.getChannelData(1);
+ let disconnectIndex = disconnectTime * sampleRate;
+ disconnectIndex = renderQuantum *
+ Math.floor(
+ (disconnectIndex + renderQuantum - 1) / renderQuantum);
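+              // The floor((i + q - 1) / q) form rounds up to the next render
+              // quantum boundary: e.g. 0.25 s * 44100 = 11025 samples, which
+              // rounds up to 87 * 128 = 11136.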
+              let firstZeroIndex = channel1.findIndex(element => element === 0);
+
+ // The second channel should contain 1, and 0 after the
+ // disconnection.
+ should(channel1, 'Channel #1').containValues([1, 0]);
+ should(
+ firstZeroIndex, 'The index of first zero in the channel #1')
+ .beEqualTo(disconnectIndex);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input-non-default.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input-non-default.html
new file mode 100644
index 0000000000..6fe77ab763
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input-non-default.html
@@ -0,0 +1,79 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiochannelmerger-input-non-default.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/merger-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+
+ // Task: Check if an inactive input renders a silent mono channel in the
+ // output.
+ audit.define('silent-channel', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 7,
+
+ // Create a mono source buffer filled with '1'.
+ testBufferContent: [1],
+
+ // Connect the output of source into the 7th input of merger.
+ mergerInputIndex: 6,
+
+ // 7th channel should be '1'.
+ expected: [0, 0, 0, 0, 0, 0, 1],
+ }).then(() => task.done());
+ });
+
+
+ // Task: Check if a stereo input is being down-mixed to mono channel
+ // correctly based on the mixing rule.
+ audit.define('stereo-down-mixing', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 7,
+
+ // Create a stereo buffer filled with '1' and '2' for left and right
+ // channels respectively.
+ testBufferContent: [1, 2],
+
+ // Connect the output of source into the 7th input of merger.
+ mergerInputIndex: 6,
+
+ // The result of summed and down-mixed stereo audio should be 1.5.
+ // (= 1 * 0.5 + 2 * 0.5)
+ expected: [0, 0, 0, 0, 0, 0, 1.5],
+ }).then(() => task.done());
+ });
+
+
+ // Task: Check if 3-channel input gets processed by the 'discrete' mixing
+ // rule.
+ audit.define('undefined-channel-layout', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 7,
+
+ // Create a 3-channel buffer filled with '1', '2', and '3'
+ // respectively.
+ testBufferContent: [1, 2, 3],
+
+ // Connect the output of source into the 7th input of merger.
+ mergerInputIndex: 6,
+
+          // The expected output is 1 because a 3-channel layout is not
+          // canonical, so input channels 2 and 3 are dropped by the
+          // 'discrete' mixing rule.
+ expected: [0, 0, 0, 0, 0, 0, 1],
+ }).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input.html
new file mode 100644
index 0000000000..66a70dcb3b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-input.html
@@ -0,0 +1,113 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ audiochannelmerger-input.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/merger-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Task: Check if an inactive input renders a silent mono channel in the
+ // output.
+ audit.define('silent-channel', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 6,
+
+ // Create a mono source buffer filled with '1'.
+ testBufferContent: [1],
+
+ // Connect the output of source into the 4th input of merger.
+ mergerInputIndex: 3,
+
+ // All channels should contain 0, except channel 4 which should be 1.
+ expected: [0, 0, 0, 1, 0, 0],
+ }).then(() => task.done());
+ });
+
+
+ // Task: Check if a stereo input is being down-mixed to mono channel
+ // correctly based on the mixing rule.
+ audit.define('stereo-down-mixing', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 6,
+
+ // Create a stereo buffer filled with '1' and '2' for left and right
+ // channels respectively.
+ testBufferContent: [1, 2],
+
+ // Connect the output of source into the 1st input of merger.
+ mergerInputIndex: undefined,
+
+ // The result of summed and down-mixed stereo audio should be 1.5.
+ // (= 1 * 0.5 + 2 * 0.5)
+ expected: [1.5, 0, 0, 0, 0, 0],
+ }).then(() => task.done());
+ });
+
+
+ // Task: Check if 3-channel input gets processed by the 'discrete' mixing
+ // rule.
+ audit.define('undefined-channel-layout', (task, should) => {
+ testMergerInput(should, {
+ numberOfChannels: 6,
+
+ // Create a 3-channel buffer filled with '1', '2', and '3'
+ // respectively.
+ testBufferContent: [1, 2, 3],
+
+ // Connect the output of source into the 1st input of merger.
+ mergerInputIndex: undefined,
+
+          // The expected output is 1 because a 3-channel layout is not
+          // canonical, so input channels 2 and 3 are dropped by the
+          // 'discrete' mixing rule.
+ expected: [1, 0, 0, 0, 0, 0],
+ }).then(() => task.done());
+ });
+
+
+ // Task: Merging two inputs into a single stereo stream.
+ audit.define('merging-to-stereo', (task, should) => {
+
+        // For this test, the number of channels should be 2.
+ let context = new OfflineAudioContext(2, 128, 44100);
+ let merger = context.createChannelMerger();
+ let source1 = context.createBufferSource();
+ let source2 = context.createBufferSource();
+
+        // Create mono DC-offset buffers filled with 1 and -1 respectively,
+        // and assign them to the buffer source nodes.
+ let positiveDCOffset = createConstantBuffer(context, 128, 1);
+ let negativeDCOffset = createConstantBuffer(context, 128, -1);
+ source1.buffer = positiveDCOffset;
+ source2.buffer = negativeDCOffset;
+
+        // Connect: source1 (+1) => merger input #0, source2 (-1) => merger input #1.
+ source1.connect(merger, 0, 0);
+ source2.connect(merger, 0, 1);
+ merger.connect(context.destination);
+ source1.start();
+ source2.start();
+
+ context.startRendering().then(function(buffer) {
+
+ // Channel#0 = 1, Channel#1 = -1
+ should(buffer.getChannelData(0), 'Channel #0').beConstantValueOf(1);
+ should(buffer.getChannelData(1), 'Channel #1').beConstantValueOf(-1);
+
+ task.done();
+ });
+ });
+
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/ctor-channelmerger.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/ctor-channelmerger.html
new file mode 100644
index 0000000000..0d6b45c56d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/ctor-channelmerger.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: ChannelMerger
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'ChannelMergerNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node =
+ testDefaultConstructor(should, 'ChannelMergerNode', context, {
+ prefix: prefix,
+ numberOfInputs: 6,
+ numberOfOutputs: 1,
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ });
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'ChannelMergerNode', {
+ channelCount: {
+ value: 1,
+ isFixed: true,
+ exceptionType: 'InvalidStateError'
+ },
+ channelCountMode: {
+ value: 'explicit',
+ isFixed: true,
+ exceptionType: 'InvalidStateError'
+ }
+ });
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+ let options = {
+ numberOfInputs: 3,
+ numberOfOutputs: 9,
+ channelInterpretation: 'discrete'
+ };
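+        // Note: numberOfOutputs is not a ChannelMergerNode option; a merger
+        // always has exactly one output, as the assertion below verifies.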
+
+ should(
+ () => {
+ node = new ChannelMergerNode(context, options);
+ },
+ 'node1 = new ChannelMergerNode(context, ' +
+ JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node.numberOfInputs, 'node1.numberOfInputs')
+ .beEqualTo(options.numberOfInputs);
+ should(node.numberOfOutputs, 'node1.numberOfOutputs').beEqualTo(1);
+ should(node.channelInterpretation, 'node1.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ options = {numberOfInputs: 99};
+ should(
+ () => {
+ node = new ChannelMergerNode(context, options);
+ },
+ 'new ChannelMergerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ options = {channelCount: 3};
+ should(
+ () => {
+ node = new ChannelMergerNode(context, options);
+ },
+ 'new ChannelMergerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+
+ options = {channelCountMode: 'max'};
+ should(
+ () => {
+ node = new ChannelMergerNode(context, options);
+ },
+ 'new ChannelMergerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/audiochannelsplitter.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/audiochannelsplitter.html
new file mode 100644
index 0000000000..954c71a96b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/audiochannelsplitter.html
@@ -0,0 +1,141 @@
+<!DOCTYPE html>
+<!--
+Tests that ChannelSplitterNode works correctly.
+-->
+<html>
+ <head>
+ <title>
+ audiochannelsplitter.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100.0;
+ let lengthInSampleFrames = 512;
+
+ let context = 0;
+ let sourceBuffer;
+ let sourceNode;
+ let channelSplitter;
+ let channelMerger;
+
+ function createStereoBufferWithDCOffset(length, sampleRate, offset) {
+ let buffer = context.createBuffer(2, length, sampleRate);
+ let n = buffer.length;
+ let channelL = buffer.getChannelData(0);
+ let channelR = buffer.getChannelData(1);
+
+ for (let i = 0; i < n; ++i) {
+ channelL[i] = offset;
+ channelR[i] = -1.0 * offset;
+ }
+
+ return buffer;
+ }
+
+ // checkResult() checks that the rendered buffer is stereo and that the
+ // left channel is all -1 and right channel all +1. In other words, we've
+ // reversed the order of the two channels.
+ function checkResult(buffer, should) {
+ let success = true;
+
+ if (buffer.numberOfChannels == 2) {
+ let bufferDataL = buffer.getChannelData(0);
+ let bufferDataR = buffer.getChannelData(1);
+
+ success = should(bufferDataL, 'Left channel').beConstantValueOf(-1) &&
+ success;
+ success = should(bufferDataR, 'Right channel').beConstantValueOf(1) &&
+ success;
+ } else {
+ success = false;
+ }
+
+ should(success, 'Left and right channels were exchanged')
+ .message('correctly', 'incorrectly');
+ }
+
+ audit.define(
+ {
+ label: 'construction',
+ description: 'Construction of ChannelSplitterNode'
+ },
+ function(task, should) {
+
+ // Create stereo offline audio context.
+ context =
+ new OfflineAudioContext(2, lengthInSampleFrames, sampleRate);
+
+ let splitternode;
+ should(() => {
+              splitternode = context.createChannelSplitter(0);
+ }, 'createChannelSplitter(0)').throw(DOMException, 'IndexSizeError');
+
+ should(() => {
+ splitternode = context.createChannelSplitter(33);
+ }, 'createChannelSplitter(33)').throw(DOMException, 'IndexSizeError');
+
+ should(() => {
+ splitternode = context.createChannelSplitter(32);
+ }, 'splitternode = context.createChannelSplitter(32)').notThrow();
+
+ should(splitternode.numberOfOutputs, 'splitternode.numberOfOutputs')
+ .beEqualTo(32);
+ should(splitternode.numberOfInputs, 'splitternode.numberOfInputs')
+                .beEqualTo(1);
+
+ should(() => {
+ splitternode = context.createChannelSplitter();
+ }, 'splitternode = context.createChannelSplitter()').notThrow();
+
+ should(splitternode.numberOfOutputs, 'splitternode.numberOfOutputs')
+ .beEqualTo(6);
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'functionality',
+ description: 'Functionality of ChannelSplitterNode'
+ },
+ function(task, should) {
+
+ // Create a stereo buffer, with all +1 values in left channel, all
+ // -1 in right channel.
+ sourceBuffer = createStereoBufferWithDCOffset(
+ lengthInSampleFrames, sampleRate, 1);
+
+ sourceNode = context.createBufferSource();
+ sourceNode.buffer = sourceBuffer;
+
+            // Create a channel splitter and connect it so that it splits the
+            // stereo stream into two mono streams.
+ channelSplitter = context.createChannelSplitter(2);
+ sourceNode.connect(channelSplitter);
+
+ // Create a channel merger to merge the output of channel splitter.
+ channelMerger = context.createChannelMerger();
+ channelMerger.connect(context.destination);
+
+ // When merging, exchange channel layout: left->right, right->left
+ channelSplitter.connect(channelMerger, 0, 1);
+ channelSplitter.connect(channelMerger, 1, 0);
+
+ sourceNode.start(0);
+
+ context.startRendering()
+ .then(buffer => checkResult(buffer, should))
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/ctor-channelsplitter.html b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/ctor-channelsplitter.html
new file mode 100644
index 0000000000..b7165bac33
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/ctor-channelsplitter.html
@@ -0,0 +1,115 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: ChannelSplitter
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'ChannelSplitterNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ testDefaultConstructor(should, 'ChannelSplitterNode', context, {
+ prefix: 'node0',
+ numberOfInputs: 1,
+ numberOfOutputs: 6,
+ channelCount: 6,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'discrete'
+ });
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'ChannelSplitterNode', {
+ channelCount: {
+ value: 6,
+ isFixed: true,
+ exceptionType: 'InvalidStateError'
+ },
+ channelCountMode: {
+ value: 'explicit',
+ isFixed: true,
+ exceptionType: 'InvalidStateError'
+ },
+ channelInterpretation: {
+ value: 'discrete',
+ isFixed: true,
+ exceptionType: 'InvalidStateError'
+ },
+ });
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+ let options = {
+ numberOfInputs: 3,
+ numberOfOutputs: 9,
+ channelInterpretation: 'discrete'
+ };
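+        // Note: numberOfInputs is not a ChannelSplitterNode option; a splitter
+        // always has exactly one input, as the assertion below verifies.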
+
+ should(
+ () => {
+ node = new ChannelSplitterNode(context, options);
+ },
+ 'node1 = new ChannelSplitterNode(context, ' +
+ JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node.numberOfInputs, 'node1.numberOfInputs').beEqualTo(1);
+ should(node.numberOfOutputs, 'node1.numberOfOutputs')
+ .beEqualTo(options.numberOfOutputs);
+ should(node.channelInterpretation, 'node1.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ options = {numberOfOutputs: 99};
+ should(
+ () => {
+ node = new ChannelSplitterNode(context, options);
+ },
+ 'new ChannelSplitterNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'IndexSizeError');
+
+ options = {channelCount: 3};
+ should(
+ () => {
+ node = new ChannelSplitterNode(context, options);
+ },
+ 'new ChannelSplitterNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+
+ options = {channelCountMode: 'max'};
+ should(
+ () => {
+ node = new ChannelSplitterNode(context, options);
+ },
+ 'new ChannelSplitterNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-basic.html
new file mode 100644
index 0000000000..4f925df5cd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-basic.html
@@ -0,0 +1,85 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Basic ConstantSourceNode Tests
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/start-stop-exceptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context = new AudioContext();
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('createConstantSource()', (task, should) => {
+ let node;
+ let prefix = 'Factory method: ';
+
+ should(() => {
+ node = context.createConstantSource();
+ }, prefix + 'node = context.createConstantSource()').notThrow();
+ should(
+ node instanceof ConstantSourceNode,
+ prefix + 'node instance of ConstantSourceNode')
+ .beEqualTo(true);
+
+ verifyNodeDefaults(should, node, prefix);
+
+ task.done();
+ });
+
+ audit.define('new ConstantSourceNode()', (task, should) => {
+ let node;
+ let prefix = 'Constructor: ';
+
+ should(() => {
+ node = new ConstantSourceNode(context);
+ }, prefix + 'node = new ConstantSourceNode()').notThrow();
+ should(
+ node instanceof ConstantSourceNode,
+ prefix + 'node instance of ConstantSourceNode')
+ .beEqualTo(true);
+
+
+ verifyNodeDefaults(should, node, prefix);
+
+ task.done();
+ });
+
+ audit.define('start/stop exceptions', (task, should) => {
+ let node = new ConstantSourceNode(context);
+
+ testStartStop(should, node);
+ task.done();
+ });
+
+ function verifyNodeDefaults(should, node, prefix) {
+ should(node.numberOfInputs, prefix + 'node.numberOfInputs')
+ .beEqualTo(0);
+ should(node.numberOfOutputs, prefix + 'node.numberOfOutputs')
+ .beEqualTo(1);
+ should(node.channelCount, prefix + 'node.channelCount').beEqualTo(2);
+ should(node.channelCountMode, prefix + 'node.channelCountMode')
+ .beEqualTo('max');
+ should(
+ node.channelInterpretation, prefix + 'node.channelInterpretation')
+ .beEqualTo('speakers');
+
+ should(node.offset.value, prefix + 'node.offset.value').beEqualTo(1);
+ should(node.offset.defaultValue, prefix + 'node.offset.defaultValue')
+ .beEqualTo(1);
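+        // 3.4028235e38 is the most-positive single-precision float; the
+        // nominal range of |offset| spans the full single-float range.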
+ should(node.offset.minValue, prefix + 'node.offset.minValue')
+ .beEqualTo(Math.fround(-3.4028235e38));
+ should(node.offset.maxValue, prefix + 'node.offset.maxValue')
+ .beEqualTo(Math.fround(3.4028235e38));
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-onended.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-onended.html
new file mode 100644
index 0000000000..64bc54f21b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-onended.html
@@ -0,0 +1,38 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test ConstantSourceNode onended
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 44100.0;
+ // Number of frames that the source will run; fairly arbitrary
+ let numberOfFrames = 32;
+ // Number of frames to render; arbitrary, but should be larger than
+      // numberOfFrames.
+ let renderFrames = 16 * numberOfFrames;
+
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let src = new ConstantSourceNode(context);
+ src.connect(context.destination);
+
+ let tester = async_test('ConstantSourceNode onended event fired');
+
+ src.onended = function() {
+ tester.step(function() {
+ assert_true(true, 'ConstantSourceNode.onended fired');
+ });
+ tester.done();
+ };
+
+ src.start();
+ src.stop(numberOfFrames / context.sampleRate);
+
+ context.startRendering();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-output.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-output.html
new file mode 100644
index 0000000000..5990376cff
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/constant-source-output.html
@@ -0,0 +1,207 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test ConstantSourceNode Output
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/audioparam-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let renderDuration = 0.125;
+ let renderFrames = sampleRate * renderDuration;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('constant source', (task, should) => {
+ // Verify a constant source outputs the correct (fixed) constant.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = new ConstantSourceNode(context, {offset: 0.5});
+ node.connect(context.destination);
+ node.start();
+
+ context.startRendering()
+ .then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ let expected = new Float32Array(actual.length);
+ expected.fill(node.offset.value);
+
+ should(actual, 'Basic: ConstantSourceNode({offset: 0.5})')
+ .beEqualToArray(expected);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('stop before start', (task, should) => {
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = new ConstantSourceNode(context, {offset: 1});
+ node.connect(context.destination);
+ node.start(61 / context.sampleRate);
+ node.stop(31 / context.sampleRate);
+
+ context.startRendering()
+ .then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ should(actual,
+ "ConstantSourceNode with stop before " +
+ "start must output silence")
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('stop equal to start', (task, should) => {
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = new ConstantSourceNode(context, {offset: 1});
+ node.connect(context.destination);
+ node.start(31 / context.sampleRate);
+ node.stop(31 / context.sampleRate);
+
+ context.startRendering()
+ .then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ should(actual,
+ "ConstantSourceNode with stop equal to start " +
+                     "must output silence")
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('start/stop', (task, should) => {
+ // Verify a constant source starts and stops at the correct time and has
+ // the correct (fixed) value.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let node = new ConstantSourceNode(context, {offset: 1});
+ node.connect(context.destination);
+
+ let startFrame = 10;
+ let stopFrame = 300;
+
+ node.start(startFrame / context.sampleRate);
+ node.stop(stopFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ let expected = new Float32Array(actual.length);
+ // The expected output is all 1s from start to stop time.
+ expected.fill(0);
+
+ for (let k = startFrame; k < stopFrame; ++k) {
+ expected[k] = node.offset.value;
+ }
+
+ let prefix = 'start/stop: ';
+ should(actual.slice(0, startFrame),
+ prefix + 'ConstantSourceNode frames [0, ' +
+ startFrame + ')')
+ .beConstantValueOf(0);
+
+ should(actual.slice(startFrame, stopFrame),
+ prefix + 'ConstantSourceNode frames [' +
+ startFrame + ', ' + stopFrame + ')')
+ .beConstantValueOf(1);
+
+ should(
+ actual.slice(stopFrame),
+ prefix + 'ConstantSourceNode frames [' + stopFrame +
+ ', ' + renderFrames + ')')
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+
+ });
+
+ audit.define('basic automation', (task, should) => {
+ // Verify that automation works as expected.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let source = context.createConstantSource();
+ source.connect(context.destination);
+
+ let rampEndTime = renderDuration / 2;
+ source.offset.setValueAtTime(0.5, 0);
+ source.offset.linearRampToValueAtTime(1, rampEndTime);
+
+ source.start();
+
+ context.startRendering()
+ .then(function(buffer) {
+ let actual = buffer.getChannelData(0);
+ let expected = createLinearRampArray(
+ 0, rampEndTime, 0.5, 1, context.sampleRate);
+
+ let rampEndFrame = Math.ceil(rampEndTime * context.sampleRate);
+ let prefix = 'Automation: ';
+
+ should(actual.slice(0, rampEndFrame),
+ prefix + 'ConstantSourceNode.linearRamp(1, 0.5)')
+ .beCloseToArray(expected, {
+ // Experimentally determined threshold.
+ relativeThreshold: 7.1610e-7
+ });
+
+ should(actual.slice(rampEndFrame),
+ prefix + 'ConstantSourceNode after ramp')
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('connected audioparam', (task, should) => {
+ // Verify the constant source output with connected AudioParam produces
+ // the correct output.
+        let context = new OfflineAudioContext(2, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+ let source = new ConstantSourceNode(context, {offset: 1});
+ let osc = context.createOscillator();
+ let merger = context.createChannelMerger(2);
+ merger.connect(context.destination);
+
+ source.connect(merger, 0, 0);
+ osc.connect(merger, 0, 1);
+ osc.connect(source.offset);
+
+ osc.start();
+ let sourceStartFrame = 10;
+ source.start(sourceStartFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(function(buffer) {
+ // Channel 0 and 1 should be identical, except channel 0 (the
+ // source) is silent at the beginning.
+ let actual = buffer.getChannelData(0);
+ let expected = buffer.getChannelData(1);
+ // The expected output should be oscillator + 1 because offset
+ // is 1.
+ expected = expected.map(x => 1 + x);
+ let prefix = 'Connected param: ';
+
+ // The initial part of the output should be silent because the
+ // source node hasn't started yet.
+ should(
+ actual.slice(0, sourceStartFrame),
+ prefix + 'ConstantSourceNode frames [0, ' + sourceStartFrame +
+ ')')
+ .beConstantValueOf(0);
+ // The rest of the output should be the same as the oscillator (in
+ // channel 1)
+ should(
+ actual.slice(sourceStartFrame),
+ prefix + 'ConstantSourceNode frames [' + sourceStartFrame +
+ ', ' + renderFrames + ')')
+                  .beCloseToArray(
+                      expected.slice(sourceStartFrame), {absoluteThreshold: 0});
+
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/ctor-constantsource.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/ctor-constantsource.html
new file mode 100644
index 0000000000..ea4a65e146
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/ctor-constantsource.html
@@ -0,0 +1,50 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: ConstantSource
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'ConstantSourceNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node =
+ testDefaultConstructor(should, 'ConstantSourceNode', context, {
+ prefix: prefix,
+ numberOfInputs: 0,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(
+ should, node, prefix, [{name: 'offset', value: 1}]);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html
new file mode 100644
index 0000000000..04f61106c1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html
@@ -0,0 +1,135 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test the ConstantSourceNode Interface</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+test(function(t) {
+ var ac = new AudioContext();
+
+ var csn = ac.createConstantSource();
+ assert_equals(csn.offset.value, 1.0, "Default offset is 1.0");
+
+ csn = new ConstantSourceNode(ac);
+ assert_equals(csn.offset.value, 1.0, "Default offset is 1.0");
+
+ csn = new ConstantSourceNode(ac, {offset: -0.25});
+ assert_equals(csn.offset.value, -0.25, "Offset can be set during construction");
+}, "ConstantSourceNode can be constructed");
+
+test(function(t) {
+ var ac = new AudioContext();
+
+ var csn = ac.createConstantSource();
+
+ assert_throws_dom("InvalidStateError", function() {
+ csn.stop(1);
+ }, "Start must be called before stop");
+
+ assert_throws_js(RangeError, function() {
+ csn.start(-1);
+  }, "'when' cannot be negative");
+
+ csn.start(0);
+ assert_throws_js(RangeError, function() {
+ csn.stop(-1);
+  }, "'when' cannot be negative");
+}, "ConstantSourceNode stop and start");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.connect(ac.destination);
+  csn.start();
+  csn.stop(1024/44100);
+ csn.onended = function(e) {
+ t.step(function() {
+ assert_equals(e.type, "ended", "Event type should be 'ended', received: " + e.type);
+ });
+ t.done();
+ }
+ ac.startRendering();
+}, "ConstantSourceNode onended event");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.connect(ac.destination);
+  csn.start(512/44100);
+  csn.stop(1024/44100);
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ var result = e.renderedBuffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ if (i >= 512 && i < 1024) {
+ assert_equals(result[i], 1.0, "sample " + i + " should equal 1.0");
+ } else {
+ assert_equals(result[i], 0.0, "sample " + i + " should equal 0.0");
+ }
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode start and stop 'when' arguments work");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.offset.value = 0.25;
+ csn.connect(ac.destination);
+  csn.start();
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ var result = e.renderedBuffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ assert_equals(result[i], 0.25, "sample " + i + " should equal 0.25");
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode with no automation");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+
+ var timeConstant = 2.0;
+ var offsetStart = 0.25;
+ var offsetEnd = 0.1;
+
+ var csn = ac.createConstantSource();
+ csn.offset.value = offsetStart;
+ csn.offset.setTargetAtTime(offsetEnd, 1024/ac.sampleRate, timeConstant);
+ csn.connect(ac.destination);
+  csn.start();
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ // create buffer with expected values
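+      // setTargetAtTime follows v(t) = V1 + (V0 - V1) * exp(-(t - T0)/tau),
+      // with V0 = offsetStart, V1 = offsetEnd, T0 = 1024/ac.sampleRate, and
+      // tau = timeConstant; the loop below computes exactly this curve.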
+ var buffer = ac.createBuffer(1, 2048, ac.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ if (i < 1024) {
+ buffer.getChannelData(0)[i] = offsetStart;
+ } else {
+          var time = (i-1024)/ac.sampleRate;
+ buffer.getChannelData(0)[i] = offsetEnd + (offsetStart - offsetEnd)*Math.exp(-time/timeConstant);
+ }
+ }
+
+ var result = e.renderedBuffer.getChannelData(0);
+ var expected = buffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ assert_true(Math.abs(result[i] - expected[i]) < 1.342e-6, "sample " + i + " should equal " + expected[i]);
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode with automation");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html
new file mode 100644
index 0000000000..f0f9f771bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html
@@ -0,0 +1,93 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Active Processing for ConvolverNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script id="layout-test-code">
+      // An AudioWorkletProcessor that sends a message to its AudioWorkletNode
+      // whenever the number of channels on its input changes.
+ let filePath =
+ '../the-audioworklet-interface/processors/active-processing.js';
+
+ const audit = Audit.createTaskRunner();
+
+ let context;
+
+ audit.define('initialize', (task, should) => {
+ // Create context and load the module
+ context = new AudioContext();
+ should(
+ context.audioWorklet.addModule(filePath),
+ 'AudioWorklet module loading')
+ .beResolved()
+ .then(() => task.done());
+ });
+
+ audit.define('test', (task, should) => {
+ const src = new OscillatorNode(context);
+
+ const response = new AudioBuffer({numberOfChannels: 2, length: 150,
+ sampleRate: context.sampleRate});
+
+ const conv = new ConvolverNode(context, {buffer: response});
+
+ const testerNode =
+ new AudioWorkletNode(context, 'active-processing-tester', {
+ // Use as short a duration as possible to keep the test from
+ // taking too much time.
+ processorOptions: {testDuration: .5},
+ });
+
+ // Expected number of output channels from the convolver node. We should
+ // start with the number of inputs, because the source (oscillator) is
+ // actively processing. When the source stops, the number of channels
+ // should change to 1.
+ const expectedValues = [2, 1];
+ let index = 0;
+
+ testerNode.port.onmessage = event => {
+ let count = event.data.channelCount;
+ let finished = event.data.finished;
+
+ // If we're finished, end testing.
+ if (finished) {
+ // Verify that we got the expected number of changes.
+ should(index, 'Number of distinct values')
+ .beEqualTo(expectedValues.length);
+
+ task.done();
+ return;
+ }
+
+ if (index < expectedValues.length) {
+ // Verify that the number of channels matches the expected number of
+ // channels.
+ should(count, `Test ${index}: Number of convolver output channels`)
+ .beEqualTo(expectedValues[index]);
+ }
+
+ ++index;
+ };
+
+ // Create the graph and go
+ src.connect(conv).connect(testerNode).connect(context.destination);
+ src.start();
+
+ // Stop the source after a short time so we can test that the convolver
+ // changes to not actively processing and thus produces a single channel
+ // of silence.
+ src.stop(context.currentTime + .1);
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolution-mono-mono.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolution-mono-mono.html
new file mode 100644
index 0000000000..570efebe22
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolution-mono-mono.html
@@ -0,0 +1,62 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ convolution-mono-mono.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/convolution-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+      // Tests ConvolverNode processing a mono channel with a mono impulse
+      // response.
+
+ // To test the convolver, we convolve two square pulses together to
+ // produce a triangular pulse. To verify the result is correct we
+ // check several parts of the result. First, we make sure the initial
+ // part of the result is zero (due to the latency in the convolver).
+ // Next, the triangular pulse should match the theoretical result to
+ // within some roundoff. After the triangular pulse, the result
+ // should be exactly zero, but round-off prevents that. We make sure
+ // the part after the pulse is sufficiently close to zero. Finally,
+ // the result should be exactly zero because the inputs are exactly
+ // zero.
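+      //
+      // (Convolving two length-N unit-amplitude square pulses yields a
+      // triangular pulse of length 2*N - 1 with a peak value of N.)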
+ audit.define('test', function(task, should) {
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ let squarePulse = createSquarePulseBuffer(context, pulseLengthFrames);
+ let trianglePulse =
+ createTrianglePulseBuffer(context, 2 * pulseLengthFrames);
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = squarePulse;
+
+ let convolver = context.createConvolver();
+ convolver.normalize = false;
+ convolver.buffer = squarePulse;
+
+ bufferSource.connect(convolver);
+ convolver.connect(context.destination);
+
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => {
+ checkConvolvedResult(buffer, trianglePulse, should);
+ })
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-cascade.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-cascade.html
new file mode 100644
index 0000000000..20bdfbdf4e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-cascade.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Cascade of Mono Convolvers
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // Arbitrary sample rate and reasonably short duration
+ let sampleRate = 8000;
+ let duration = 0.25;
+ let renderFrames = duration * sampleRate;
+
+ audit.define(
+ {label: 'cascade-mono', description: 'Cascaded mono convolvers'},
+ (task, should) => {
+ // Cascade two convolvers with mono responses and verify that the
+ // output is not silent.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+
+ let b0 =
+ new AudioBuffer({length: 5, sampleRate: context.sampleRate});
+ b0.getChannelData(0)[1] = 1;
+ let c0 = new ConvolverNode(
+ context, {disableNormalization: true, buffer: b0});
+
+ let b1 =
+ new AudioBuffer({length: 5, sampleRate: context.sampleRate});
+ b1.getChannelData(0)[2] = 1;
+
+ let c1 = new ConvolverNode(
+ context, {disableNormalization: true, buffer: b1});
+
+ let src = new OscillatorNode(context);
+
+ src.connect(c0).connect(c1).connect(context.destination);
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ // Just verify the output is not silent
+ let audio = audioBuffer.getChannelData(0);
+
+ should(audio, 'Output of cascaded mono convolvers')
+ .notBeConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-channels.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-channels.html
new file mode 100644
index 0000000000..ac4f198d7c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-channels.html
@@ -0,0 +1,43 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Supported Number of Channels for ConvolverNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('channel-count-test', (task, should) => {
+ // Just need a context to create nodes on, so any allowed length and
+ // rate is ok.
+ let context = new OfflineAudioContext(1, 1, 48000);
+
+ let success = true;
+
+ for (let count = 1; count <= 32; ++count) {
+ let convolver = context.createConvolver();
+ let buffer = context.createBuffer(count, 1, context.sampleRate);
+ let message = 'ConvolverNode with buffer of ' + count + ' channels';
+
+ if (count == 1 || count == 2 || count == 4) {
+ // These are the only valid channel counts for the buffer.
+ should(() => convolver.buffer = buffer, message).notThrow();
+ } else {
+ should(() => convolver.buffer = buffer, message)
+ .throw(DOMException, 'NotSupportedError');
+ }
+ }
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html
new file mode 100644
index 0000000000..300b43622b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html
@@ -0,0 +1,406 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Convolver Channel Outputs for Response with 1 channel
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Test various convolver configurations when the convolver response has
+ // one channel (mono).
+
+ // This is somewhat arbitrary. It is the minimum value for which tests
+ // pass with both FFmpeg and KISS FFT implementations for 256 points.
+ // The value was similar for each implementation.
+ const absoluteThreshold = Math.pow(2, -21);
+
+ // Fairly arbitrary sample rate, except that we want the rate to be a
+      // power of two so that 1/sampleRate is exactly representable as a
+ // single-precision float.
+ let sampleRate = 8192;
+
+ // A fairly arbitrary number of frames, except the number of frames should
+ // be more than a few render quanta.
+ let renderFrames = 10 * 128;
+
+ let audit = Audit.createTaskRunner();
+
+ // Convolver response
+ let response;
+
+ audit.define(
+ {
+ label: 'initialize',
+ description: 'Convolver response with one channel'
+ },
+ (task, should) => {
+ // Convolver response
+ should(
+ () => {
+ response = new AudioBuffer(
+ {numberOfChannels: 1, length: 2, sampleRate: sampleRate});
+ response.getChannelData(0)[1] = 1;
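+                  // An impulse at index 1 makes this response a one-sample
+                  // delay.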
+ },
+ 'new AudioBuffer({numberOfChannels: 1, length: 2, sampleRate: ' +
+ sampleRate + '})')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define(
+ {label: '1-channel input', description: 'produces 1-channel output'},
+ (task, should) => {
+ // Create a 3-channel context: channel 0 = convolver under test,
+ // channel 1: test that convolver output is not stereo, channel 2:
+ // expected output. The context MUST be discrete so that the
+ // channels don't get mixed in some unexpected way.
+ let context = new OfflineAudioContext(3, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+ let src = new OscillatorNode(context);
+ let conv = new ConvolverNode(
+ context, {disableNormalization: true, buffer: response});
+
+ // Splitter node to verify that the output of the convolver is mono.
+ // channelInterpretation must be 'discrete' so we don't do any
+ // mixing of the input to the node.
+ let splitter = new ChannelSplitterNode(
+ context,
+ {numberOfOutputs: 2, channelInterpretation: 'discrete'});
+
+ // Final merger to feed all of the individual channels into the
+ // destination.
+ let merger = new ChannelMergerNode(context, {numberOfInputs: 3});
+
+ src.connect(conv).connect(splitter);
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+
+ // The convolver response is a 1-sample delay. Use a delay node to
+ // implement this.
+ let delay =
+ new DelayNode(context, {delayTime: 1 / context.sampleRate});
+ src.connect(delay);
+ delay.connect(merger, 0, 2);
+
+ merger.connect(context.destination);
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ // Extract out the three channels
+ let actual = audioBuffer.getChannelData(0);
+ let c1 = audioBuffer.getChannelData(1);
+ let expected = audioBuffer.getChannelData(2);
+
+ // c1 is expected to be zero.
+ should(c1, '1: Channel 1').beConstantValueOf(0);
+
+                // The expected and actual results should match closely.
+ should(actual, 'Convolver output')
+ .beCloseToArray(expected,
+ {absoluteThreshold: absoluteThreshold});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: '2-channel input', description: 'produces 2-channel output'},
+ (task, should) => {
+ downMixTest({numberOfInputs: 2, prefix: '2'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '3-channel input',
+ description: '3->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ downMixTest({numberOfInputs: 3, prefix: '3'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '4-channel input',
+ description: '4->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ downMixTest({numberOfInputs: 4, prefix: '4'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '5.1-channel input',
+ description: '5.1->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ // Scale tolerance by maximum amplitude expected in down-mix
+ // output.
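+            // Per the Web Audio spec, the 5.1 -> stereo down-mix is
+            //   outL = L + sqrt(0.5) * (C + SL)
+            //   outR = R + sqrt(0.5) * (C + SR)
+            // so with unit-amplitude sources the peak amplitude is
+            // 1 + 2 * sqrt(0.5), which scales the threshold below.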
+ let threshold = (1.0 + Math.sqrt(0.5) * 2) * absoluteThreshold;
+
+ downMixTest({numberOfInputs: 6, prefix: '5.1',
+ absoluteThreshold: threshold}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '3-channel input, explicit',
+ description: '3->2 explicit downmix producing 2-channel output'
+ },
+ (task, should) => {
+ downMixTest(
+ {
+ channelCountMode: 'explicit',
+ numberOfInputs: 3,
+ prefix: '3 chan downmix explicit'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '4-channel input, explicit',
+ description: '4->2 explicit downmix producing 2-channel output'
+ },
+ (task, should) => {
+ downMixTest(
+ {
+ channelCountMode: 'explicit',
+ numberOfInputs: 4,
+ prefix: '4 chan downmix explicit'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '5.1-channel input, explicit',
+ description: '5.1->2 explicit downmix producing 2-channel output'
+ },
+ (task, should) => {
+ // Scale tolerance by maximum amplitude expected in down-mix
+ // output.
+ let threshold = (1.0 + Math.sqrt(0.5) * 2) * absoluteThreshold;
+
+ downMixTest(
+ {
+ channelCountMode: 'explicit',
+ numberOfInputs: 6,
+ prefix: '5.1 chan downmix explicit',
+ absoluteThreshold: threshold
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'mono-upmix-explicit',
+ description: '1->2 upmix, count mode explicit'
+ },
+ (task, should) => {
+ upMixTest(should, {channelCountMode: 'explicit'})
+ .then(buffer => {
+ let length = buffer.length;
+ let input = buffer.getChannelData(0);
+ let out0 = buffer.getChannelData(1);
+ let out1 = buffer.getChannelData(2);
+
+                  // The convolver is basically a one-sample delay. Verify
+                  // that each channel is delayed by one sample.
+ should(out0.slice(1), '1->2 explicit upmix: channel 0')
+ .beCloseToArray(
+ input.slice(0, length - 1),
+ {absoluteThreshold: absoluteThreshold});
+ should(out1.slice(1), '1->2 explicit upmix: channel 1')
+ .beCloseToArray(
+ input.slice(0, length - 1),
+ {absoluteThreshold: absoluteThreshold});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'mono-upmix-clamped-max',
+ description: '1->2 upmix, count mode clamped-max'
+ },
+ (task, should) => {
+ upMixTest(should, {channelCountMode: 'clamped-max'})
+ .then(buffer => {
+ let length = buffer.length;
+ let input = buffer.getChannelData(0);
+ let out0 = buffer.getChannelData(1);
+ let out1 = buffer.getChannelData(2);
+
+ // The convolver is basically a one-sample delay. With a mono
+ // input, the count set to 2, and a mode of 'clamped-max', the
+                  // output should be mono.
+ should(out0.slice(1), '1->2 clamped-max upmix: channel 0')
+ .beCloseToArray(
+ input.slice(0, length - 1),
+ {absoluteThreshold: absoluteThreshold});
+ should(out1, '1->2 clamped-max upmix: channel 1')
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ function downMixTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are for
+ // the stereo output of the convolver and the next two channels are for
+ // the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+        // Create oscillators for use as the input. The type and frequency are
+        // arbitrary, except that the oscillators must differ from each other.
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test.
+ let conv = new ConvolverNode(context, {
+ disableNormalization: true,
+ buffer: response,
+ channelCountMode: options.channelCountMode
+ });
+ srcMerger.connect(conv);
+
+ // Splitter to get individual channels of the convolver output so we can
+ // feed them (eventually) to the context in the right set of channels.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+ // Reference graph consists of a delay node to simulate the response of
+ // the convolver. (The convolver response is designed this way.)
+ let delay = new DelayNode(context, {delayTime: 1 / context.sampleRate});
+
+ // Gain node to mix the sources to stereo in the desired way. (Could be
+ // done in the delay node, but let's keep the mixing separated from the
+ // functionality.)
+ let gainMixer = new GainNode(
+ context, {channelCount: 2, channelCountMode: 'explicit'});
+ srcMerger.connect(gainMixer);
+
+ // Splitter to extract the channels of the reference signal.
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ gainMixer.connect(delay).connect(refSplitter);
+
+ // Final merger to bring back the individual channels from the convolver
+ // and the reference in the right order for the destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ // First two channels are for the convolver output, and the next two are
+ // for the reference.
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ refSplitter.connect(finalMerger, 0, 2);
+ refSplitter.connect(finalMerger, 1, 3);
+
+ finalMerger.connect(context.destination);
+
+        // Finally, start the sources.
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(audioBuffer => {
+ // Extract the various channels out
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let expected0 = audioBuffer.getChannelData(2);
+ let expected1 = audioBuffer.getChannelData(3);
+
+ let threshold = options.absoluteThreshold ?
+ options.absoluteThreshold : absoluteThreshold;
+
+ // Verify that each output channel of the convolver matches
+ // the delayed signal from the reference
+ should(actual0, options.prefix + ': Channel 0')
+ .beCloseToArray(expected0, {absoluteThreshold: threshold});
+ should(actual1, options.prefix + ': Channel 1')
+ .beCloseToArray(expected1, {absoluteThreshold: threshold});
+ });
+ }
+
+ function upMixTest(should, options) {
+        // Offline context with 3 channels: 0 = source, 1 = convolver output
+        // (left), 2 = convolver output (right). The context destination must
+        // be discrete so that channels don't get mixed in unexpected ways.
+ let context = new OfflineAudioContext(3, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.maxChannelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // Mono response for convolver. Just a simple 1-frame delay.
+ let response =
+ new AudioBuffer({length: 2, sampleRate: context.sampleRate});
+ response.getChannelData(0)[1] = 1;
+
+ // Set mode to explicit and count to 2 so we manually force the
+ // convolver to produce stereo output. Without this, it would be
+ // mono input with mono response, which produces a mono output.
+ let conv;
+
+ should(
+ () => {conv = new ConvolverNode(context, {
+ buffer: response,
+ disableNormalization: true,
+ channelCount: 2,
+ channelCountMode: options.channelCountMode
+ })},
+ `new ConvolverNode({channelCountMode: '${
+ options.channelCountMode}'})`)
+ .notThrow();
+
+ // Split output of convolver into individual channels.
+ let convSplit = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ src.connect(conv);
+ conv.connect(convSplit);
+
+ // Connect signals to destination in the desired way.
+ src.connect(merger, 0, 0);
+ convSplit.connect(merger, 0, 1);
+ convSplit.connect(merger, 1, 2);
+
+ src.start();
+
+ return context.startRendering();
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-2-chan.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-2-chan.html
new file mode 100644
index 0000000000..9baf5f9f8d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-2-chan.html
@@ -0,0 +1,373 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Convolver Channel Outputs for Response with 2 channels
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+      // Test various convolver configurations when the convolver has a
+      // stereo (2-channel) response.
+
+ // This is somewhat arbitrary. It is the minimum value for which tests
+ // pass with both FFmpeg and KISS FFT implementations for 256 points.
+ // The value was similar for each implementation.
+ const absoluteThreshold = Math.pow(2, -21);
+
+ // Fairly arbitrary sample rate, except that we want the rate to be a
+      // power of two so that 1/sampleRate is exactly representable as a
+ // single-precision float.
+ let sampleRate = 8192;
+
+ // A fairly arbitrary number of frames, except the number of frames should
+ // be more than a few render quanta.
+ let renderFrames = 10 * 128;
+
+ let audit = Audit.createTaskRunner();
+
+ // Convolver response
+ let response;
+
+ audit.define(
+ {
+ label: 'initialize',
+            description: 'Convolver response with two channels'
+ },
+ (task, should) => {
+ // Convolver response
+ should(
+ () => {
+ response = new AudioBuffer(
+ {numberOfChannels: 2, length: 4, sampleRate: sampleRate});
+ // Each channel of the response is a simple impulse (with
+ // different delay) so that we can use a DelayNode to simulate
+ // the convolver output. Channel k is delayed by k+1 frames.
+ for (let k = 0; k < response.numberOfChannels; ++k) {
+ response.getChannelData(k)[k + 1] = 1;
+ }
+ },
+ 'new AudioBuffer({numberOfChannels: 2, length: 4, sampleRate: ' +
+ sampleRate + '})')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define(
+ {label: '1-channel input', description: 'produces 2-channel output'},
+ (task, should) => {
+ stereoResponseTest({numberOfInputs: 1, prefix: '1'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: '2-channel input', description: 'produces 2-channel output'},
+ (task, should) => {
+            stereoResponseTest({numberOfInputs: 2, prefix: '2'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '3-channel input',
+ description: '3->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ stereoResponseTest({numberOfInputs: 3, prefix: '3'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '4-channel input',
+ description: '4->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ stereoResponseTest({numberOfInputs: 4, prefix: '4'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '5.1-channel input',
+ description: '5.1->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ // Scale tolerance by maximum amplitude expected in down-mix
+ // output.
+ let threshold = (1.0 + Math.sqrt(0.5) * 2) * absoluteThreshold;
+
+ stereoResponseTest({numberOfInputs: 6, prefix: '5.1',
+ absoluteThreshold: threshold}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '2-channel input, explicit mode',
+ description: 'produces 2-channel output'
+ },
+ (task, should) => {
+ stereoResponseExplicitTest(
+ {
+                  numberOfInputs: 2,
+ prefix: '2-in explicit mode'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '3-channel input explicit mode',
+ description: '3->1 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ stereoResponseExplicitTest(
+ {
+ numberOfInputs: 3,
+ prefix: '3-in explicit'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '4-channel input explicit mode',
+ description: '4->1 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ stereoResponseExplicitTest(
+ {
+ numberOfInputs: 4,
+ prefix: '4-in explicit'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '5.1-channel input explicit mode',
+ description: '5.1->1 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ // Scale tolerance by maximum amplitude expected in down-mix
+ // output.
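+            // With channelCount 1, the input is first down-mixed 5.1 -> mono:
+            //   out = sqrt(0.5) * (L + R) + C + 0.5 * (SL + SR)
+            // so with unit-amplitude sources the peak amplitude is
+            // 2 + 2 * sqrt(0.5), which scales the threshold below.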
+ let threshold = (Math.sqrt(0.5) * 2 + 2.0) * absoluteThreshold;
+
+ stereoResponseExplicitTest(
+ {
+ numberOfInputs: 6,
+ prefix: '5.1-in explicit',
+ absoluteThreshold: threshold
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ function stereoResponseTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are for
+ // the stereo output of the convolver and the next two channels are for
+ // the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+        // Create oscillators for use as the input. The type and frequency are
+        // arbitrary, except that the oscillators must differ from each other.
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test.
+ let conv = new ConvolverNode(
+ context, {disableNormalization: true, buffer: response});
+ srcMerger.connect(conv);
+
+ // Splitter to get individual channels of the convolver output so we can
+ // feed them (eventually) to the context in the right set of channels.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+        // Reference graph consists of delay nodes to simulate the response of
+ // the convolver. (The convolver response is designed this way.)
+ let delay = new Array(2);
+ for (let k = 0; k < delay.length; ++k) {
+ delay[k] = new DelayNode(context, {
+ delayTime: (k + 1) / context.sampleRate,
+ channelCount: 1,
+ channelCountMode: 'explicit'
+ });
+ }
+
+ // Gain node to mix the sources to stereo in the desired way. (Could be
+ // done in the delay node, but let's keep the mixing separated from the
+ // functionality.)
+ let gainMixer = new GainNode(
+ context, {channelCount: 2, channelCountMode: 'explicit'});
+ srcMerger.connect(gainMixer);
+
+ // Splitter to extract the channels of the reference signal.
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ gainMixer.connect(refSplitter);
+
+ // Connect each channel to the delay nodes
+ for (let k = 0; k < delay.length; ++k) {
+ refSplitter.connect(delay[k], k);
+ }
+
+ // Final merger to bring back the individual channels from the convolver
+ // and the reference in the right order for the destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ // First two channels are for the convolver output, and the next two are
+ // for the reference.
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ delay[0].connect(finalMerger, 0, 2);
+ delay[1].connect(finalMerger, 0, 3);
+
+ finalMerger.connect(context.destination);
+
+        // Finally, start the sources.
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(audioBuffer => {
+ // Extract the various channels out
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let expected0 = audioBuffer.getChannelData(2);
+ let expected1 = audioBuffer.getChannelData(3);
+
+ let threshold = options.absoluteThreshold ?
+ options.absoluteThreshold : absoluteThreshold;
+
+ // Verify that each output channel of the convolver matches
+ // the delayed signal from the reference
+ should(actual0, options.prefix + ': Channel 0')
+ .beCloseToArray(expected0, {absoluteThreshold: threshold});
+ should(actual1, options.prefix + ': Channel 1')
+ .beCloseToArray(expected1, {absoluteThreshold: threshold});
+ });
+ }
+
+ function stereoResponseExplicitTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are for
+ // the stereo output of the convolver and the next two channels are for
+ // the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+        // Create oscillators for use as the input. The type and frequency are
+        // arbitrary, except that the oscillators must differ from each other.
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test.
+ let conv = new ConvolverNode(context, {
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ disableNormalization: true,
+ buffer: response
+ });
+ srcMerger.connect(conv);
+
+ // Splitter to get individual channels of the convolver output so we can
+ // feed them (eventually) to the context in the right set of channels.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+        // Reference graph consists of delay nodes to simulate the response of
+ // the convolver. (The convolver response is designed this way.)
+ let delay = new Array(2);
+ for (let k = 0; k < delay.length; ++k) {
+ delay[k] = new DelayNode(context, {
+ delayTime: (k + 1) / context.sampleRate,
+ channelCount: 1,
+ channelCountMode: 'explicit'
+ });
+ }
+
+ // Gain node to mix the sources in the same way as the convolver.
+ let gainMixer = new GainNode(
+ context, {channelCount: 1, channelCountMode: 'explicit'});
+ srcMerger.connect(gainMixer);
+
+ // Connect each channel to the delay nodes
+ for (let k = 0; k < delay.length; ++k) {
+ gainMixer.connect(delay[k]);
+ }
+
+ // Final merger to bring back the individual channels from the convolver
+ // and the reference in the right order for the destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ // First two channels are for the convolver output, and the next two are
+ // for the reference.
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ delay[0].connect(finalMerger, 0, 2);
+ delay[1].connect(finalMerger, 0, 3);
+
+ finalMerger.connect(context.destination);
+
+        // Finally, start the sources.
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(audioBuffer => {
+ // Extract the various channels out
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let expected0 = audioBuffer.getChannelData(2);
+ let expected1 = audioBuffer.getChannelData(3);
+
+ let threshold = options.absoluteThreshold ?
+ options.absoluteThreshold : absoluteThreshold;
+
+ // Verify that each output channel of the convolver matches
+ // the delayed signal from the reference
+ should(actual0, options.prefix + ': Channel 0')
+ .beCloseToArray(expected0, {absoluteThreshold: threshold});
+ should(actual1, options.prefix + ': Channel 1')
+ .beCloseToArray(expected1, {absoluteThreshold: threshold});
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html
new file mode 100644
index 0000000000..cf3986e8d0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html
@@ -0,0 +1,508 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Convolver Channel Outputs for Response with 4 channels
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Test various convolver configurations when the convolver response has
+      // four channels.
+
+ // This is somewhat arbitrary. It is the minimum value for which tests
+ // pass with both FFmpeg and KISS FFT implementations for 256 points.
+ // The value was similar for each implementation.
+ const absoluteThreshold = 3 * Math.pow(2, -22);
+
+ // Fairly arbitrary sample rate, except that we want the rate to be a
+      // power of two so that 1/sampleRate is exactly representable as a
+ // single-precision float.
+ let sampleRate = 8192;
+
+ // A fairly arbitrary number of frames, except the number of frames should
+ // be more than a few render quanta.
+ let renderFrames = 10 * 128;
+
+ let audit = Audit.createTaskRunner();
+
+ // Convolver response
+ let response;
+
+ audit.define(
+ {
+ label: 'initialize',
+            description: 'Convolver response with four channels'
+ },
+ (task, should) => {
+ // Convolver response
+ should(
+ () => {
+ response = new AudioBuffer(
+ {numberOfChannels: 4, length: 8, sampleRate: sampleRate});
+ // Each channel of the response is a simple impulse (with
+ // different delay) so that we can use a DelayNode to simulate
+ // the convolver output. Channel k is delayed by k+1 frames.
+ for (let k = 0; k < response.numberOfChannels; ++k) {
+ response.getChannelData(k)[k + 1] = 1;
+ }
+ },
+            'new AudioBuffer({numberOfChannels: 4, length: 8, sampleRate: ' +
+ sampleRate + '})')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.define(
+ {label: '1-channel input', description: 'produces 2-channel output'},
+ (task, should) => {
+ fourChannelResponseTest({numberOfInputs: 1, prefix: '1'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: '2-channel input', description: 'produces 2-channel output'},
+ (task, should) => {
+ fourChannelResponseTest({numberOfInputs: 2, prefix: '2'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '3-channel input',
+ description: '3->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ fourChannelResponseTest({numberOfInputs: 3, prefix: '3'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '4-channel input',
+ description: '4->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ fourChannelResponseTest({numberOfInputs: 4, prefix: '4'}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: '5.1-channel input',
+ description: '5.1->2 downmix producing 2-channel output'
+ },
+ (task, should) => {
+ // Scale tolerance by maximum amplitude expected in down-mix
+ // output.
+ let threshold = (1.0 + Math.sqrt(0.5) * 2) * absoluteThreshold;
+
+ fourChannelResponseTest({numberOfInputs: 6, prefix: '5.1',
+ absoluteThreshold: threshold}, should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'delayed buffer set',
+ description: 'Delayed set of 4-channel response'
+ },
+ (task, should) => {
+ // Don't really care about the output for this test. It's to verify
+ // we don't crash in a debug build when setting the convolver buffer
+ // after creating the graph.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+ let src = new OscillatorNode(context);
+ let convolver =
+ new ConvolverNode(context, {disableNormalization: true});
+ let buffer = new AudioBuffer({
+ numberOfChannels: 4,
+ length: 4,
+ sampleRate: context.sampleRate
+ });
+
+ // Impulse responses for the convolver aren't important, as long as
+ // it's not all zeroes.
+ for (let k = 0; k < buffer.numberOfChannels; ++k) {
+ buffer.getChannelData(k).fill(1);
+ }
+
+ src.connect(convolver).connect(context.destination);
+
+ // Set the buffer after a few render quanta have passed. The actual
+          // value must be at least one, but is otherwise arbitrary.
+ context.suspend(512 / context.sampleRate)
+ .then(() => convolver.buffer = buffer)
+ .then(() => context.resume());
+
+ src.start();
+ context.startRendering()
+ .then(audioBuffer => {
+ // Just make sure output is not silent.
+ should(
+ audioBuffer.getChannelData(0),
+ 'Output with delayed setting of convolver buffer')
+ .notBeConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'count 1, 2-channel in',
+ description: '2->1 downmix because channel count is 1'
+ },
+ (task, should) => {
+ channelCount1ExplicitTest(
+              {numberOfInputs: 2, prefix: 'Convolver count 1, stereo in'},
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'count 1, 4-channel in',
+ description: '4->1 downmix because channel count is 1'
+ },
+ (task, should) => {
+ channelCount1ExplicitTest(
+ {numberOfInputs: 4, prefix: 'Convolver count 1, 4-channel in'},
+ should)
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'count 1, 5.1-channel in',
+ description: '5.1->1 downmix because channel count is 1'
+ },
+ (task, should) => {
+ channelCount1ExplicitTest(
+ {
+ numberOfInputs: 6,
+ prefix: 'Convolver count 1, 5.1 channel in'
+ },
+ should)
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ function fourChannelResponseTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are for
+ // the stereo output of the convolver and the next two channels are for
+ // the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+        // Create oscillators for use as the input. The type and frequency are
+        // arbitrary, except that the oscillators must differ from each other.
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test.
+ let conv = new ConvolverNode(
+ context, {disableNormalization: true, buffer: response});
+ srcMerger.connect(conv);
+
+ // Splitter to get individual channels of the convolver output so we can
+ // feed them (eventually) to the context in the right set of channels.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+        // Reference graph consists of delay nodes to simulate the response of
+ // the convolver. (The convolver response is designed this way.)
+ let delay = new Array(4);
+ for (let k = 0; k < delay.length; ++k) {
+ delay[k] = new DelayNode(context, {
+ delayTime: (k + 1) / context.sampleRate,
+ channelCount: 1,
+ channelCountMode: 'explicit'
+ });
+ }
+
+ // Gain node to mix the sources to stereo in the desired way. (Could be
+ // done in the delay node, but let's keep the mixing separated from the
+ // functionality.)
+ let gainMixer = new GainNode(
+ context, {channelCount: 2, channelCountMode: 'explicit'});
+ srcMerger.connect(gainMixer);
+
+ // Splitter to extract the channels of the reference signal.
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ gainMixer.connect(refSplitter);
+
+ // Connect the left channel to the first two nodes and the right channel
+ // to the second two as required for "true" stereo matrix response.
+ for (let k = 0; k < 2; ++k) {
+ refSplitter.connect(delay[k], 0, 0);
+ refSplitter.connect(delay[k + 2], 1, 0);
+ }
+
+ // Gain nodes to sum the responses to stereo
+ let gain = new Array(2);
+ for (let k = 0; k < gain.length; ++k) {
+ gain[k] = new GainNode(context, {
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'discrete'
+ });
+ }
+
+ delay[0].connect(gain[0]);
+ delay[2].connect(gain[0]);
+ delay[1].connect(gain[1]);
+ delay[3].connect(gain[1]);
+
+ // Final merger to bring back the individual channels from the convolver
+ // and the reference in the right order for the destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ // First two channels are for the convolver output, and the next two are
+ // for the reference.
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ gain[0].connect(finalMerger, 0, 2);
+ gain[1].connect(finalMerger, 0, 3);
+
+ finalMerger.connect(context.destination);
+
+        // Finally, start the sources.
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(audioBuffer => {
+ // Extract the various channels out
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let expected0 = audioBuffer.getChannelData(2);
+ let expected1 = audioBuffer.getChannelData(3);
+
+ let threshold = options.absoluteThreshold ?
+ options.absoluteThreshold : absoluteThreshold;
+
+ // Verify that each output channel of the convolver matches
+ // the delayed signal from the reference
+ should(actual0, options.prefix + ': Channel 0')
+ .beCloseToArray(expected0, {absoluteThreshold: threshold});
+ should(actual1, options.prefix + ': Channel 1')
+ .beCloseToArray(expected1, {absoluteThreshold: threshold});
+ });
+ }
+
+ function fourChannelResponseExplicitTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are for
+ // the stereo output of the convolver and the next two channels are for
+ // the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+
+        // Create oscillators for use as the input. The type and frequency are
+        // arbitrary, except that the oscillators must differ from each other.
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test.
+ let conv = new ConvolverNode(
+ context, {disableNormalization: true, buffer: response});
+ srcMerger.connect(conv);
+
+ // Splitter to get individual channels of the convolver output so we can
+ // feed them (eventually) to the context in the right set of channels.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+        // Reference graph consists of delay nodes to simulate the response of
+ // the convolver. (The convolver response is designed this way.)
+ let delay = new Array(4);
+ for (let k = 0; k < delay.length; ++k) {
+ delay[k] = new DelayNode(context, {
+ delayTime: (k + 1) / context.sampleRate,
+ channelCount: 1,
+ channelCountMode: 'explicit'
+ });
+ }
+
+ // Gain node to mix the sources to stereo in the desired way. (Could be
+ // done in the delay node, but let's keep the mixing separated from the
+ // functionality.)
+ let gainMixer = new GainNode(
+ context, {channelCount: 2, channelCountMode: 'explicit'});
+ srcMerger.connect(gainMixer);
+
+ // Splitter to extract the channels of the reference signal.
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ gainMixer.connect(refSplitter);
+
+ // Connect the left channel to the first two nodes and the right channel
+ // to the second two as required for "true" stereo matrix response.
+ for (let k = 0; k < 2; ++k) {
+ refSplitter.connect(delay[k], 0, 0);
+ refSplitter.connect(delay[k + 2], 1, 0);
+ }
+
+ // Gain nodes to sum the responses to stereo
+ let gain = new Array(2);
+ for (let k = 0; k < gain.length; ++k) {
+ gain[k] = new GainNode(context, {
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'discrete'
+ });
+ }
+
+ delay[0].connect(gain[0]);
+ delay[2].connect(gain[0]);
+ delay[1].connect(gain[1]);
+ delay[3].connect(gain[1]);
+
+ // Final merger to bring back the individual channels from the convolver
+ // and the reference in the right order for the destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+
+ // First two channels are for the convolver output, and the next two are
+ // for the reference.
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ gain[0].connect(finalMerger, 0, 2);
+ gain[1].connect(finalMerger, 0, 3);
+
+ finalMerger.connect(context.destination);
+
+        // Finally, start the sources.
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(audioBuffer => {
+ // Extract the various channels out
+ let actual0 = audioBuffer.getChannelData(0);
+ let actual1 = audioBuffer.getChannelData(1);
+ let expected0 = audioBuffer.getChannelData(2);
+ let expected1 = audioBuffer.getChannelData(3);
+
+ // Verify that each output channel of the convolver matches
+ // the delayed signal from the reference
+ should(actual0, options.prefix + ': Channel 0')
+ .beEqualToArray(expected0);
+ should(actual1, options.prefix + ': Channel 1')
+ .beEqualToArray(expected1);
+ });
+ }
+
+ function channelCount1ExplicitTest(options, should) {
+        // Create a 4-channel offline context. The first two channels are
+ // for the stereo output of the convolver and the next two channels
+ // are for the reference stereo signal.
+ let context = new OfflineAudioContext(4, renderFrames, sampleRate);
+ context.destination.channelInterpretation = 'discrete';
+ // Final merger to bring back the individual channels from the
+ // convolver and the reference in the right order for the
+ // destination.
+ let finalMerger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ finalMerger.connect(context.destination);
+
+ // Create source using oscillators
+ let src = new Array(options.numberOfInputs);
+ for (let k = 0; k < src.length; ++k) {
+ src[k] = new OscillatorNode(
+ context, {type: 'square', frequency: 440 + 220 * k});
+ }
+
+ // Merger to combine the oscillators into one output stream.
+ let srcMerger =
+ new ChannelMergerNode(context, {numberOfInputs: src.length});
+ for (let k = 0; k < src.length; ++k) {
+ src[k].connect(srcMerger, 0, k);
+ }
+
+ // Convolver under test
+ let conv = new ConvolverNode(context, {
+ channelCount: 1,
+ channelCountMode: 'explicit',
+ disableNormalization: true,
+ buffer: response
+ });
+ srcMerger.connect(conv);
+
+ // Splitter to extract the channels of the test signal.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ conv.connect(splitter);
+
+        // Reference convolver, with a gain node to do the desired mixing.
+        // The gain node performs the same down-mixing that the convolver
+        // under test is expected to do internally.
+ let gain = new GainNode(
+ context, {channelCount: 1, channelCountMode: 'explicit'});
+ let convRef = new ConvolverNode(
+ context, {disableNormalization: true, buffer: response});
+
+ srcMerger.connect(gain).connect(convRef);
+
+ // Splitter to extract the channels of the reference signal.
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ convRef.connect(refSplitter);
+
+ // Merge all the channels into one
+ splitter.connect(finalMerger, 0, 0);
+ splitter.connect(finalMerger, 1, 1);
+ refSplitter.connect(finalMerger, 0, 2);
+ refSplitter.connect(finalMerger, 1, 3);
+
+ // Start sources and render!
+ for (let k = 0; k < src.length; ++k) {
+ src[k].start();
+ }
+
+ return context.startRendering().then(buffer => {
+ // The output from the test convolver should be identical to
+ // the reference result.
+ let testOut0 = buffer.getChannelData(0);
+ let testOut1 = buffer.getChannelData(1);
+ let refOut0 = buffer.getChannelData(2);
+ let refOut1 = buffer.getChannelData(3);
+
+ should(testOut0, `${options.prefix}: output 0`)
+ .beEqualToArray(refOut0);
+ should(testOut1, `${options.prefix}: output 1`)
+ .beEqualToArray(refOut1);
+        });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-already-has-value.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-already-has-value.html
new file mode 100644
index 0000000000..ce2d5fcfe9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-already-has-value.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ convolver-setBuffer-already-has-value.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('test', (task, should) => {
+ let context = new AudioContext();
+ let audioBuffer = new AudioBuffer(
+ {numberOfChannels: 1, length: 1, sampleRate: context.sampleRate});
+ let convolver = context.createConvolver();
+ should(() => {
+ convolver.buffer = null;
+ }, 'Set buffer to null before set non-null').notThrow();
+
+ should(() => {
+ convolver.buffer = audioBuffer;
+ }, 'Set buffer first normally').notThrow();
+
+ should(() => {
+ convolver.buffer = audioBuffer;
+ }, 'Set buffer a second time').notThrow();
+
+ should(() => {
+ convolver.buffer = null;
+ }, 'Set buffer to null').notThrow();
+
+ should(() => {
+ convolver.buffer = null;
+ }, 'Set buffer to null again, to make sure').notThrow();
+
+ should(() => {
+ convolver.buffer = audioBuffer;
+ }, 'Set buffer to non-null to verify it is set')
+ .notThrow();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-null.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-null.html
new file mode 100644
index 0000000000..d35b8ec54b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-setBuffer-null.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ convolver-setBuffer-null.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('test', function(task, should) {
+ let context = new AudioContext();
+ let conv = context.createConvolver();
+
+ should(() => {
+ conv.buffer = null;
+ }, 'Setting ConvolverNode impulse response buffer to null').notThrow();
+ should(conv.buffer === null, 'conv.buffer === null').beTrue();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-upmixing-1-channel-response.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-upmixing-1-channel-response.html
new file mode 100644
index 0000000000..b0b3a5965e
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/convolver-upmixing-1-channel-response.html
@@ -0,0 +1,143 @@
+<!DOCTYPE html>
+<title>Test that up-mixing signals in ConvolverNode processing is linear</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+const EPSILON = 3.0 * Math.pow(2, -22);
+// sampleRate is a power of two so that delay times are exact in base-2
+// floating point arithmetic.
+const SAMPLE_RATE = 32768;
+// Length of stereo convolver input in frames (arbitrary):
+const STEREO_FRAMES = 256;
+// Length of mono signal in frames. This is more than two blocks to ensure
+// that at least one block will be mono, even if interpolation in the
+// DelayNode means that stereo is output one block earlier and later than
+// if frames are delayed without interpolation.
+const MONO_FRAMES = 384;
+// Length of response buffer:
+const RESPONSE_FRAMES = 256;
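+
+// "Linear" here means that convolution commutes with up-mixing:
+// conv(upmix(mono) + stereo) should equal upmix(conv(mono)) + conv(stereo).
+// The test graph computes the left-hand side in a single ConvolverNode; the
+// reference graph computes the right-hand side with separate convolvers.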
+
+function test_linear_upmixing(channelInterpretation, initial_mono_frames)
+{
+ let stereo_input_end = initial_mono_frames + STEREO_FRAMES;
+ // Total length:
+ let length = stereo_input_end + RESPONSE_FRAMES + MONO_FRAMES + STEREO_FRAMES;
+ // The first two channels contain signal where some up-mixing occurs
+ // internally to a ConvolverNode when a stereo signal is added and removed.
+ // The last two channels are expected to contain the same signal, but mono
+ // and stereo signals are convolved independently before up-mixing the mono
+ // output to mix with the stereo output.
+ let context = new OfflineAudioContext({numberOfChannels: 4,
+ length: length,
+ sampleRate: SAMPLE_RATE});
+
+ let response = new AudioBuffer({numberOfChannels: 1,
+ length: RESPONSE_FRAMES,
+ sampleRate: context.sampleRate});
+
+ // Two stereo channel splitters will collect test and reference outputs.
+ let destinationMerger = new ChannelMergerNode(context, {numberOfInputs: 4});
+ destinationMerger.connect(context.destination);
+ let testSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ let referenceSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ testSplitter.connect(destinationMerger, 0, 0);
+ testSplitter.connect(destinationMerger, 1, 1);
+ referenceSplitter.connect(destinationMerger, 0, 2);
+ referenceSplitter.connect(destinationMerger, 1, 3);
+
+ // A GainNode mixes reference stereo and mono signals because up-mixing
+ // cannot be performed at a channel splitter.
+ let referenceGain = new GainNode(context);
+ referenceGain.connect(referenceSplitter);
+ referenceGain.channelInterpretation = channelInterpretation;
+
+ // The impulse response for convolution contains two impulses so as to test
+ // effects in at least two processing blocks.
+ response.getChannelData(0)[0] = 0.5;
+ response.getChannelData(0)[response.length - 1] = 0.5;
+
+ let testConvolver = new ConvolverNode(context, {disableNormalization: true,
+ buffer: response});
+ testConvolver.channelInterpretation = channelInterpretation;
+ let referenceMonoConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ let referenceStereoConvolver = new ConvolverNode(context,
+ {disableNormalization: true,
+ buffer: response});
+ // No need to set referenceStereoConvolver.channelInterpretation because
+ // input is either silent or stereo.
+ testConvolver.connect(testSplitter);
+ // Mix reference convolver output.
+ referenceMonoConvolver.connect(referenceGain);
+ referenceStereoConvolver.connect(referenceGain);
+
+ // The DelayNode initially has a single channel of silence, which is used to
+ // switch the stereo signal in and out. The output of the delay node is
+ // first mono silence (if there is a non-zero initial_mono_frames), then
+ // stereo, then mono silence, and finally stereo again. maxDelayTime is
+ // used to generate the middle mono silence period from the initial silence
+ // in the DelayNode and then generate the final period of stereo from its
+ // initial input.
+ let maxDelayTime = (length - STEREO_FRAMES) / context.sampleRate;
+ let delay =
+ new DelayNode(context,
+ {maxDelayTime: maxDelayTime,
+ delayTime: initial_mono_frames / context.sampleRate});
+ // Schedule an increase in the delay to return to mono silence.
+ delay.delayTime.setValueAtTime(maxDelayTime,
+ stereo_input_end / context.sampleRate);
+ delay.connect(testConvolver);
+ delay.connect(referenceStereoConvolver);
+
+ let stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ stereoMerger.connect(delay);
+
+ // Three independent signals
+ let monoSignal = new OscillatorNode(context, {frequency: 440});
+ let leftSignal = new OscillatorNode(context, {frequency: 450});
+ let rightSignal = new OscillatorNode(context, {frequency: 460});
+ monoSignal.connect(testConvolver);
+ monoSignal.connect(referenceMonoConvolver);
+ leftSignal.connect(stereoMerger, 0, 0);
+ rightSignal.connect(stereoMerger, 0, 1);
+ monoSignal.start();
+ leftSignal.start();
+ rightSignal.start();
+
+ return context.startRendering().
+ then((buffer) => {
+ let maxDiff = -1.0;
+ let frameIndex = 0;
+ let channelIndex = 0;
+ for (let c = 0; c < 2; ++c) {
+ let testOutput = buffer.getChannelData(0 + c);
+ let referenceOutput = buffer.getChannelData(2 + c);
+      for (let i = 0; i < buffer.length; ++i) {
+        let diff = Math.abs(testOutput[i] - referenceOutput[i]);
+ if (diff > maxDiff) {
+ maxDiff = diff;
+ frameIndex = i;
+ channelIndex = c;
+ }
+ }
+ }
+ assert_approx_equals(buffer.getChannelData(0 + channelIndex)[frameIndex],
+ buffer.getChannelData(2 + channelIndex)[frameIndex],
+ EPSILON,
+ `output at ${frameIndex} ` +
+ `in channel ${channelIndex}` );
+ });
+}
+
+promise_test(() => test_linear_upmixing("speakers", MONO_FRAMES),
+ "speakers, initially mono");
+promise_test(() => test_linear_upmixing("discrete", MONO_FRAMES),
+ "discrete");
+// Gecko uses a separate path for "speakers" up-mixing when the convolver's
+// first input is stereo, so test that separately.
+promise_test(() => test_linear_upmixing("speakers", 0),
+ "speakers, initially stereo");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/ctor-convolver.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/ctor-convolver.html
new file mode 100644
index 0000000000..28a0fc1c3c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/ctor-convolver.html
@@ -0,0 +1,186 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: Convolver
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'ConvolverNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'ConvolverNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(
+ should, node, prefix,
+ [{name: 'normalize', value: true}, {name: 'buffer', value: null}]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ // Can't use testAudioNodeOptions because the constraints for this node
+ // are not supported there.
+ let node;
+
+ // An array of tests.
+ [{
+ // Test that we can set the channel count to 1 or 2 and that other
+ // channel counts throw an error.
+ attribute: 'channelCount',
+ tests: [
+ {value: 1}, {value: 2}, {value: 0, error: 'NotSupportedError'},
+ {value: 3, error: 'NotSupportedError'},
+ {value: 99, error: 'NotSupportedError'}
+ ]
+ },
+ {
+           // Test channelCountMode. A mode of "max" is illegal, but the
+           // others are allowed. Unknown values must throw a TypeError.
+ attribute: 'channelCountMode',
+ tests: [
+ {value: 'clamped-max'}, {value: 'explicit'},
+ {value: 'max', error: 'NotSupportedError'},
+ {value: 'foobar', error: TypeError}
+ ]
+ },
+ {
+           // Test that channelInterpretation can be set to valid values and
+           // that an error is thrown for others.
+ attribute: 'channelInterpretation',
+ tests: [
+ {value: 'speakers'}, {value: 'discrete'},
+ {value: 'foobar', error: TypeError}
+ ]
+ }].forEach(entry => {
+ entry.tests.forEach(testItem => {
+ let options = {};
+ options[entry.attribute] = testItem.value;
+
+ const testFunction = () => {
+ node = new ConvolverNode(context, options);
+ };
+ const testDescription =
+ `new ConvolverNode(c, ${JSON.stringify(options)})`;
+
+ if (testItem.error) {
+ testItem.error === TypeError
+ ? should(testFunction, testDescription).throw(TypeError)
+ : should(testFunction, testDescription)
+ .throw(DOMException, 'NotSupportedError');
+ } else {
+ should(testFunction, testDescription).notThrow();
+ should(node[entry.attribute], `node.${entry.attribute}`)
+ .beEqualTo(options[entry.attribute]);
+ }
+ });
+ });
+
+ task.done();
+ });
+
+ audit.define('nullable buffer', (task, should) => {
+ let node;
+ let options = {buffer: null};
+
+ should(
+ () => {
+ node = new ConvolverNode(context, options);
+ },
+            'node1 = new ConvolverNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node.buffer, 'node1.buffer').beEqualTo(null);
+
+ task.done();
+ });
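+
+      // Per the spec, the response buffer must have the same sample rate
+      // as the context; constructing with a buffer at a different rate is
+      // expected to throw a NotSupportedError.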
+ audit.define('illegal sample-rate', (task, should) => {
+ let node;
+ let options = {buffer: context.createBuffer(1, 1, context.sampleRate / 2)};
+
+ should(
+ () => {
+ node = new ConvolverNode(context, options);
+ },
+            'node1 = new ConvolverNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define('construct with options', (task, should) => {
+ let buf = context.createBuffer(1, 1, context.sampleRate);
+ let options = {buffer: buf, disableNormalization: false};
+
+ let message =
+ 'node = new ConvolverNode(c, ' + JSON.stringify(options) + ')';
+
+ let node;
+ should(() => {
+ node = new ConvolverNode(context, options);
+ }, message).notThrow();
+
+        should(node instanceof ConvolverNode, 'node1 instanceof ConvolverNode')
+ .beEqualTo(true);
+ should(node.buffer === options.buffer, 'node1.buffer === <buf>')
+ .beEqualTo(true);
+ should(node.normalize, 'node1.normalize')
+ .beEqualTo(!options.disableNormalization);
+
+ options.buffer = null;
+ options.disableNormalization = true;
+
+ message =
+            'node2 = new ConvolverNode(c, ' + JSON.stringify(options) + ')';
+
+ should(() => {
+ node = new ConvolverNode(context, options);
+ }, message).notThrow();
+ should(node.buffer, 'node2.buffer').beEqualTo(null);
+ should(node.normalize, 'node2.normalize')
+ .beEqualTo(!options.disableNormalization);
+
+ options.disableNormalization = false;
+ message = 'node3 = new ConvolverNode(context, ' +
+ JSON.stringify(options) + ')';
+
+ should(() => {
+ node = new ConvolverNode(context, options);
+ }, message).notThrow();
+ should(node.buffer, 'node3.buffer').beEqualTo(null);
+ should(node.normalize, 'node3.normalize')
+ .beEqualTo(!options.disableNormalization);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/realtime-conv.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/realtime-conv.html
new file mode 100644
index 0000000000..8668e9d5ac
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/realtime-conv.html
@@ -0,0 +1,149 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Convolver on Real-time Context
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/convolution-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+      // Choose a length that is large enough to cause multiple threads to be
+ // used in the convolver. For browsers that don't support this, this
+ // value doesn't matter.
+ const pulseLength = 16384;
+
+ // The computed SNR should be at least this large. This value depends on
+      // the platform and browser. Don't set this value much lower than
+      // this; a lower value probably indicates a fairly inaccurate convolver
+      // or constant source node automations that should be fixed instead.
+ const minRequiredSNR = 77.03;
+
+ // To test the real-time convolver, we convolve two square pulses together
+ // to produce a triangular pulse. To verify the result is correct we
+ // compare it against a constant source node configured to generate the
+ // expected ramp.
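+      // Convolving two unit square pulses of N frames gives a triangle
+      //   y[n] = min(n + 1, N, 2 * N - 1 - n)
+      // that ramps linearly up to a peak of N and back down to 0, hence
+      // the linear ramps scheduled on the reference source below.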
+ audit.define(
+ {label: 'test', description: 'Test convolver with real-time context'},
+ (task, should) => {
+ // Use a power of two for the sample rate to eliminate round-off in
+ // computing times from frames.
+ const context = new AudioContext({sampleRate: 16384});
+
+ // Square pulse for the convolver impulse response.
+ const squarePulse = new AudioBuffer(
+ {length: pulseLength, sampleRate: context.sampleRate});
+ squarePulse.getChannelData(0).fill(1);
+
+ const convolver = new ConvolverNode(
+ context, {buffer: squarePulse, disableNormalization: true});
+
+ // Square pulse for testing
+ const srcSquare = new ConstantSourceNode(context, {offset: 0});
+ srcSquare.connect(convolver);
+
+ // Reference ramp. Automations on this constant source node will
+ // generate the desired ramp.
+ const srcRamp = new ConstantSourceNode(context, {offset: 0});
+
+ // Use these gain nodes to compute the difference between the
+ // convolver output and the expected ramp to create the error
+ // signal.
+ const inverter = new GainNode(context, {gain: -1});
+ const sum = new GainNode(context, {gain: 1});
+ convolver.connect(sum);
+ srcRamp.connect(inverter).connect(sum);
+
+ // Square the error signal using this gain node.
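+          // Driving the gain AudioParam with the same signal multiplies the
+          // signal by itself: the effective gain is 0 + sum, so the output
+          // is sum^2, sample by sample.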
+ const squarer = new GainNode(context, {gain: 0});
+ sum.connect(squarer);
+ sum.connect(squarer.gain);
+
+ // Merge the error signal and the square source so we can integrate
+ // the error signal to find an SNR.
+ const merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ squarer.connect(merger, 0, 0);
+ srcSquare.connect(merger, 0, 1);
+
+ // For simplicity, use a ScriptProcessor to integrate the error
+ // signal. The square pulse signal is used as a gate over which the
+ // integration is done. When the pulse ends, the SNR is computed
+ // and the test ends.
+
+ // |doSum| is used to determine when to integrate and when it
+ // becomes false, it signals the end of integration.
+ let doSum = false;
+
+ // |signalSum| is the energy in the square pulse. |errorSum| is the
+ // energy in the error signal.
+ let signalSum = 0;
+ let errorSum = 0;
+
+ let spn = context.createScriptProcessor(0, 2, 1);
+ spn.onaudioprocess = (event) => {
+ // Sum the values on the first channel when the second channel is
+ // not zero. When the second channel goes from non-zero to 0,
+ // dump the value out and terminate the test.
+ let c0 = event.inputBuffer.getChannelData(0);
+ let c1 = event.inputBuffer.getChannelData(1);
+
+ for (let k = 0; k < c1.length; ++k) {
+ if (c1[k] == 0) {
+ if (doSum) {
+ doSum = false;
+                // Square wave is now silent and we were integrating, so we
+                // can stop now and verify the SNR.
+ should(10 * Math.log10(signalSum / errorSum), 'SNR')
+ .beGreaterThanOrEqualTo(minRequiredSNR);
+ spn.onaudioprocess = null;
+ task.done();
+ }
+ } else {
+ // Signal is non-zero so sum up the values.
+ doSum = true;
+ errorSum += c0[k];
+ signalSum += c1[k] * c1[k];
+ }
+ }
+ };
+
+ merger.connect(spn).connect(context.destination);
+
+          // Schedule everything to start a bit in the future from now, and
+          // end pulseLength frames later.
+ let now = context.currentTime;
+
+ // |startFrame| is the number of frames to schedule ahead for
+ // testing.
+ const startFrame = 512;
+ const startTime = startFrame / context.sampleRate;
+ const pulseDuration = pulseLength / context.sampleRate;
+
+ // Create a square pulse in the constant source node.
+ srcSquare.offset.setValueAtTime(1, now + startTime);
+ srcSquare.offset.setValueAtTime(0, now + startTime + pulseDuration);
+
+ // Create the reference ramp.
+ srcRamp.offset.setValueAtTime(1, now + startTime);
+ srcRamp.offset.linearRampToValueAtTime(
+ pulseLength,
+ now + startTime + pulseDuration - 1 / context.sampleRate);
+ srcRamp.offset.linearRampToValueAtTime(
+ 0,
+ now + startTime + 2 * pulseDuration - 1 / context.sampleRate);
+
+ // Start the ramps!
+ srcRamp.start();
+ srcSquare.start();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/transferred-buffer-output.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/transferred-buffer-output.html
new file mode 100644
index 0000000000..e37a98c386
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/transferred-buffer-output.html
@@ -0,0 +1,107 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Convolver Output with Transferred Buffer
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Arbitrary sample rate.
+ const sampleRate = 16000;
+
+ // Number of frames to render. Just need to have at least 2 render
+ // quanta.
+ const lengthInFrames = 10 * RENDER_QUANTUM_FRAMES;
+
+ let audit = Audit.createTaskRunner();
+
+ // Buffer to use for the impulse response of a ConvolverNode.
+ let impulseBuffer;
+
+ // This sets up a worker to receive one channel of an AudioBuffer.
+ function setUpWorkerForTest() {
+ impulseBuffer = new AudioBuffer({
+ numberOfChannels: 2,
+ length: 2 * RENDER_QUANTUM_FRAMES,
+ sampleRate: sampleRate
+ });
+
+ // Just fill the buffer with a constant value; the contents shouldn't
+ // matter for this test since we're transferring one of the channels.
+ impulseBuffer.getChannelData(0).fill(1);
+ impulseBuffer.getChannelData(1).fill(2);
+
+        // We're going to transfer channel 0 to the worker, making it
+        // unavailable for the convolver.
+ let data = impulseBuffer.getChannelData(0).buffer;
+
+ let string = [
+ 'onmessage = function(e) {', ' postMessage(\'done\');', '};'
+ ].join('\n');
+
+ let blobURL = URL.createObjectURL(new Blob([string]));
+ let worker = new Worker(blobURL);
+ worker.onmessage = workerReply;
+ worker.postMessage(data, [data]);
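+        // Transferring the ArrayBuffer detaches it on this side, so the
+        // convolver can no longer acquire the channel data.  The test below
+        // expects it to then behave as if the whole buffer were zeroes.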
+ }
+
+ function workerReply() {
+ // Worker has received the message. Run the test.
+ audit.run();
+ }
+
+ audit.define(
+ {
+ label: 'Test Convolver with transferred buffer',
+ description: 'Output should be all zeroes'
+ },
+ async (task, should) => {
+          // Use two channels so we can capture both channels of the stereo
+          // convolver's output.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ length: lengthInFrames,
+ sampleRate: sampleRate
+ });
+
+          // Use a simple constant source so we can easily check that the
+          // convolver output is correct.
+ let source = new ConstantSourceNode(context);
+
+ // Create the convolver with the desired impulse response and
+ // disable normalization so we can easily check the output.
+ let conv = new ConvolverNode(
+ context, {disableNormalization: true, buffer: impulseBuffer});
+
+ source.connect(conv).connect(context.destination);
+
+ source.start();
+
+ let renderedBuffer = await context.startRendering();
+
+ // Get the actual data
+ let c0 = renderedBuffer.getChannelData(0);
+ let c1 = renderedBuffer.getChannelData(1);
+
+ // Since one channel was transferred, we must behave as if all were
+ // transferred. Hence, the output should be all zeroes for both
+ // channels.
+ should(c0, `Convolver channel 0 output[0:${c0.length - 1}]`)
+ .beConstantValueOf(0);
+
+ should(c1, `Convolver channel 1 output[0:${c1.length - 1}]`)
+ .beConstantValueOf(0);
+
+ task.done();
+ });
+
+ setUpWorkerForTest();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/ctor-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/ctor-delay.html
new file mode 100644
index 0000000000..e7ccefc655
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/ctor-delay.html
@@ -0,0 +1,76 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: Delay
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'DelayNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'DelayNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(
+ should, node, prefix, [{name: 'delayTime', value: 0}]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'DelayNode');
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+ let options = {
+ delayTime: 0.5,
+ maxDelayTime: 1.5,
+ };
+
+ should(
+ () => {
+ node = new DelayNode(context, options);
+ },
+ 'node1 = new DelayNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node.delayTime.value, 'node1.delayTime.value')
+ .beEqualTo(options.delayTime);
+ should(node.delayTime.maxValue, 'node1.delayTime.maxValue')
+ .beEqualTo(options.maxDelayTime);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delay-test.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delay-test.html
new file mode 100644
index 0000000000..6277c253ec
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delay-test.html
@@ -0,0 +1,61 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test DelayNode Delay</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test0', description: 'Test delay of 3 frames'},
+ async (task, should) => {
+          // Only need a few output samples.  The sample rate is arbitrary.
+ const context =
+ new OfflineAudioContext(1, RENDER_QUANTUM_FRAMES, 8192);
+ let src;
+ let delay;
+
+ should(
+ () => {
+ src = new ConstantSourceNode(context);
+ delay = new DelayNode(context);
+ },
+ 'Creating ConstantSourceNode(context) and DelayNode(context)')
+ .notThrow();
+
+ // The number of frames to delay for the DelayNode. Should be a
+ // whole number, but is otherwise arbitrary.
+ const delayFrames = 3;
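+          // A delay of k frames means output[n] = input[n - k], so the
+          // first k output frames should be silent and the rest should be
+          // the constant source value of 1.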
+
+ should(() => {
+ delay.delayTime.value = delayFrames / context.sampleRate;
+ }, `Setting delayTime to ${delayFrames} frames`).notThrow();
+
+ src.connect(delay).connect(context.destination);
+
+ src.start();
+
+ let buffer = await context.startRendering();
+ let output = buffer.getChannelData(0);
+
+ // Verify output was delayed the correct number of frames.
+ should(output.slice(0, delayFrames), `output[0:${delayFrames - 1}]`)
+ .beConstantValueOf(0);
+ should(
+ output.slice(delayFrames),
+ `output[${delayFrames}:${output.length - 1}]`)
+ .beConstantValueOf(1);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-channel-count-1.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-channel-count-1.html
new file mode 100644
index 0000000000..dd964ef9e3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-channel-count-1.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html>
+<title>Test that DelayNode output channelCount matches that of the delayed input</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+// See https://github.com/WebAudio/web-audio-api/issues/25
+
+// sampleRate is a power of two so that delay times are exact in base-2
+// floating point arithmetic.
+const SAMPLE_RATE = 32768;
+// Arbitrary delay time in frames (but this is assumed to be a multiple of
+// block size below):
+const DELAY_FRAMES = 3 * 128;
+// Implementations may apply interpolation to input samples, which can spread
+// the effect of input with larger channel counts over neighbouring blocks.
+// This test skips enough neighbouring blocks to avoid the effects of a
+// filter radius of up to this number of frames:
+const INTERPOLATION_GRACE = 128;
+// Number of frames of DelayNode output that are known to be stereo:
+const STEREO_FRAMES = 128;
+// The delay will be increased at this frame to switch DelayNode output back
+// to mono.
+const MONO_OUTPUT_START_FRAME =
+ DELAY_FRAMES + INTERPOLATION_GRACE + STEREO_FRAMES;
+// Number of frames of output that are known to be mono after the known stereo
+// and interpolation grace.
+const MONO_FRAMES = 128;
+// The total length allows for interpolation after the effects of the stereo
+// input have finished, plus one block to test the return to mono output:
+const TOTAL_LENGTH =
+ MONO_OUTPUT_START_FRAME + INTERPOLATION_GRACE + MONO_FRAMES;
+// maxDelayTime is a multiple of block size because the Gecko implementation
+// once had a bug with delayTime = maxDelayTime in this situation:
+const MAX_DELAY_FRAMES = TOTAL_LENGTH + INTERPOLATION_GRACE;
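+// Expected channel-1 output (ignoring the interpolation-grace blocks): 0
+// while the delay output is mono, monoOffset + rightOffset while the delayed
+// stereo pulse forces the mono source to be up-mixed, then 0 again.  These
+// are the three assert_samples_equal() checks at the end of the test.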
+
+promise_test(() => {
+ let context = new OfflineAudioContext({numberOfChannels: 1,
+ length: TOTAL_LENGTH,
+ sampleRate: SAMPLE_RATE});
+
+ // Only channel 1 of the splitter is connected to the destination.
+ let splitter = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ splitter.connect(context.destination, 1);
+
+ // A gain node has channelCountMode "max" and channelInterpretation
+ // "speakers", and so will up-mix a mono input when there is stereo input.
+ let gain = new GainNode(context);
+ gain.connect(splitter);
+
+ // The delay node initially outputs a single channel of silence, when it
+ // does not have enough signal in its history to output what it has
+ // previously received. After the delay period, it will then output the
+ // stereo signal it received.
+ let delay =
+ new DelayNode(context,
+ {maxDelayTime: MAX_DELAY_FRAMES / context.sampleRate,
+ delayTime: DELAY_FRAMES / context.sampleRate});
+ // Schedule an increase in the delay to return to mono silent output from
+ // the unfilled portion of the DelayNode's buffer.
+ delay.delayTime.setValueAtTime(MAX_DELAY_FRAMES / context.sampleRate,
+ MONO_OUTPUT_START_FRAME / context.sampleRate);
+ delay.connect(gain);
+
+ let stereoMerger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ stereoMerger.connect(delay);
+
+ let leftOffset = 0.125;
+ let rightOffset = 0.5;
+ let leftSource = new ConstantSourceNode(context, {offset: leftOffset});
+ let rightSource = new ConstantSourceNode(context, {offset: rightOffset});
+ leftSource.start();
+ rightSource.start();
+ leftSource.connect(stereoMerger, 0, 0);
+ rightSource.connect(stereoMerger, 0, 1);
+ // Connect a mono source directly to the gain, so that even stereo silence
+ // will be detected in channel 1 of the gain output because it will cause
+ // the mono source to be up-mixed.
+  let monoOffset = 0.25;
+ let monoSource = new ConstantSourceNode(context, {offset: monoOffset});
+ monoSource.start();
+ monoSource.connect(gain);
+
+ return context.startRendering().
+ then((buffer) => {
+ let output = buffer.getChannelData(0);
+
+ function assert_samples_equal(startIndex, length, expected, description)
+ {
+ for (let i = startIndex; i < startIndex + length; ++i) {
+ assert_equals(output[i], expected, description + ` at ${i}`);
+ }
+ }
+
+ assert_samples_equal(0, DELAY_FRAMES - INTERPOLATION_GRACE,
+ 0, "Initial mono");
+ assert_samples_equal(DELAY_FRAMES + INTERPOLATION_GRACE, STEREO_FRAMES,
+ monoOffset + rightOffset, "Stereo");
+ assert_samples_equal(MONO_OUTPUT_START_FRAME + INTERPOLATION_GRACE,
+ MONO_FRAMES,
+ 0, "Final mono");
+ });
+});
+
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-default-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-default-delay.html
new file mode 100644
index 0000000000..ef526c96ff
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-default-delay.html
@@ -0,0 +1,49 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode-max-default-delay.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'DelayNode with delay set to default maximum delay'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ let delay = context.createDelay();
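+          // This bare assignment sets a global that is presumably read by
+          // checkDelayedResult() in delay-testing.js; 1 second is the
+          // default maxDelayTime of createDelay().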
+ delayTimeSeconds = 1;
+ delay.delayTime.value = delayTimeSeconds;
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-nondefault-delay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-nondefault-delay.html
new file mode 100644
index 0000000000..3be07255e1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-max-nondefault-delay.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode-max-nondefault-delay.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'DelayNode with delay set to non-default maximum delay'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ let maxDelay = 1.5;
+ let delay = context.createDelay(maxDelay);
+ delayTimeSeconds = maxDelay;
+ delay.delayTime.value = delayTimeSeconds;
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelay.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelay.html
new file mode 100644
index 0000000000..a43ceeb7be
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelay.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode-maxdelay.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Basic functionality of DelayNode with a non-default max delay time'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ // Create a delay node with an explicit max delay time (greater than
+ // the default of 1 second).
+ let delay = context.createDelay(2);
+ // Set the delay time to a value greater than the default max delay
+ // so we can verify the delay is working for this case.
+ delayTimeSeconds = 1.5;
+ delay.delayTime.value = delayTimeSeconds;
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelaylimit.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelaylimit.html
new file mode 100644
index 0000000000..caf2f85dfd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-maxdelaylimit.html
@@ -0,0 +1,68 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode-maxdelaylimit.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Tests attribute and maximum allowed delay of DelayNode'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ window.context = context;
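+          // Per the spec, maxDelayTime must lie strictly between 0 and 180
+          // seconds; values outside that range throw NotSupportedError, and
+          // a non-finite value throws a TypeError.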
+ should(() => context.createDelay(180),
+ 'Setting Delay length to 180 seconds or more')
+ .throw(DOMException, 'NotSupportedError');
+ should(() => context.createDelay(0),
+ 'Setting Delay length to 0 seconds')
+ .throw(DOMException, 'NotSupportedError');
+ should(() => context.createDelay(-1),
+ 'Setting Delay length to negative')
+ .throw(DOMException, 'NotSupportedError');
+ should(() => context.createDelay(NaN),
+ 'Setting Delay length to NaN')
+ .throw(TypeError);
+
+ let delay = context.createDelay(179);
+ delay.delayTime.value = delayTimeSeconds;
+ window.delay = delay;
+ should(
+ delay.delayTime.value,
+ 'delay.delayTime.value = ' + delayTimeSeconds)
+ .beEqualTo(delayTimeSeconds);
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-scheduling.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-scheduling.html
new file mode 100644
index 0000000000..af6c54950a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode-scheduling.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode-scheduling.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'DelayNode delayTime parameter can be scheduled at a given time'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ let delay = context.createDelay();
+
+ // Schedule delay time at time zero.
+ delay.delayTime.setValueAtTime(delayTimeSeconds, 0);
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode.html
new file mode 100644
index 0000000000..da508e439f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/delaynode.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ delaynode.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/delay-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Tests attribute and basic functionality of DelayNode'
+ },
+ function(task, should) {
+
+ // Create offline audio context.
+ let context = new OfflineAudioContext(
+ 1, sampleRate * renderLengthSeconds, sampleRate);
+ let toneBuffer = createToneBuffer(
+ context, 20, 20 * toneLengthSeconds, sampleRate); // 20Hz tone
+
+ let bufferSource = context.createBufferSource();
+ bufferSource.buffer = toneBuffer;
+
+ let delay = context.createDelay();
+
+ window.delay = delay;
+ should(delay.numberOfInputs, 'delay.numberOfInputs').beEqualTo(1);
+ should(delay.numberOfOutputs, 'delay.numberOfOutputs').beEqualTo(1);
+ should(delay.delayTime.defaultValue, 'delay.delayTime.defaultValue')
+ .beEqualTo(0.0);
+ should(delay.delayTime.value, 'delay.delayTime.value')
+ .beEqualTo(0.0);
+
+ delay.delayTime.value = delayTimeSeconds;
+ should(
+ delay.delayTime.value,
+ 'delay.delayTime.value = ' + delayTimeSeconds)
+ .beEqualTo(delayTimeSeconds);
+
+ bufferSource.connect(delay);
+ delay.connect(context.destination);
+ bufferSource.start(0);
+
+ context.startRendering()
+ .then(buffer => checkDelayedResult(buffer, toneBuffer, should))
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/maxdelay-rounding.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/maxdelay-rounding.html
new file mode 100644
index 0000000000..84d9f18138
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/maxdelay-rounding.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test DelayNode when maxDelayTime requires rounding
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 44100;
+ let inputLengthSeconds = 1;
+ let renderLengthSeconds = 2;
+
+ // Delay for one second plus 0.4 of a sample frame, to test that
+ // DelayNode is properly rounding up when calculating its buffer
+ // size (crbug.com/1065110).
+ let delayTimeSeconds = 1 + 0.4 / sampleRate;
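+      // At 44100 Hz this is 44100.4 frames, so an implementation that
+      // truncates when sizing its internal buffer would allocate one frame
+      // too few.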
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'maxdelay-rounding',
+ description: 'Test DelayNode when maxDelayTime requires rounding',
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext({
+ numberOfChannels: 1,
+ length: sampleRate * renderLengthSeconds,
+ sampleRate: sampleRate,
+ });
+
+ // Create a constant source to use as input.
+ let src = new ConstantSourceNode(context);
+
+ // Create a DelayNode to delay for delayTimeSeconds.
+ let delay = new DelayNode(context, {
+ maxDelayTime: delayTimeSeconds,
+ delayTime: delayTimeSeconds,
+ });
+
+ src.connect(delay).connect(context.destination);
+
+ src.start();
+ context.startRendering()
+ .then(renderedBuffer => {
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ // The first delayTimeSeconds of output should be silent.
+ let expectedSilentFrames = Math.floor(
+ delayTimeSeconds * sampleRate);
+
+ should(
+ renderedData.slice(0, expectedSilentFrames),
+ `output[0:${expectedSilentFrames - 1}]`)
+ .beConstantValueOf(0);
+
+ // The rest should be non-silent: that is, there should
+ // be at least one non-zero sample. (Any reasonable
+ // interpolation algorithm will make all these samples
+ // non-zero, but I don't think that's guaranteed by the
+ // spec, so we use a conservative test for now.)
+ should(
+ renderedData.slice(expectedSilentFrames),
+ `output[${expectedSilentFrames}:]`)
+ .notBeConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html
new file mode 100644
index 0000000000..ccca103a3b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html
@@ -0,0 +1,184 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test DelayNode Has No Dezippering
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // The sample rate must be a power of two to avoid any round-off errors in
+ // computing when to suspend a context on a rendering quantum boundary.
+ // Otherwise this is pretty arbitrary.
+ let sampleRate = 16384;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test0', description: 'Test DelayNode has no dezippering'},
+ (task, should) => {
+ let context = new OfflineAudioContext(1, sampleRate, sampleRate);
+
+ // Simple integer ramp for testing delay node
+ let buffer = new AudioBuffer(
+ {length: context.length, sampleRate: context.sampleRate});
+ let rampData = buffer.getChannelData(0);
+ for (let k = 0; k < rampData.length; ++k) {
+ rampData[k] = k + 1;
+ }
+
+ // |delay0Frame| is the initial delay in frames. |delay1Frame| is
+ // the new delay in frames. These must be integers.
+ let delay0Frame = 64;
+ let delay1Frame = 16;
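+          // With no dezippering the output must jump instantly at frame
+          // 128: output[127] = rampData[63] = 64 (delay of 64 frames) and
+          // output[128] = rampData[112] = 113 (delay of 16 frames).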
+
+ let src = new AudioBufferSourceNode(context, {buffer: buffer});
+ let delay = new DelayNode(
+ context, {delayTime: delay0Frame / context.sampleRate});
+
+ src.connect(delay).connect(context.destination);
+
+ // After a render quantum, change the delay to |delay1Frame|.
+ context.suspend(RENDER_QUANTUM_FRAMES / context.sampleRate)
+ .then(() => {
+ delay.delayTime.value = delay1Frame / context.sampleRate;
+ })
+ .then(() => context.resume());
+
+ src.start();
+ context.startRendering()
+ .then(renderedBuffer => {
+ let renderedData = renderedBuffer.getChannelData(0);
+
+ // The first |delay0Frame| frames should be zero.
+ should(
+ renderedData.slice(0, delay0Frame),
+ 'output[0:' + (delay0Frame - 1) + ']')
+ .beConstantValueOf(0);
+
+                // Now the ramp should show up from the delay.
+ let ramp0 =
+ new Float32Array(RENDER_QUANTUM_FRAMES - delay0Frame);
+ for (let k = 0; k < ramp0.length; ++k) {
+ ramp0[k] = rampData[k];
+ }
+
+ should(
+ renderedData.slice(delay0Frame, RENDER_QUANTUM_FRAMES),
+ 'output[' + delay0Frame + ':' +
+ (RENDER_QUANTUM_FRAMES - 1) + ']')
+ .beEqualToArray(ramp0);
+
+ // After one rendering quantum, the delay is changed to
+ // |delay1Frame|.
+ let ramp1 =
+ new Float32Array(context.length - RENDER_QUANTUM_FRAMES);
+ for (let k = 0; k < ramp1.length; ++k) {
+                  // Equivalently: ramp1[k] = 1 + k + RENDER_QUANTUM_FRAMES - delay1Frame;
+ ramp1[k] =
+ rampData[k + RENDER_QUANTUM_FRAMES - delay1Frame];
+ }
+ should(
+ renderedData.slice(RENDER_QUANTUM_FRAMES),
+ 'output[' + RENDER_QUANTUM_FRAMES + ':]')
+ .beEqualToArray(ramp1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'test1', description: 'Test value setter and setValueAtTime'},
+ (task, should) => {
+ testWithAutomation(should, {prefix: '', threshold: 6.5819e-5})
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'test2', description: 'Test value setter and modulation'},
+ (task, should) => {
+ testWithAutomation(should, {
+ prefix: 'With modulation: ',
+ modulator: true
+ }).then(() => task.done());
+ });
+
+      // Compare the .value setter with setValueAtTime().  Optionally allow
+      // modulation of |delayTime|.
+ function testWithAutomation(should, options) {
+ let prefix = options.prefix;
+ // Channel 0 is the output of delay node using the setter and channel 1
+ // is the output using setValueAtTime.
+ let context = new OfflineAudioContext(2, sampleRate, sampleRate);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // |delay0Frame| is the initial delay value in frames. |delay1Frame| is
+ // the new delay in frames. The values here are constrained only by the
+      // constraints for a DelayNode.  These are pretty arbitrary, except we
+      // wanted them to be fractional so as not to be on a frame boundary, to
+      // test interpolation compared with |setValueAtTime()|.
+ let delay0Frame = 3.1;
+ let delay1Frame = 47.2;
+
+ let delayTest = new DelayNode(
+ context, {delayTime: delay0Frame / context.sampleRate});
+ let delayRef = new DelayNode(
+ context, {delayTime: delay0Frame / context.sampleRate});
+
+ src.connect(delayTest).connect(merger, 0, 0);
+ src.connect(delayRef).connect(merger, 0, 1);
+
+ if (options.modulator) {
+ // Fairly arbitrary modulation of the delay time, with a peak
+ // variation of 10 ms.
+ let mod = new OscillatorNode(context, {frequency: 1000});
+ let modGain = new GainNode(context, {gain: .01});
+ mod.connect(modGain);
+ modGain.connect(delayTest.delayTime);
+ modGain.connect(delayRef.delayTime);
+ mod.start();
+ }
+
+ // The time at which the delay time of |delayTest| node will be
+ // changed. This MUST be on a render quantum boundary, but is
+ // otherwise arbitrary.
+ let changeTime = 3 * RENDER_QUANTUM_FRAMES / context.sampleRate;
+
+ // Schedule the delay change on |delayRef| and also apply the value
+ // setter for |delayTest| at |changeTime|.
+ delayRef.delayTime.setValueAtTime(
+ delay1Frame / context.sampleRate, changeTime);
+ context.suspend(changeTime)
+ .then(() => {
+ delayTest.delayTime.value = delay1Frame / context.sampleRate;
+ })
+ .then(() => context.resume());
+
+ src.start();
+
+ return context.startRendering().then(renderedBuffer => {
+ let actual = renderedBuffer.getChannelData(0);
+ let expected = renderedBuffer.getChannelData(1);
+
+ let match = should(actual, prefix + '.value setter output')
+ .beCloseToArray(
+ expected, {absoluteThreshold: options.threshold});
+ should(
+ match,
+ prefix + '.value setter output matches setValueAtTime output')
+ .beTrue();
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html b/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html
new file mode 100644
index 0000000000..1af0e0f010
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ AudioDestinationNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body>
+ <script>
+ function assert_doesnt_throw(f, desc) {
+ try {
+ f();
+ } catch (e) {
+ assert_true(false, desc);
+ return;
+ }
+ assert_true(true, desc);
+ }
+
+ test(function() {
+ var ac = new AudioContext();
+
+ assert_equals(ac.destination.channelCount, 2,
+ "A DestinationNode should have two channels by default");
+
+ assert_greater_than_equal(ac.destination.maxChannelCount, 2,
+ "maxChannelCount should be >= 2");
+
+ assert_throws_dom("IndexSizeError", function() {
+        ac.destination.channelCount = ac.destination.maxChannelCount + 1;
+ }, `Setting the channelCount to something greater than
+ the maxChannelCount should throw IndexSizeError`);
+
+ assert_throws_dom("NotSupportedError", function() {
+ ac.destination.channelCount = 0;
+ }, "Setting the channelCount to 0 should throw NotSupportedError");
+
+ assert_doesnt_throw(function() {
+ ac.destination.channelCount = ac.destination.maxChannelCount;
+ }, "Setting the channelCount to maxChannelCount should not throw");
+
+ assert_doesnt_throw(function() {
+ ac.destination.channelCount = 1;
+ }, "Setting the channelCount to 1 should not throw");
+ });
+
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/ctor-dynamicscompressor.html b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/ctor-dynamicscompressor.html
new file mode 100644
index 0000000000..c2460dfa1d
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/ctor-dynamicscompressor.html
@@ -0,0 +1,199 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: DynamicsCompressor
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'DynamicsCompressorNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node =
+ testDefaultConstructor(should, 'DynamicsCompressorNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'threshold', value: -24}, {name: 'knee', value: 30},
+ {name: 'ratio', value: 12}, {name: 'reduction', value: 0},
+ {name: 'attack', value: Math.fround(0.003)},
+ {name: 'release', value: 0.25}
+ ]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ // Can't use testAudioNodeOptions because the constraints for this node
+ // are not supported there.
+
+ // Array of test options to be run. Each entry is a dictionary where
+ // |testAttribute| is the name of the attribute to be tested,
+ // |testValue| is the value to be used, and |expectedErrorType| is the
+ // error type if the test is expected to throw an error.
+ // |expectedErrorType| should be set only if the test does throw.
+ let testOptions = [
+ // Test channel count
+ {
+ testAttribute: 'channelCount',
+ testValue: 1,
+ },
+ {
+ testAttribute: 'channelCount',
+ testValue: 2,
+ },
+ {
+ testAttribute: 'channelCount',
+ testValue: 0,
+ expectedErrorType: 'NotSupportedError'
+ },
+ {
+ testAttribute: 'channelCount',
+ testValue: 3,
+ expectedErrorType: 'NotSupportedError'
+ },
+ {
+ testAttribute: 'channelCount',
+ testValue: 99,
+ expectedErrorType: 'NotSupportedError'
+ },
+ // Test channel count mode
+ {
+ testAttribute: 'channelCountMode',
+ testValue: 'clamped-max',
+ },
+ {
+ testAttribute: 'channelCountMode',
+ testValue: 'explicit',
+ },
+ {
+ testAttribute: 'channelCountMode',
+ testValue: 'max',
+ expectedErrorType: 'NotSupportedError'
+ },
+ {
+ testAttribute: 'channelCountMode',
+ testValue: 'foobar',
+ expectedErrorType: TypeError
+ },
+ // Test channel interpretation
+ {
+ testAttribute: 'channelInterpretation',
+ testValue: 'speakers',
+ },
+ {
+ testAttribute: 'channelInterpretation',
+ testValue: 'discrete',
+ },
+ {
+ testAttribute: 'channelInterpretation',
+ testValue: 'foobar',
+ expectedErrorType: TypeError
+ }
+ ];
+
+ testOptions.forEach((option) => {
+ let nodeOptions = {};
+ nodeOptions[option.testAttribute] = option.testValue;
+
+ testNode(should, context, {
+ nodeOptions: nodeOptions,
+ testAttribute: option.testAttribute,
+ expectedValue: option.testValue,
+ expectedErrorType: option.expectedErrorType
+ });
+ });
+
+ task.done();
+ });
+
+ audit.define('constructor with options', (task, should) => {
+ let node;
+ let options =
+ {threshold: -33, knee: 15, ratio: 7, attack: 0.625, release: 0.125};
+
+ should(
+ () => {
+ node = new DynamicsCompressorNode(context, options);
+ },
+ 'node1 = new DynamicsCompressorNode(c, ' + JSON.stringify(options) +
+ ')')
+ .notThrow();
+ should(
+ node instanceof DynamicsCompressorNode,
+ 'node1 instanceof DynamicsCompressorNode')
+ .beEqualTo(true);
+
+ should(node.threshold.value, 'node1.threshold.value')
+ .beEqualTo(options.threshold);
+ should(node.knee.value, 'node1.knee.value').beEqualTo(options.knee);
+ should(node.ratio.value, 'node1.ratio.value').beEqualTo(options.ratio);
+ should(node.attack.value, 'node1.attack.value')
+ .beEqualTo(options.attack);
+ should(node.release.value, 'node1.release.value')
+ .beEqualTo(options.release);
+
+ should(node.channelCount, 'node1.channelCount').beEqualTo(2);
+ should(node.channelCountMode, 'node1.channelCountMode')
+ .beEqualTo('clamped-max');
+ should(node.channelInterpretation, 'node1.channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ audit.run();
+
+ // Test possible options for DynamicsCompressor constructor.
+ function testNode(should, context, options) {
+ // Node to be tested
+ let node;
+
+ let createNodeFunction = () => {
+ return () => node =
+ new DynamicsCompressorNode(context, options.nodeOptions);
+ };
+
+ let message = 'new DynamicsCompressorNode(c, ' +
+ JSON.stringify(options.nodeOptions) + ')';
+
+ if (options.expectedErrorType === TypeError) {
+ should(createNodeFunction(), message)
+ .throw(options.expectedErrorType);
+ } else if (options.expectedErrorType === 'NotSupportedError') {
+ should(createNodeFunction(), message)
+ .throw(DOMException, 'NotSupportedError');
+ } else {
+ should(createNodeFunction(), message).notThrow();
+ should(node[options.testAttribute], 'node.' + options.testAttribute)
+ .beEqualTo(options.expectedValue);
+ }
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html
new file mode 100644
index 0000000000..6c602010d0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/dynamicscompressor-basic.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ dynamicscompressor-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let context;
+ let compressor;
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Basic tests for DynamicsCompressorNode API'
+ },
+ function(task, should) {
+
+ context = new AudioContext();
+ compressor = context.createDynamicsCompressor();
+
+ should(compressor.threshold.value, 'compressor.threshold.value')
+ .beEqualTo(-24);
+ should(compressor.knee.value, 'compressor.knee.value')
+ .beEqualTo(30);
+ should(compressor.ratio.value, 'compressor.ratio.value')
+ .beEqualTo(12);
+ should(compressor.attack.value, 'compressor.attack.value')
+ .beEqualTo(Math.fround(0.003));
+ should(compressor.release.value, 'compressor.release.value')
+ .beEqualTo(0.25);
+ should(typeof compressor.reduction, 'typeof compressor.reduction')
+ .beEqualTo('number');
+ should(compressor.reduction, 'compressor.reduction').beEqualTo(0);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/ctor-gain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/ctor-gain.html
new file mode 100644
index 0000000000..dec273e969
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/ctor-gain.html
@@ -0,0 +1,79 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: Gain
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'GainNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'GainNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [{name: 'gain', value: 1}]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'GainNode');
+ task.done();
+ });
+
+ audit.define('constructor with options', (task, should) => {
+ let node;
+ let options = {
+ gain: -2,
+ };
+
+ should(
+ () => {
+ node = new GainNode(context, options);
+ },
+ 'node1 = new GainNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node instanceof GainNode, 'node1 instanceof GainNode')
+ .beEqualTo(true);
+
+ should(node.gain.value, 'node1.gain.value').beEqualTo(options.gain);
+
+ should(node.channelCount, 'node1.channelCount').beEqualTo(2);
+ should(node.channelCountMode, 'node1.channelCountMode')
+ .beEqualTo('max');
+ should(node.channelInterpretation, 'node1.channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-basic.html
new file mode 100644
index 0000000000..de2ba11a7f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-basic.html
@@ -0,0 +1,37 @@
+<!DOCTYPE html>
+<!--
+Verifies GainNode attributes and their type.
+-->
+<html>
+ <head>
+ <title>
+ gain-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('test', function(task, should) {
+ // Create audio context.
+ let context = new AudioContext();
+
+ // Create gain node.
+ let gainNode = context.createGain();
+
+ should(
+ gainNode.gain instanceof AudioParam,
+ 'gainNode.gain instanceof AudioParam')
+ .beTrue();
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html
new file mode 100644
index 0000000000..c41f4c9080
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain.html
@@ -0,0 +1,162 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Basic GainNode Functionality
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Tests that GainNode is properly scaling the gain. We'll render 11
+ // notes, starting at a gain of 1.0, decreasing in gain by 0.1. The 11th
+      // note will be of gain 0.0, so it should be silent at the end of the
+      // rendered output.
+
+ let audit = Audit.createTaskRunner();
+
+ // Use a power of two to eliminate any round-off when converting frame to
+ // time.
+ let sampleRate = 32768;
+ // Make sure the buffer duration and spacing are all exact frame lengths
+      // so that the note spacing also falls on frame boundaries, eliminating
+      // sub-sample-accurate starts of the ABSNs (AudioBufferSourceNodes).
+ let bufferDurationSeconds = Math.floor(0.125 * sampleRate) / sampleRate;
+ let numberOfNotes = 11;
+ // Leave about 20ms of silence, being sure this is an exact frame
+ // duration.
+ let noteSilence = Math.floor(0.020 * sampleRate) / sampleRate;
+ let noteSpacing = bufferDurationSeconds + noteSilence;
+
+ let lengthInSeconds = numberOfNotes * noteSpacing;
+
+ let context = 0;
+ let sinWaveBuffer = 0;
+
+ // Create a stereo AudioBuffer of duration |lengthInSeconds| consisting of
+ // a pure sine wave with the given |frequency|. Both channels contain the
+ // same data.
+ function createSinWaveBuffer(lengthInSeconds, frequency) {
+ let audioBuffer =
+ context.createBuffer(2, lengthInSeconds * sampleRate, sampleRate);
+
+ let n = audioBuffer.length;
+ let channelL = audioBuffer.getChannelData(0);
+ let channelR = audioBuffer.getChannelData(1);
+
+ for (let i = 0; i < n; ++i) {
+ channelL[i] = Math.sin(frequency * 2.0 * Math.PI * i / sampleRate);
+ channelR[i] = channelL[i];
+ }
+
+ return audioBuffer;
+ }
+
+ function playNote(time, gain, merger) {
+ let source = context.createBufferSource();
+ source.buffer = sinWaveBuffer;
+
+ let gainNode = context.createGain();
+ gainNode.gain.value = gain;
+
+ let sourceSplitter = context.createChannelSplitter(2);
+ let gainSplitter = context.createChannelSplitter(2);
+
+ // Split the stereo channels from the source output and the gain output
+ // and merge them into the desired channels of the merger.
+ source.connect(gainNode).connect(gainSplitter);
+ source.connect(sourceSplitter);
+
+ gainSplitter.connect(merger, 0, 0);
+ gainSplitter.connect(merger, 1, 1);
+ sourceSplitter.connect(merger, 0, 2);
+ sourceSplitter.connect(merger, 1, 3);
+
+ source.start(time);
+ }
+
+ audit.define(
+ {label: 'create context', description: 'Create context for test'},
+ function(task, should) {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 4, sampleRate * lengthInSeconds, sampleRate);
+ task.done();
+ });
+
+ audit.define(
+ {label: 'test', description: 'GainNode functionality'},
+ function(task, should) {
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ // Create a buffer for a short "note".
+ sinWaveBuffer = createSinWaveBuffer(bufferDurationSeconds, 880.0);
+
+ let startTimes = [];
+ let gainValues = [];
+
+ // Render 11 notes, starting at a gain of 1.0, decreasing in gain by
+ // 0.1. The last note will be of gain 0.0, so shouldn't be
+ // perceptible in the rendered output.
+ for (let i = 0; i < numberOfNotes; ++i) {
+ let time = i * noteSpacing;
+ let gain = 1.0 - i / (numberOfNotes - 1);
+ startTimes.push(time);
+ gainValues.push(gain);
+ playNote(time, gain, merger);
+ }
+
+ context.startRendering()
+ .then(buffer => {
+ let actual0 = buffer.getChannelData(0);
+ let actual1 = buffer.getChannelData(1);
+ let reference0 = buffer.getChannelData(2);
+ let reference1 = buffer.getChannelData(3);
+
+                  // It's ok to be a frame too long since the sine pulses are
+                  // followed by silence.
+ let bufferDurationFrames =
+ Math.ceil(bufferDurationSeconds * context.sampleRate);
+
+ // Apply the gains to the reference signal.
+ for (let k = 0; k < startTimes.length; ++k) {
+ // It's ok to be a frame early because the sine pulses are
+ // preceded by silence.
+ let startFrame =
+ Math.floor(startTimes[k] * context.sampleRate);
+ let gain = gainValues[k];
+ for (let n = 0; n < bufferDurationFrames; ++n) {
+ reference0[startFrame + n] *= gain;
+ reference1[startFrame + n] *= gain;
+ }
+ }
+
+                  // Verify the channels are close to the reference.
+ should(actual0, 'Left output from gain node')
+ .beCloseToArray(
+ reference0, {relativeThreshold: 1.1877e-7});
+ should(actual1, 'Right output from gain node')
+ .beCloseToArray(
+ reference1, {relativeThreshold: 1.1877e-7});
+
+ // Test the SNR too for both channels.
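+                  // computeSNR() (from audit-util.js) is assumed to return
+                  // a linear signal-to-error power ratio; convert it to dB.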
+ let snr0 = 10 * Math.log10(computeSNR(actual0, reference0));
+ let snr1 = 10 * Math.log10(computeSNR(actual1, reference1));
+ should(snr0, 'Left SNR (in dB)')
+ .beGreaterThanOrEqualTo(148.71);
+ should(snr1, 'Right SNR (in dB)')
+ .beGreaterThanOrEqualTo(148.71);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html
new file mode 100644
index 0000000000..6326d00dfb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html
@@ -0,0 +1,121 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Gain Dezippering Test: Dezippering Removed
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test0', description: 'Dezippering of GainNode removed'},
+ (task, should) => {
+ // Only need a few frames to verify that dezippering has been
+ // removed from the GainNode. Sample rate is pretty arbitrary.
+ let context = new OfflineAudioContext(1, 1024, 16000);
+
+ // Send a unit source to the gain node so we can measure the effect
+ // of the gain node.
+ let src = new ConstantSourceNode(context, {offset: 1});
+ let g = new GainNode(context, {gain: 1});
+ src.connect(g).connect(context.destination);
+
+ context.suspend(RENDER_QUANTUM_FRAMES / context.sampleRate)
+ .then(() => {
+ g.gain.value = .5;
+ })
+ .then(() => context.resume());
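+          // suspend() lands on a render-quantum boundary, so the direct
+          // .value change above takes effect exactly at frame 128.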
+
+ src.start();
+
+ context.startRendering()
+ .then(audio => {
+ let c = audio.getChannelData(0);
+
+ // If dezippering has been removed, the gain output should
+ // instantly jump at frame 128 to 0.5.
+ should(c.slice(0, 128), 'output[0:127]').beConstantValueOf(1);
+ should(c.slice(128), 'output[128:]').beConstantValueOf(0.5);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test2',
+ description: 'Compare value setter and setValueAtTime'
+ },
+ (task, should) => {
+ testWithAutomation(should, {prefix: ''}).then(() => task.done());
+ });
+
+ audit.define(
+ {label: 'test3', description: 'Automation effects'},
+ (task, should) => {
+ testWithAutomation(should, {
+ prefix: 'With modulation: ',
+ modulator: true
+ }).then(() => task.done());
+ });
+
+ audit.run();
+
+ function testWithAutomation(should, options) {
+ // Sample rate must be a power of two to eliminate round-off in
+ // computing the time at render quantum boundaries.
+ let context = new OfflineAudioContext(2, 1024, 16384);
+        let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+ let gainTest = new GainNode(context);
+ let gainRef = new GainNode(context);
+
+ src.connect(gainTest).connect(merger, 0, 0);
+ src.connect(gainRef).connect(merger, 0, 1);
+
+ if (options.modulator) {
+ let mod = new OscillatorNode(context, {frequency: 1000});
+ let modGain = new GainNode(context);
+ mod.connect(modGain);
+ modGain.connect(gainTest.gain);
+ modGain.connect(gainRef.gain);
+ mod.start();
+ }
+
+ // Change the gains. Must do the change on a render boundary!
+ let changeTime = 3 * RENDER_QUANTUM_FRAMES / context.sampleRate;
+ let newGain = .3;
+
+ gainRef.gain.setValueAtTime(newGain, changeTime);
+ context.suspend(changeTime)
+ .then(() => gainTest.gain.value = newGain)
+ .then(() => context.resume());
+
+ src.start();
+
+ return context.startRendering().then(audio => {
+ let actual = audio.getChannelData(0);
+ let expected = audio.getChannelData(1);
+
+ // The values using the .value setter must be identical to the
+ // values using setValueAtTime.
+ let match = should(actual, options.prefix + '.value setter output')
+ .beEqualToArray(expected);
+
+ should(
+ match,
+ options.prefix +
+ '.value setter output matches setValueAtTime output')
+ .beTrue();
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/ctor-iirfilter.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/ctor-iirfilter.html
new file mode 100644
index 0000000000..e884d487af
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/ctor-iirfilter.html
@@ -0,0 +1,126 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: IIRFilter
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'IIRFilterNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'IIRFilterNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers',
+ constructorOptions: {feedforward: [1], feedback: [1, -.9]}
+ });
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(
+ should, context, 'IIRFilterNode',
+ {additionalOptions: {feedforward: [1, 1], feedback: [1, .5]}});
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+
+ let options = {feedback: [1, .5]};
+ should(
+ () => {
+ node = new IIRFilterNode(context, options);
+ },
+            'node = new IIRFilterNode(c, ' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+
+ options = {feedforward: [1, 0.5]};
+ should(
+ () => {
+ node = new IIRFilterNode(context, options);
+ },
+ 'node = new IIRFilterNode(c, ' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+
+ task.done();
+ });
+
+ // Test functionality of constructor. This is needed because we have no
+      // way of determining if the filter coefficients were actually set
+ // appropriately.
+
+ // TODO(rtoy): This functionality test should be moved out to a separate
+ // file.
+ audit.define('functionality', (task, should) => {
+ let options = {feedback: [1, .5], feedforward: [1, 1]};
+
+ // Create two-channel offline context; sample rate and length are fairly
+ // arbitrary. Channel 0 contains the test output and channel 1 contains
+ // the expected output.
+ let sampleRate = 48000;
+ let renderLength = 0.125;
+ let testContext =
+ new OfflineAudioContext(2, renderLength * sampleRate, sampleRate);
+
+ // The test node uses the constructor. The reference node creates the
+ // same filter but uses the old factory method.
+ let testNode = new IIRFilterNode(testContext, options);
+ let refNode = testContext.createIIRFilter(
+ Float32Array.from(options.feedforward),
+ Float32Array.from(options.feedback));
+
+ let source = testContext.createOscillator();
+ source.connect(testNode);
+ source.connect(refNode);
+
+ let merger = testContext.createChannelMerger(
+ testContext.destination.channelCount);
+
+ testNode.connect(merger, 0, 0);
+ refNode.connect(merger, 0, 1);
+
+ merger.connect(testContext.destination);
+
+ source.start();
+ testContext.startRendering()
+ .then(function(resultBuffer) {
+ let actual = resultBuffer.getChannelData(0);
+ let expected = resultBuffer.getChannelData(1);
+
+ // The output from the two channels should be exactly equal
+ // because exactly the same IIR filter should have been created.
+ should(actual, 'Output of filter using new IIRFilter(...)')
+ .beEqualToArray(expected);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-basic.html
new file mode 100644
index 0000000000..7828f05226
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-basic.html
@@ -0,0 +1,204 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Basic IIRFilterNode Properties
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ let testFrames = 100;
+
+ // Global context that can be used by the individual tasks. It must be
+ // defined by the initialize task.
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ should(() => {
+ context = new OfflineAudioContext(1, testFrames, sampleRate);
+ }, 'Initialize context for testing').notThrow();
+ task.done();
+ });
+
+ audit.define('existence', (task, should) => {
+ should(context.createIIRFilter, 'context.createIIRFilter').exist();
+ task.done();
+ });
+
+ audit.define('parameters', (task, should) => {
+ // Create a really simple IIR filter. Doesn't much matter what.
+ let coef = Float32Array.from([1]);
+
+ let f = context.createIIRFilter(coef, coef);
+
+ should(f.numberOfInputs, 'numberOfInputs').beEqualTo(1);
+ should(f.numberOfOutputs, 'numberOfOutputs').beEqualTo(1);
+ should(f.channelCountMode, 'channelCountMode').beEqualTo('max');
+ should(f.channelInterpretation, 'channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ audit.define('exceptions-createIIRFilter', (task, should) => {
+ should(function() {
+ // Two args are required.
+ context.createIIRFilter();
+ }, 'createIIRFilter()').throw(TypeError);
+
+ should(function() {
+ // Two args are required.
+ context.createIIRFilter(new Float32Array(1));
+ }, 'createIIRFilter(new Float32Array(1))').throw(TypeError);
+
+ should(function() {
+ // null is not valid
+ context.createIIRFilter(null, null);
+ }, 'createIIRFilter(null, null)').throw(TypeError);
+
+ should(function() {
+ // There has to be at least one coefficient.
+ context.createIIRFilter([], []);
+ }, 'createIIRFilter([], [])').throw(DOMException, 'NotSupportedError');
+
+ should(function() {
+ // There has to be at least one coefficient.
+ context.createIIRFilter([1], []);
+ }, 'createIIRFilter([1], [])').throw(DOMException, 'NotSupportedError');
+
+ should(function() {
+ // There has to be at least one coefficient.
+ context.createIIRFilter([], [1]);
+ }, 'createIIRFilter([], [1])').throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ // Max allowed size for the coefficient arrays.
+ let fb = new Float32Array(20);
+ fb[0] = 1;
+ context.createIIRFilter(fb, fb);
+ },
+ 'createIIRFilter(new Float32Array(20), new Float32Array(20))')
+ .notThrow();
+
+ should(
+ function() {
+ // Max allowed size for the feedforward coefficient array.
+ let coef = new Float32Array(21);
+ coef[0] = 1;
+ context.createIIRFilter(coef, [1]);
+ },
+ 'createIIRFilter(new Float32Array(21), [1])')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ // Max allowed size for the feedback coefficient array.
+ let coef = new Float32Array(21);
+ coef[0] = 1;
+ context.createIIRFilter([1], coef);
+ },
+ 'createIIRFilter([1], new Float32Array(21))')
+ .throw(DOMException, 'NotSupportedError');
+
+ should(
+ function() {
+ // First feedback coefficient can't be 0.
+ context.createIIRFilter([1], new Float32Array(2));
+ },
+ 'createIIRFilter([1], new Float32Array(2))')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(
+ function() {
+ // feedforward coefficients can't all be zero.
+ context.createIIRFilter(new Float32Array(10), [1]);
+ },
+ 'createIIRFilter(new Float32Array(10), [1])')
+ .throw(DOMException, 'InvalidStateError');
+
+ should(function() {
+ // Feedback coefficients must be finite.
+ context.createIIRFilter([1], [1, Infinity, NaN]);
+        }, 'createIIRFilter([1], [1, Infinity, NaN])').throw(TypeError);
+
+ should(function() {
+ // Feedforward coefficients must be finite.
+ context.createIIRFilter([1, Infinity, NaN], [1]);
+        }, 'createIIRFilter([1, Infinity, NaN], [1])').throw(TypeError);
+
+ should(function() {
+ // Test that random junk in the array is converted to NaN.
+ context.createIIRFilter([1, 'abc', []], [1]);
+ }, 'createIIRFilter([1, \'abc\', []], [1])').throw(TypeError);
+
+ task.done();
+ });
+
+ audit.define('exceptions-getFrequencyData', (task, should) => {
+ // Create a really simple IIR filter. Doesn't much matter what.
+ let coef = Float32Array.from([1]);
+
+ let f = context.createIIRFilter(coef, coef);
+
+ should(
+ function() {
+ // frequencyHz can't be null.
+ f.getFrequencyResponse(
+ null, new Float32Array(1), new Float32Array(1));
+ },
+ 'getFrequencyResponse(null, new Float32Array(1), new Float32Array(1))')
+ .throw(TypeError);
+
+ should(
+ function() {
+ // magResponse can't be null.
+ f.getFrequencyResponse(
+ new Float32Array(1), null, new Float32Array(1));
+ },
+ 'getFrequencyResponse(new Float32Array(1), null, new Float32Array(1))')
+ .throw(TypeError);
+
+ should(
+ function() {
+ // phaseResponse can't be null.
+ f.getFrequencyResponse(
+ new Float32Array(1), new Float32Array(1), null);
+ },
+ 'getFrequencyResponse(new Float32Array(1), new Float32Array(1), null)')
+ .throw(TypeError);
+
+ should(
+ function() {
+              // magResponse array must be the same length as frequencyHz
+ f.getFrequencyResponse(
+ new Float32Array(10), new Float32Array(1),
+ new Float32Array(20));
+ },
+ 'getFrequencyResponse(new Float32Array(10), new Float32Array(1), new Float32Array(20))')
+ .throw(DOMException, 'InvalidAccessError');
+
+ should(
+ function() {
+ // phaseResponse array must be the same length as frequencyHz
+ f.getFrequencyResponse(
+ new Float32Array(10), new Float32Array(20),
+ new Float32Array(1));
+ },
+ 'getFrequencyResponse(new Float32Array(10), new Float32Array(20), new Float32Array(1))')
+ .throw(DOMException, 'InvalidAccessError');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-getFrequencyResponse.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-getFrequencyResponse.html
new file mode 100644
index 0000000000..c98555f161
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter-getFrequencyResponse.html
@@ -0,0 +1,159 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test IIRFilter getFrequencyResponse() functionality
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/biquad-filters.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // Some short duration; we're not actually looking at the rendered output.
+ let testDurationSec = 0.01;
+
+ // Number of frequency samples to take.
+ let numberOfFrequencies = 1000;
+
+ let audit = Audit.createTaskRunner();
+
+
+ // Compute a set of linearly spaced frequencies.
+ function createFrequencies(nFrequencies, sampleRate) {
+ let frequencies = new Float32Array(nFrequencies);
+ let nyquist = sampleRate / 2;
+ let freqDelta = nyquist / nFrequencies;
+
+ for (let k = 0; k < nFrequencies; ++k) {
+ frequencies[k] = k * freqDelta;
+ }
+
+ return frequencies;
+ }
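+      // For the values used below (1000 frequencies at 48 kHz) this yields
+      // a 24 Hz spacing: 0, 24, 48, ..., 23976 Hz.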
+
+ audit.define('1-pole IIR', (task, should) => {
+ let context = new OfflineAudioContext(
+ 1, testDurationSec * sampleRate, sampleRate);
+
+ let iir = context.createIIRFilter([1], [1, -0.9]);
+ let frequencies =
+ createFrequencies(numberOfFrequencies, context.sampleRate);
+
+ let iirMag = new Float32Array(numberOfFrequencies);
+ let iirPhase = new Float32Array(numberOfFrequencies);
+ let trueMag = new Float32Array(numberOfFrequencies);
+ let truePhase = new Float32Array(numberOfFrequencies);
+
+ // The IIR filter is
+ // H(z) = 1/(1 - 0.9*z^(-1)).
+ //
+ // The frequency response is
+ // H(exp(j*w)) = 1/(1 - 0.9*exp(-j*w)).
+ //
+ // Thus, the magnitude is
+ // |H(exp(j*w))| = 1/sqrt(1.81-1.8*cos(w)).
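+          //   (which follows from |1 - 0.9*exp(-j*w)|^2
+          //      = (1 - 0.9*cos(w))^2 + (0.9*sin(w))^2 = 1.81 - 1.8*cos(w)).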
+ //
+ // The phase is
+          //   arg(H(exp(j*w))) = atan(0.9*sin(w)/(0.9*cos(w) - 1))
+
+ let frequencyScale = Math.PI / (sampleRate / 2);
+
+ for (let k = 0; k < frequencies.length; ++k) {
+ let omega = frequencyScale * frequencies[k];
+ trueMag[k] = 1 / Math.sqrt(1.81 - 1.8 * Math.cos(omega));
+ truePhase[k] =
+ Math.atan(0.9 * Math.sin(omega) / (0.9 * Math.cos(omega) - 1));
+ }
+
+ iir.getFrequencyResponse(frequencies, iirMag, iirPhase);
+
+ // Thresholds were experimentally determined.
+ should(iirMag, '1-pole IIR Magnitude Response')
+ .beCloseToArray(trueMag, {absoluteThreshold: 2.8611e-6});
+ should(iirPhase, '1-pole IIR Phase Response')
+ .beCloseToArray(truePhase, {absoluteThreshold: 1.7882e-7});
+
+ task.done();
+ });
+
+ audit.define('compare IIR and biquad', (task, should) => {
+ // Create an IIR filter equivalent to the biquad filter. Compute the
+ // frequency response for both and verify that they are the same.
+ let context = new OfflineAudioContext(
+ 1, testDurationSec * sampleRate, sampleRate);
+
+ let biquad = context.createBiquadFilter();
+ let coef = createFilter(
+ biquad.type, biquad.frequency.value / (context.sampleRate / 2),
+ biquad.Q.value, biquad.gain.value);
+
+ let iir = context.createIIRFilter(
+ [coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]);
+
+ let frequencies =
+ createFrequencies(numberOfFrequencies, context.sampleRate);
+ let biquadMag = new Float32Array(numberOfFrequencies);
+ let biquadPhase = new Float32Array(numberOfFrequencies);
+ let iirMag = new Float32Array(numberOfFrequencies);
+ let iirPhase = new Float32Array(numberOfFrequencies);
+
+ biquad.getFrequencyResponse(frequencies, biquadMag, biquadPhase);
+ iir.getFrequencyResponse(frequencies, iirMag, iirPhase);
+
+ // Thresholds were experimentally determined.
+ should(iirMag, 'IIR Magnitude Response').beCloseToArray(biquadMag, {
+ absoluteThreshold: 2.7419e-5
+ });
+ should(iirPhase, 'IIR Phase Response').beCloseToArray(biquadPhase, {
+ absoluteThreshold: 2.7657e-5
+ });
+
+ task.done();
+ });
+
+ audit.define(
+ {
+ label: 'getFrequencyResponse',
+ description: 'Test out-of-bounds frequency values'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(1, 1, sampleRate);
+ let filter = new IIRFilterNode(
+ context, {feedforward: [1], feedback: [1, -.9]});
+
+ // Frequencies to test. These are all outside the valid range of
+ // frequencies of 0 to Nyquist.
+ let freq = new Float32Array(2);
+ freq[0] = -1;
+ freq[1] = context.sampleRate / 2 + 1;
+
+ let mag = new Float32Array(freq.length);
+ let phase = new Float32Array(freq.length);
+
+ filter.getFrequencyResponse(freq, mag, phase);
+
+              // Verify that the returned magnitude and phase entries are
+              // all NaN since the frequencies are outside the valid range.
+ for (let k = 0; k < mag.length; ++k) {
+ should(mag[k],
+ 'Magnitude response at frequency ' + freq[k])
+ .beNaN();
+ }
+
+ for (let k = 0; k < phase.length; ++k) {
+ should(phase[k],
+ 'Phase response at frequency ' + freq[k])
+ .beNaN();
+ }
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter.html
new file mode 100644
index 0000000000..aa38a6bfca
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/iirfilter.html
@@ -0,0 +1,572 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Basic IIRFilterNode Operation
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/biquad-filters.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 24000;
+ let testDurationSec = 0.25;
+ let testFrames = testDurationSec * sampleRate;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('coefficient-normalization', (task, should) => {
+        // Test that the feedback coefficients are normalized. Do this by
+ // creating two IIRFilterNodes. One has normalized coefficients, and
+ // one doesn't. Compute the difference and make sure they're the same.
+ let context = new OfflineAudioContext(2, testFrames, sampleRate);
+
+ // Use a simple impulse as the source.
+ let buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Gain node for computing the difference between the filters.
+ let gain = context.createGain();
+ gain.gain.value = -1;
+
+ // The IIR filters. Use a common feedforward array.
+ let ff = [1];
+
+ let fb1 = [1, .9];
+
+ let fb2 = new Float64Array(2);
+ // Scale the feedback coefficients by an arbitrary factor.
+ let coefScaleFactor = 2;
+ for (let k = 0; k < fb2.length; ++k) {
+ fb2[k] = coefScaleFactor * fb1[k];
+ }
+
+ let iir1;
+ let iir2;
+
+ should(function() {
+ iir1 = context.createIIRFilter(ff, fb1);
+ }, 'createIIRFilter with normalized coefficients').notThrow();
+
+ should(function() {
+ iir2 = context.createIIRFilter(ff, fb2);
+ }, 'createIIRFilter with unnormalized coefficients').notThrow();
+
+ // Create the graph. The output of iir1 (normalized coefficients) is
+ // channel 0, and the output of iir2 (unnormalized coefficients), with
+ // appropriate scaling, is channel 1.
+ let merger = context.createChannelMerger(2);
+ source.connect(iir1);
+ source.connect(iir2);
+ iir1.connect(merger, 0, 0);
+ iir2.connect(gain);
+
+ // The gain for the gain node should be set to compensate for the
+ // scaling of the coefficients. Since iir2 has scaled the coefficients
+ // by coefScaleFactor, the output is reduced by the same factor, so
+ // adjust the gain to scale the output of iir2 back up.
+ gain.gain.value = coefScaleFactor;
+ gain.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ source.start();
+
+ // Rock and roll!
+
+ context.startRendering()
+ .then(function(result) {
+ // Find the max amplitude of the result, which should be near
+ // zero.
+ let iir1Data = result.getChannelData(0);
+ let iir2Data = result.getChannelData(1);
+
+                // Threshold isn't exactly zero because scaling the
+                // coefficients changes the round-off in the filter arithmetic.
+ should(
+ iir2Data,
+ 'Output of IIR filter with unnormalized coefficients')
+ .beCloseToArray(iir1Data, {absoluteThreshold: 2.1958e-38});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('one-zero', (task, should) => {
+ // Create a simple 1-zero filter and compare with the expected output.
+ let context = new OfflineAudioContext(1, testFrames, sampleRate);
+
+ // Use a simple impulse as the source
+ let buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // The filter is y(n) = 0.5*(x(n) + x(n-1)), a simple 2-point moving
+ // average. This is rather arbitrary; keep it simple.
+
+ let iir = context.createIIRFilter([0.5, 0.5], [1]);
+
+ // Create the graph
+ source.connect(iir);
+ iir.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ context.startRendering()
+ .then(function(result) {
+ let actual = result.getChannelData(0);
+ let expected = new Float64Array(testFrames);
+ // The filter is a simple 2-point moving average of an impulse, so
+ // the first two values are non-zero and the rest are zero.
+ expected[0] = 0.5;
+ expected[1] = 0.5;
+ should(actual, 'IIR 1-zero output').beCloseToArray(expected, {
+ absoluteThreshold: 0
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('one-pole', (task, should) => {
+ // Create a simple 1-pole filter and compare with the expected output.
+
+        // The filter is y(n) + c*y(n-1) = x(n). The analytical response is
+ // (-c)^n, so choose a suitable number of frames to run the test for
+ // where the output isn't flushed to zero.
+ let c = 0.9;
+ let eps = 1e-20;
+ let duration = Math.floor(Math.log(eps) / Math.log(Math.abs(c)));
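+        // (With c = 0.9 and eps = 1e-20 this comes to 437 frames.)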
+ let context = new OfflineAudioContext(1, duration, sampleRate);
+
+ // Use a simple impulse as the source
+ let buffer = context.createBuffer(1, 1, sampleRate);
+ buffer.getChannelData(0)[0] = 1;
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ let iir = context.createIIRFilter([1], [1, c]);
+
+ // Create the graph
+ source.connect(iir);
+ iir.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ context.startRendering()
+ .then(function(result) {
+ let actual = result.getChannelData(0);
+ let expected = new Float64Array(actual.length);
+
+              // The filter is a simple 1-pole filter: y(n) = -c*y(n-1) + x(n),
+ // with an impulse as the input.
+ expected[0] = 1;
+              for (let k = 1; k < expected.length; ++k) {
+ expected[k] = -c * expected[k - 1];
+ }
+
+ // Threshold isn't exactly zero due to round-off in the
+ // single-precision IIRFilterNode computations versus the
+              // double-precision JavaScript computations.
+ should(actual, 'IIR 1-pole output').beCloseToArray(expected, {
+ absoluteThreshold: 2.7657e-8
+ });
+ })
+ .then(() => task.done());
+ });
+
+ // Return a function suitable for use as a defineTask function. This
+ // function creates an IIRFilterNode equivalent to the specified
+ // BiquadFilterNode and compares the outputs. The outputs from the two
+ // filters should be virtually identical.
+ function testWithBiquadFilter(filterType, errorThreshold, snrThreshold) {
+ return (task, should) => {
+ let context = new OfflineAudioContext(2, testFrames, sampleRate);
+
+ // Use a constant (step function) as the source
+ let buffer = createConstantBuffer(context, testFrames, 1);
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+
+        // Create the biquad. Choose some rather arbitrary values for Q and
+        // gain for the biquad so that the shelf filters aren't trivially flat.
+ let biquad = context.createBiquadFilter();
+ biquad.type = filterType;
+ biquad.Q.value = 10;
+ biquad.gain.value = 10;
+
+ // Create the equivalent IIR Filter node by computing the coefficients
+ // of the given biquad filter type.
+ let nyquist = sampleRate / 2;
+ let coef = createFilter(
+ filterType, biquad.frequency.value / nyquist, biquad.Q.value,
+ biquad.gain.value);
+
+ let iir = context.createIIRFilter(
+ [coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]);
+
+ let merger = context.createChannelMerger(2);
+ // Create the graph
+ source.connect(biquad);
+ source.connect(iir);
+
+ biquad.connect(merger, 0, 0);
+ iir.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ // Rock and roll!
+ source.start();
+
+ context.startRendering()
+ .then(function(result) {
+ // Find the max amplitude of the result, which should be near
+ // zero.
+ let expected = result.getChannelData(0);
+ let actual = result.getChannelData(1);
+
+                // On macOS, WebAudio uses an optimized Biquad implementation
+                // that differs from the one used on Linux and Windows. This
+                // can cause the outputs to differ slightly even when the
+                // threshold check passes, so only a very small number of
+                // elements that are known to be consistent are printed out.
+ should(actual, 'IIRFilter for Biquad ' + filterType)
+ .beCloseToArray(expected, errorThreshold);
+
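+                // computeSNR (audit-util.js) returns the ratio of signal
+                // power to noise power; convert it to dB here.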
+ let snr = 10 * Math.log10(computeSNR(actual, expected));
+                should(snr, 'SNR for IIRFilter for Biquad ' + filterType)
+ .beGreaterThanOrEqualTo(snrThreshold);
+ })
+ .then(() => task.done());
+ };
+ }
+
+ // Thresholds here are experimentally determined.
+ let biquadTestConfigs = [
+ {
+ filterType: 'lowpass',
+ snrThreshold: 91.221,
+ errorThreshold: {relativeThreshold: 4.9834e-5}
+ },
+ {
+ filterType: 'highpass',
+ snrThreshold: 105.4590,
+ errorThreshold: {absoluteThreshold: 2.9e-6, relativeThreshold: 3e-5}
+ },
+ {
+ filterType: 'bandpass',
+ snrThreshold: 104.060,
+ errorThreshold: {absoluteThreshold: 2e-7, relativeThreshold: 8.7e-4}
+ },
+ {
+ filterType: 'notch',
+ snrThreshold: 91.312,
+ errorThreshold: {absoluteThreshold: 0, relativeThreshold: 4.22e-5}
+ },
+ {
+ filterType: 'allpass',
+ snrThreshold: 91.319,
+ errorThreshold: {absoluteThreshold: 0, relativeThreshold: 4.31e-5}
+ },
+ {
+ filterType: 'lowshelf',
+ snrThreshold: 90.609,
+ errorThreshold: {absoluteThreshold: 0, relativeThreshold: 2.98e-5}
+ },
+ {
+ filterType: 'highshelf',
+ snrThreshold: 103.159,
+ errorThreshold: {absoluteThreshold: 0, relativeThreshold: 1.24e-5}
+ },
+ {
+ filterType: 'peaking',
+ snrThreshold: 91.504,
+ errorThreshold: {absoluteThreshold: 0, relativeThreshold: 5.05e-5}
+ }
+ ];
+
+ // Create a set of tasks based on biquadTestConfigs.
+      for (let k = 0; k < biquadTestConfigs.length; ++k) {
+ let config = biquadTestConfigs[k];
+ let name = k + ': ' + config.filterType;
+ audit.define(
+ name,
+ testWithBiquadFilter(
+ config.filterType, config.errorThreshold, config.snrThreshold));
+ }
+
+ audit.define('multi-channel', (task, should) => {
+ // Multi-channel test. Create a biquad filter and the equivalent IIR
+ // filter. Filter the same multichannel signal and compare the results.
+ let nChannels = 3;
+ let context =
+ new OfflineAudioContext(nChannels, testFrames, sampleRate);
+
+ // Create a set of oscillators as the multi-channel source.
+ let source = [];
+
+        for (let k = 0; k < nChannels; ++k) {
+ source[k] = context.createOscillator();
+ source[k].type = 'sawtooth';
+ // The frequency of the oscillator is pretty arbitrary, but each
+ // oscillator should have a different frequency.
+ source[k].frequency.value = 100 + k * 100;
+ }
+
+ let merger = context.createChannelMerger(3);
+
+ let biquad = context.createBiquadFilter();
+
+ // Create the equivalent IIR Filter node.
+ let nyquist = sampleRate / 2;
+ let coef = createFilter(
+ biquad.type, biquad.frequency.value / nyquist, biquad.Q.value,
+ biquad.gain.value);
+ let fb = [1, coef.a1, coef.a2];
+ let ff = [coef.b0, coef.b1, coef.b2];
+
+ let iir = context.createIIRFilter(ff, fb);
+ // Gain node to compute the difference between the IIR and biquad
+ // filter.
+ let gain = context.createGain();
+ gain.gain.value = -1;
+
+ // Create the graph.
+        for (let k = 0; k < nChannels; ++k)
+ source[k].connect(merger, 0, k);
+
+ merger.connect(biquad);
+ merger.connect(iir);
+ iir.connect(gain);
+ biquad.connect(context.destination);
+ gain.connect(context.destination);
+
+        for (let k = 0; k < nChannels; ++k)
+ source[k].start();
+
+ context.startRendering()
+ .then(function(result) {
+ let errorThresholds = [3.7671e-5, 3.0071e-5, 2.6241e-5];
+
+ // Check the difference signal on each channel
+              for (let channel = 0; channel < result.numberOfChannels; ++channel) {
+ // Find the max amplitude of the result, which should be near
+ // zero.
+ let data = result.getChannelData(channel);
+                // Seed the reduction with 0 so the first element also goes
+                // through Math.abs.
+                let maxError =
+                    data.reduce(function(reducedValue, currentValue) {
+                      return Math.max(reducedValue, Math.abs(currentValue));
+                    }, 0);
+
+ should(
+ maxError,
+ 'Max difference between IIR and Biquad on channel ' +
+ channel)
+ .beLessThanOrEqualTo(errorThresholds[channel]);
+ }
+
+ })
+ .then(() => task.done());
+ });
+
+ // Apply an IIRFilter to the given input signal.
+ //
+ // IIR filter in the time domain is
+ //
+ // y[n] = sum(ff[k]*x[n-k], k, 0, M) - sum(fb[k]*y[n-k], k, 1, N)
+ //
+ function iirFilter(input, feedforward, feedback) {
+ // For simplicity, create an x buffer that contains the input, and a y
+ // buffer that contains the output. Both of these buffers have an
+ // initial work space to implement the initial memory of the filter.
+ let workSize = Math.max(feedforward.length, feedback.length);
+ let x = new Float32Array(input.length + workSize);
+
+ // Float64 because we want to match the implementation that uses doubles
+ // to minimize roundoff.
+ let y = new Float64Array(input.length + workSize);
+
+        // Copy the input over, offset by the work space so that the x and y
+        // indices line up below.
+        for (let k = 0; k < input.length; ++k)
+          x[k + workSize] = input[k];
+
+ // Run the filter
+ for (let n = 0; n < input.length; ++n) {
+ let index = n + workSize;
+ let yn = 0;
+ for (let k = 0; k < feedforward.length; ++k)
+ yn += feedforward[k] * x[index - k];
+          // The feedback sum starts at k = 1; feedback[0] is assumed to be 1.
+          for (let k = 1; k < feedback.length; ++k)
+ yn -= feedback[k] * y[index - k];
+
+ y[index] = yn;
+ }
+
+ return y.slice(workSize).map(Math.fround);
+ }
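+      // For example, iirFilter([1, 0, 0], [0.5, 0.5], [1]) returns
+      // [0.5, 0.5, 0]: the 2-point moving average of an impulse.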
+
+ // Cascade the two given biquad filters to create one IIR filter.
+ function cascadeBiquads(f1Coef, f2Coef) {
+ // The biquad filters are:
+ //
+ // f1 = (b10 + b11/z + b12/z^2)/(1 + a11/z + a12/z^2);
+ // f2 = (b20 + b21/z + b22/z^2)/(1 + a21/z + a22/z^2);
+ //
+ // To cascade them, multiply the two transforms together to get a fourth
+ // order IIR filter.
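+      //
+      // Equivalently, the cascaded numerator (and denominator) coefficients
+      // are the convolution of the two biquads' coefficient arrays.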
+
+ let numProduct = [
+ f1Coef.b0 * f2Coef.b0, f1Coef.b0 * f2Coef.b1 + f1Coef.b1 * f2Coef.b0,
+ f1Coef.b0 * f2Coef.b2 + f1Coef.b1 * f2Coef.b1 + f1Coef.b2 * f2Coef.b0,
+ f1Coef.b1 * f2Coef.b2 + f1Coef.b2 * f2Coef.b1, f1Coef.b2 * f2Coef.b2
+ ];
+
+ let denProduct = [
+ 1, f2Coef.a1 + f1Coef.a1,
+ f2Coef.a2 + f1Coef.a1 * f2Coef.a1 + f1Coef.a2,
+ f1Coef.a1 * f2Coef.a2 + f1Coef.a2 * f2Coef.a1, f1Coef.a2 * f2Coef.a2
+ ];
+
+ return {
+ ff: numProduct, fb: denProduct
+ }
+ }
+
+ // Find the magnitude of the root of the quadratic that has the maximum
+ // magnitude.
+ //
+ // The quadratic is z^2 + a1 * z + a2 and we want the root z that has the
+ // largest magnitude.
+ function largestRootMagnitude(a1, a2) {
+ let discriminant = a1 * a1 - 4 * a2;
+ if (discriminant < 0) {
+ // Complex roots: -a1/2 +/- i*sqrt(-d)/2. Thus the magnitude of each
+ // root is the same and is sqrt(a1^2/4 + |d|/4)
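+        // (equivalently sqrt(a2), since the product of the conjugate roots
+        // of z^2 + a1*z + a2 is a2).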
+ let d = Math.sqrt(-discriminant);
+ return Math.hypot(a1 / 2, d / 2);
+ } else {
+ // Real roots
+ let d = Math.sqrt(discriminant);
+ return Math.max(Math.abs((-a1 + d) / 2), Math.abs((-a1 - d) / 2));
+ }
+ }
+
+ audit.define('4th-order-iir', (task, should) => {
+ // Cascade 2 lowpass biquad filters and compare that with the equivalent
+ // 4th order IIR filter.
+
+ let nyquist = sampleRate / 2;
+ // Compute the coefficients of a lowpass filter.
+
+ // First some preliminary stuff. Compute the coefficients of the
+        // biquad. This is used to figure out how many frames to use in the test.
+ let biquadType = 'lowpass';
+ let biquadCutoff = 350;
+ let biquadQ = 5;
+ let biquadGain = 1;
+
+ let coef = createFilter(
+ biquadType, biquadCutoff / nyquist, biquadQ, biquadGain);
+
+ // Cascade the biquads together to create an equivalent IIR filter.
+ let cascade = cascadeBiquads(coef, coef);
+
+ // Since we're cascading two identical biquads, the root of denominator
+ // of the IIR filter is repeated, so the root of the denominator with
+ // the largest magnitude occurs twice. The impulse response of the IIR
+ // filter will be roughly c*(r*r)^n at time n, where r is the root of
+ // largest magnitude. This approximation gets better as n increases.
+ // We can use this to get a rough idea of when the response has died
+ // down to a small value.
+
+ // This is the value we will use to determine how many frames to render.
+ // Rendering too many is a waste of time and also makes it hard to
+ // compare the actual result to the expected because the magnitudes are
+ // so small that they could be mostly round-off noise.
+ //
+ // Find magnitude of the root with largest magnitude
+ let rootMagnitude = largestRootMagnitude(coef.a1, coef.a2);
+
+ // Find n such that |r|^(2*n) <= eps. That is, n = log(eps)/(2*log(r)).
+ // Somewhat arbitrarily choose eps = 1e-20;
+ let eps = 1e-20;
+ let framesForTest =
+ Math.floor(Math.log(eps) / (2 * Math.log(rootMagnitude)));
+
+ // We're ready to create the graph for the test. The offline context
+ // has two channels: channel 0 is the expected (cascaded biquad) result
+ // and channel 1 is the actual IIR filter result.
+ let context = new OfflineAudioContext(2, framesForTest, sampleRate);
+
+        // Use a simple impulse with an arbitrary amplitude as the source.
+ let amplitude = 1;
+ let buffer = context.createBuffer(1, testFrames, sampleRate);
+ buffer.getChannelData(0)[0] = amplitude;
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Create the two biquad filters. Doesn't really matter what, but for
+ // simplicity we choose identical lowpass filters with the same
+ // parameters.
+ let biquad1 = context.createBiquadFilter();
+ biquad1.type = biquadType;
+ biquad1.frequency.value = biquadCutoff;
+ biquad1.Q.value = biquadQ;
+
+ let biquad2 = context.createBiquadFilter();
+ biquad2.type = biquadType;
+ biquad2.frequency.value = biquadCutoff;
+ biquad2.Q.value = biquadQ;
+
+ let iir = context.createIIRFilter(cascade.ff, cascade.fb);
+
+ // Create the merger to get the signals into multiple channels
+ let merger = context.createChannelMerger(2);
+
+ // Create the graph, filtering the source through two biquads.
+ source.connect(biquad1);
+ biquad1.connect(biquad2);
+ biquad2.connect(merger, 0, 0);
+
+ source.connect(iir);
+ iir.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+        // Filter the impulse through the JS IIR implementation as a reference.
+ let y = iirFilter(buffer.getChannelData(0), cascade.ff, cascade.fb);
+
+ // Rock and roll!
+ source.start();
+
+ context.startRendering()
+ .then(function(result) {
+ let expected = result.getChannelData(0);
+ let actual = result.getChannelData(1);
+
+ should(actual, '4-th order IIRFilter (biquad ref)')
+ .beCloseToArray(expected, {
+ // Thresholds experimentally determined.
+ absoluteThreshold: 1.59e-7,
+ relativeThreshold: 2.11e-5,
+ });
+
+ let snr = 10 * Math.log10(computeSNR(actual, expected));
+ should(snr, 'SNR of 4-th order IIRFilter (biquad ref)')
+ .beGreaterThanOrEqualTo(108.947);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html
new file mode 100644
index 0000000000..001a2a6172
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html
@@ -0,0 +1,59 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test the IIRFilterNode Interface</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+test(function(t) {
+ var ac = new AudioContext();
+
+ function check_args(arg1, arg2, err, desc) {
+ test(function() {
+ assert_throws_dom(err, function() {
+ ac.createIIRFilter(arg1, arg2)
+ })
+ }, desc)
+ }
+
+ check_args([], [1.0], 'NotSupportedError',
+ 'feedforward coefficients can not be empty');
+
+ check_args([1.0], [], 'NotSupportedError',
+ 'feedback coefficients can not be empty');
+
+ var coeff = new Float32Array(21)
+ coeff[0] = 1.0;
+
+ check_args(coeff, [1.0], 'NotSupportedError',
+ 'more than 20 feedforward coefficients can not be used');
+
+ check_args([1.0], coeff, 'NotSupportedError',
+ 'more than 20 feedback coefficients can not be used');
+
+ check_args([0.0, 0.0], [1.0], 'InvalidStateError',
+ 'at least one feedforward coefficient must be non-zero');
+
+ check_args([0.5, 0.5], [0.0], 'InvalidStateError',
+ 'the first feedback coefficient must be non-zero');
+
+}, "IIRFilterNode coefficients are checked properly");
+
+test(function(t) {
+ var ac = new AudioContext();
+
+ var frequencies = new Float32Array([-1.0, ac.sampleRate*0.5 - 1.0, ac.sampleRate]);
+ var magResults = new Float32Array(3);
+ var phaseResults = new Float32Array(3);
+
+ var filter = ac.createIIRFilter([0.5, 0.5], [1.0]);
+ filter.getFrequencyResponse(frequencies, magResults, phaseResults);
+
+ assert_true(isNaN(magResults[0]), "Invalid input frequency should give NaN magnitude response");
+ assert_true(!isNaN(magResults[1]), "Valid input frequency should not give NaN magnitude response");
+ assert_true(isNaN(magResults[2]), "Invalid input frequency should give NaN magnitude response");
+ assert_true(isNaN(phaseResults[0]), "Invalid input frequency should give NaN phase response");
+ assert_true(!isNaN(phaseResults[1]), "Valid input frequency should not give NaN phase response");
+ assert_true(isNaN(phaseResults[2]), "Invalid input frequency should give NaN phase response");
+
+}, "IIRFilterNode getFrequencyResponse handles invalid frequencies properly");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html
new file mode 100644
index 0000000000..38bd94a037
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html
@@ -0,0 +1,76 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test if MediaElementAudioSourceNode works for cross-origin redirects with
+ "cors" request mode.
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/common/get-host-info.sub.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ setup(() => {
+ const context = new AudioContext();
+ context.suspend();
+
+ const host_info = get_host_info();
+ const audioElement = document.createElement('audio');
+ audioElement.loop = true;
+ audioElement.crossOrigin = 'anonymous';
+ const wav =
+ host_info.HTTPS_ORIGIN + '/webaudio/resources/4ch-440.wav?' +
+ 'pipe=header(access-control-allow-origin,*)';
+ audioElement.src =
+ host_info.HTTPS_REMOTE_ORIGIN +
+ '/fetch/api/resources/redirect.py?location=' +
+ encodeURIComponent(wav);
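+        // The element fetches from the remote origin, which redirects back
+        // to a same-origin resource carrying a CORS header; because the
+        // element uses crossOrigin = 'anonymous', the decoded samples are
+        // readable by Web Audio.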
+ let source;
+ let workletRecorder;
+
+ audit.define(
+ {label: 'setting-up-graph'},
+ (task, should) => {
+ source = new MediaElementAudioSourceNode(context, {
+ mediaElement: audioElement
+ });
+ workletRecorder = new AudioWorkletNode(
+ context, 'recorder-processor', {channelCount: 4});
+ source.connect(workletRecorder).connect(context.destination);
+ task.done();
+ });
+
+ // The recorded data from MESN must be non-zero. The source file contains
+ // 4 channels of sine wave.
+ audit.define(
+ {label: 'start-playback-and-capture'},
+ (task, should) => {
+ workletRecorder.port.onmessage = (event) => {
+ if (event.data.type === 'recordfinished') {
+ for (let i = 0; i < event.data.recordBuffer.length; ++i) {
+ const channelData = event.data.recordBuffer[i];
+ should(channelData, `Recorded channel #${i}`)
+ .notBeConstantValueOf(0);
+ }
+ }
+
+ task.done();
+ };
+
+ context.resume();
+ audioElement.play();
+ });
+
+ Promise.all([
+ context.audioWorklet.addModule('/webaudio/js/worklet-recorder.js')
+ ]).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html
new file mode 100644
index 0000000000..56d0787b76
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html
@@ -0,0 +1,130 @@
+<!doctype html>
+
+<!--
+Tests that a MediaElementAudioSourceNode that is passed through
+a script processor passes the stream data along.
+The script processor saves the input buffers it gets to a temporary
+array, and after the playback has stopped, the contents are compared
+to those of a loaded AudioBuffer with the same source.
+
+Somewhat similar to a test from Mozilla:
+https://searchfox.org/mozilla-central/source/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html
+-->
+
+<html class="a">
+ <head>
+ <title>MediaElementAudioSource interface test (to scriptProcessor)</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ <script src="/webaudio/js/buffer-loader.js"></script>
+ </head>
+ <body class="a">
+ <div id="log"></div>
+ <script>
+ var elementSourceTest = async_test(function(elementSourceTest) {
+
+ var src = '/webaudio/resources/sin_440Hz_-6dBFS_1s.wav';
+ var BUFFER_SIZE = 2048;
+ var context = null;
+ var actualBufferArrayC0 = new Float32Array(0);
+ var actualBufferArrayC1 = new Float32Array(0);
+      var audio = null, source = null, processor = null;
+
+ function loadExpectedBuffer(event) {
+        var bufferLoader = new BufferLoader(
+ context,
+ [src],
+ elementSourceTest.step_func(bufferLoadCompleted)
+ );
+ bufferLoader.load();
+ };
+
+ function bufferLoadCompleted(buffer) {
+ runTests(buffer);
+ };
+
+ function concatTypedArray(arr1, arr2) {
+ var result = new Float32Array(arr1.length + arr2.length);
+ result.set(arr1);
+ result.set(arr2, arr1.length);
+ return result;
+ }
+
+ // Create Audio context. The reference wav file is sampled at 44.1 kHz so
+ // use the same rate for the context to remove extra resampling that might
+ // be required.
+ context = new AudioContext({sampleRate: 44100});
+
+ // Create an audio element, and a media element source
+ audio = document.createElement('audio');
+ audio.src = src;
+ source = context.createMediaElementSource(audio);
+
+ function processListener (e) {
+ actualBufferArrayC0 = concatTypedArray(actualBufferArrayC0, e.inputBuffer.getChannelData(0));
+ actualBufferArrayC1 = concatTypedArray(actualBufferArrayC1, e.inputBuffer.getChannelData(1));
+ }
+
+ // Create a processor node to copy the input to the actual buffer
+ processor = context.createScriptProcessor(BUFFER_SIZE);
+ source.connect(processor);
+ processor.connect(context.destination);
+ let audioprocessListener = elementSourceTest.step_func(processListener);
+ processor.addEventListener('audioprocess', audioprocessListener);
+
+ context.addEventListener('statechange', elementSourceTest.step_func(() => {
+ assert_equals(context.state, "running", "context.state");
+ audio.play();
+ }), {once: true});
+
+      // When media playback has ended, load the expected buffer for comparison.
+ audio.addEventListener("ended", elementSourceTest.step_func(function(e) {
+        // Set a timeout since we need the audioprocess event to run for all samples.
+ window.setTimeout(elementSourceTest.step_func(loadExpectedBuffer), 50);
+ }));
+
+ function runTests(expected) {
+ source.disconnect();
+ processor.disconnect();
+
+        // Firefox seems to process events after disconnect.
+ processor.removeEventListener('audioprocess', audioprocessListener)
+
+ // Note: the expected result is from a mono source file.
+ var expectedBuffer = expected[0];
+
+        // Trim the actual elements because we don't have fine-grained
+        // control over the start and end time of recording the data.
+ var actualTrimmedC0 = trimEmptyElements(actualBufferArrayC0);
+ var actualTrimmedC1 = trimEmptyElements(actualBufferArrayC1);
+ var expectedLength = trimEmptyElements(expectedBuffer.getChannelData(0)).length;
+
+ // Test that there is some data.
+ test(function() {
+ assert_greater_than(actualTrimmedC0.length, 0,
+ "processed data array (C0) length greater than 0");
+ assert_greater_than(actualTrimmedC1.length, 0,
+ "processed data array (C1) length greater than 0");
+        }, "Both channels processed some data");
+
+        // Test the actual contents of the first and second channels.
+ test(function() {
+ assert_array_approx_equals(
+ actualTrimmedC0,
+ trimEmptyElements(expectedBuffer.getChannelData(0)),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 0)");
+ assert_array_approx_equals(
+ actualTrimmedC1,
+ trimEmptyElements(expectedBuffer.getChannelData(0)),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 1)");
+ }, "All data processed correctly");
+
+ elementSourceTest.done();
+ };
+ }, "Element Source tests completed");
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html
new file mode 100644
index 0000000000..de2f0b7dd3
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html
@@ -0,0 +1,75 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test if MediaElementAudioSourceNode works for cross-origin redirects with
+ "no-cors" request mode.
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/common/get-host-info.sub.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ setup(() => {
+ const context = new AudioContext();
+ context.suspend();
+
+ const host_info = get_host_info();
+ const audioElement = document.createElement('audio');
+ audioElement.loop = true;
+ const wav =
+ host_info.HTTPS_ORIGIN + '/webaudio/resources/4ch-440.wav?' +
+ 'pipe=header(access-control-allow-origin,*)';
+ audioElement.src =
+ host_info.HTTPS_REMOTE_ORIGIN +
+ '/fetch/api/resources/redirect.py?location=' +
+ encodeURIComponent(wav);
+ let source;
+ let workletRecorder;
+
+ audit.define(
+ {label: 'setting-up-graph'},
+ (task, should) => {
+ source = new MediaElementAudioSourceNode(context, {
+ mediaElement: audioElement
+ });
+ workletRecorder = new AudioWorkletNode(
+ context, 'recorder-processor', {channelCount: 4});
+ source.connect(workletRecorder).connect(context.destination);
+ task.done();
+ });
+
+      // The recorded data from MESN must be all zeros. The source file
+      // contains 4 channels of sine wave, but without CORS access the
+      // cross-origin samples are not exposed to Web Audio.
+ audit.define(
+ {label: 'start-playback-and-capture'},
+ (task, should) => {
+ workletRecorder.port.onmessage = (event) => {
+ if (event.data.type === 'recordfinished') {
+ for (let i = 0; i < event.data.recordBuffer.length; ++i) {
+ const channelData = event.data.recordBuffer[i];
+ should(channelData, `Recorded channel #${i}`)
+ .beConstantValueOf(0);
+ }
+ }
+
+ task.done();
+ };
+
+ context.resume();
+ audioElement.play();
+ });
+
+ Promise.all([
+ context.audioWorklet.addModule('/webaudio/js/worklet-recorder.js')
+ ]).then(() => {
+ audit.run();
+ });
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/ctor-mediastreamaudiodestination.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/ctor-mediastreamaudiodestination.html
new file mode 100644
index 0000000000..5d3fd0c26f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/ctor-mediastreamaudiodestination.html
@@ -0,0 +1,64 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: MediaStreamAudioDestinationNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context = new AudioContext();
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ // Need AudioContext, not OfflineAudioContext, for these tests.
+ should(() => {
+ context = new AudioContext();
+ }, 'context = new AudioContext()').notThrow();
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(
+ should, 'MediaStreamAudioDestinationNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(
+ should, 'MediaStreamAudioDestinationNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 0,
+ channelCount: 2,
+ channelCountMode: 'explicit',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, []);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(
+ should, context, 'MediaStreamAudioDestinationNode', {
+ channelCount: {
+ // An arbitrary but valid, non-default count for this node.
+ value: 7
+ }
+ });
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html
new file mode 100644
index 0000000000..a711419656
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-ctor.html
@@ -0,0 +1,73 @@
+<!DOCTYPE html>
+
+<html class="a">
+ <head>
+ <title>MediaStreamAudioSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body class="a">
+ <div id="log"></div>
+ <script>
+ setup({explicit_done: true});
+ // Wait until the DOM is ready to be able to get a reference to the canvas
+ // element.
+ window.addEventListener("load", function() {
+ const ac = new AudioContext();
+ const emptyStream = new MediaStream();
+
+ test(function() {
+ assert_throws_dom(
+ "InvalidStateError",
+ function() {
+ ac.createMediaStreamSource(emptyStream);
+ },
+ `A MediaStreamAudioSourceNode can only be constructed via the factory
+ method with a MediaStream that has at least one track of kind "audio"`
+ );
+ }, "MediaStreamAudioSourceNode created with factory method and MediaStream with no tracks");
+
+ test(function() {
+ assert_throws_dom(
+ "InvalidStateError",
+ function() {
+ new MediaStreamAudioSourceNode(ac, { mediaStream: emptyStream });
+ },
+ `A MediaStreamAudioSourceNode can only be constructed via the constructor
+ with a MediaStream that has at least one track of kind "audio"`
+ );
+ }, "MediaStreamAudioSourceNode created with constructor and MediaStream with no tracks");
+
+ const canvas = document.querySelector("canvas");
+ const ctx = canvas.getContext("2d");
+ const videoOnlyStream = canvas.captureStream();
+
+ test(function() {
+ assert_throws_dom(
+ "InvalidStateError",
+ function() {
+ ac.createMediaStreamSource(videoOnlyStream);
+ },
+ `A MediaStreamAudioSourceNode can only be constructed via the factory with a
+ MediaStream that has at least one track of kind "audio"`
+ );
+ }, `MediaStreamAudioSourceNode created with the factory method and MediaStream with only a video track`);
+
+ test(function() {
+ assert_throws_dom(
+ "InvalidStateError",
+ function() {
+ new MediaStreamAudioSourceNode(ac, {
+ mediaStream: videoOnlyStream,
+ });
+ },
+          `A MediaStreamAudioSourceNode can only be constructed via the constructor
+          with a MediaStream that has at least one track of kind "audio"`
+ );
+ }, `MediaStreamAudioSourceNode created with constructor and MediaStream with only a video track`);
+ done();
+ });
+ </script>
+ </body>
+ <canvas></canvas>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html
new file mode 100644
index 0000000000..816eba0b29
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/mediastreamaudiosourcenode-routing.html
@@ -0,0 +1,127 @@
+<!DOCTYPE html>
+
+<html class="a">
+ <head>
+ <title>MediaStreamAudioSourceNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ </head>
+ <body class="a">
+ <div id="log"></div>
+ <script>
+ function binIndexForFrequency(frequency, analyser) {
+ return (
+ 1 +
+ Math.round(
+ (frequency * analyser.fftSize) / analyser.context.sampleRate
+ )
+ );
+ }
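+      // E.g., with a 48 kHz context and the default fftSize of 2048,
+      // 1000 Hz maps to bin 1 + Math.round(1000 * 2048 / 48000) = 44.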
+
+ const t = async_test(
+ "MediaStreamAudioSourceNode captures the right track."
+ );
+ t.step(function() {
+ const ac = new AudioContext();
+ // Test that the right track is captured. Set up a MediaStream that has two
+ // tracks, one with a tone at 100Hz and one with a tone at 1000Hz.
+ const dest0 = ac.createMediaStreamDestination();
+ const dest1 = ac.createMediaStreamDestination();
+ const osc0 = ac.createOscillator();
+ const osc1 = ac.createOscillator();
+ osc0.frequency.value = 100;
+ osc1.frequency.value = 1000;
+ osc0.connect(dest0);
+ osc1.connect(dest1);
+ osc0.start(0);
+ osc1.start(0);
+ const track0 = dest0.stream.getAudioTracks()[0];
+ const track0id = track0.id;
+ const track1 = dest1.stream.getAudioTracks()[0];
+ const track1id = track1.id;
+
+ let ids = [track0id, track1id];
+ ids.sort();
+ let targetFrequency;
+ let otherFrequency;
+ if (ids[0] == track0id) {
+ targetFrequency = 100;
+ otherFrequency = 1000;
+ } else {
+ targetFrequency = 1000;
+ otherFrequency = 100;
+ }
+
+ let twoTrackMediaStream = new MediaStream();
+ twoTrackMediaStream.addTrack(track0);
+ twoTrackMediaStream.addTrack(track1);
+
+ const twoTrackSource = ac.createMediaStreamSource(twoTrackMediaStream);
+ const analyser = ac.createAnalyser();
+ // Don't do smoothing so that the frequency data changes quickly
+ analyser.smoothingTimeConstant = 0;
+
+ twoTrackSource.connect(analyser);
+
+ const indexToCheckForHighEnergy = binIndexForFrequency(
+ targetFrequency,
+ analyser
+ );
+ const indexToCheckForLowEnergy = binIndexForFrequency(
+ otherFrequency,
+ analyser
+ );
+ let frequencyData = new Float32Array(1024);
+ let checkCount = 0;
+ let numberOfRemovals = 0;
+ let stopped = false;
+ function analyse() {
+ analyser.getFloatFrequencyData(frequencyData);
+          // There should be high energy in the right bin (above -40 dBFS)
+          // because the source is a sine wave at 0 dBFS.
+ if (frequencyData[indexToCheckForHighEnergy] > -40 && !stopped) {
+ assert_true(true, "Correct track routed to the AudioContext.");
+ checkCount++;
+ }
+ if (stopped && frequencyData[indexToCheckForHighEnergy] < -40) {
+ assert_true(
+ true,
+ `After stopping the track, low energy is found in the
+ same bin`
+ );
+ checkCount++;
+ }
+ if (checkCount > 5 && checkCount < 20) {
+ twoTrackMediaStream.getAudioTracks().forEach(track => {
+ if (track.id == ids[0]) {
+ numberOfRemovals++;
+ window.removedTrack = track;
+ twoTrackMediaStream.removeTrack(track);
+ }
+ });
+ assert_true(
+ numberOfRemovals == 1,
+ `The mediastreamtrack can only be
+ removed once from the mediastream`
+ );
+ } else if (checkCount >= 20 && checkCount < 30) {
+ window.removedTrack.stop();
+ stopped = true;
+ } else if (checkCount >= 30) {
+ assert_true(
+ numberOfRemovals == 1,
+ `After removing the track from the
+ mediastream, it's still routed to the graph.`
+ );
+ // After some time, consider that it worked.
+ t.done();
+ return;
+ }
+
+ t.step_timeout(analyse, 100);
+ }
+ t.step_timeout(analyse, 100);
+ });
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/ctor-offlineaudiocontext.html b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/ctor-offlineaudiocontext.html
new file mode 100644
index 0000000000..4b68631036
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/ctor-offlineaudiocontext.html
@@ -0,0 +1,203 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test Constructor: OfflineAudioContext</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+
+ <body>
+ <script>
+ let audit = Audit.createTaskRunner();
+
+      // Just a simple test of the 3-arg constructor; this should be
+ // well-covered by other layout tests that use the 3-arg constructor.
+ audit.define(
+ {label: 'basic', description: 'Old-style constructor'},
+ (task, should) => {
+ let context;
+
+ // First and only arg should be a dictionary.
+ should(() => {
+ new OfflineAudioContext(3);
+ }, 'new OfflineAudioContext(3)').throw(TypeError);
+
+ // Constructor needs 1 or 3 args, so 2 should throw.
+ should(() => {
+ new OfflineAudioContext(3, 42);
+ }, 'new OfflineAudioContext(3, 42)').throw(TypeError);
+
+ // Valid constructor
+ should(() => {
+ context = new OfflineAudioContext(3, 42, 12345);
+ }, 'context = new OfflineAudioContext(3, 42, 12345)').notThrow();
+
+ // Verify that the context was constructed correctly.
+ should(context.length, 'context.length').beEqualTo(42);
+ should(context.sampleRate, 'context.sampleRate').beEqualTo(12345);
+ should(
+ context.destination.channelCount,
+ 'context.destination.channelCount')
+ .beEqualTo(3);
+ should(
+ context.destination.channelCountMode,
+ 'context.destination.channelCountMode')
+ .beEqualTo('explicit');
+ should(
+ context.destination.channelInterpretation,
+ 'context.destination.channelInterpretation')
+ .beEqualTo('speakers');
+ task.done();
+ });
+
+ // Test constructor throws an error if the required members of the
+ // dictionary are not given.
+ audit.define(
+ {label: 'options-1', description: 'Required options'},
+ (task, should) => {
+ let context2;
+
+ // No args should throw
+ should(() => {
+ new OfflineAudioContext();
+ }, 'new OfflineAudioContext()').throw(TypeError);
+
+ // Empty OfflineAudioContextOptions should throw
+ should(() => {
+ new OfflineAudioContext({});
+ }, 'new OfflineAudioContext({})').throw(TypeError);
+
+ let options = {length: 42};
+ // sampleRate is required.
+ should(
+ () => {
+ new OfflineAudioContext(options);
+ },
+ 'new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+
+ options = {sampleRate: 12345};
+ // length is required.
+ should(
+ () => {
+ new OfflineAudioContext(options);
+ },
+ 'new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+
+ // Valid constructor. Verify that the resulting context has the
+ // correct values.
+ options = {length: 42, sampleRate: 12345};
+ should(
+ () => {
+ context2 = new OfflineAudioContext(options);
+ },
+ 'c2 = new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(
+ context2.destination.channelCount,
+ 'c2.destination.channelCount')
+ .beEqualTo(1);
+ should(context2.length, 'c2.length').beEqualTo(options.length);
+ should(context2.sampleRate, 'c2.sampleRate')
+ .beEqualTo(options.sampleRate);
+ should(
+ context2.destination.channelCountMode,
+ 'c2.destination.channelCountMode')
+ .beEqualTo('explicit');
+ should(
+ context2.destination.channelInterpretation,
+ 'c2.destination.channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ // Constructor should throw errors for invalid values specified by
+ // OfflineAudioContextOptions.
+ audit.define(
+ {label: 'options-2', description: 'Invalid options'},
+ (task, should) => {
+ let options = {length: 42, sampleRate: 8000, numberOfChannels: 33};
+
+ // channelCount too large.
+ should(
+ () => {
+ new OfflineAudioContext(options);
+ },
+ 'new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ // length cannot be 0
+ options = {length: 0, sampleRate: 8000};
+ should(
+ () => {
+ new OfflineAudioContext(options);
+ },
+ 'new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ // sampleRate outside valid range
+ options = {length: 1, sampleRate: 1};
+ should(
+ () => {
+ new OfflineAudioContext(options);
+ },
+ 'new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+
+ task.done();
+ });
+
+ audit.define(
+ {label: 'options-3', description: 'Valid options'},
+ (task, should) => {
+ let context;
+ let options = {
+ length: 1,
+ sampleRate: 8000,
+ };
+
+ // Verify context with valid constructor has the correct values.
+ should(
+ () => {
+ context = new OfflineAudioContext(options);
+ },
+              'c = new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(context.length, 'c.length').beEqualTo(options.length);
+ should(context.sampleRate, 'c.sampleRate')
+ .beEqualTo(options.sampleRate);
+ should(
+ context.destination.channelCount, 'c.destination.channelCount')
+ .beEqualTo(1);
+ should(
+ context.destination.channelCountMode,
+ 'c.destination.channelCountMode')
+ .beEqualTo('explicit');
+ should(
+ context.destination.channelInterpretation,
+              'c.destination.channelInterpretation')
+ .beEqualTo('speakers');
+
+ options.numberOfChannels = 7;
+ should(
+ () => {
+ context = new OfflineAudioContext(options);
+ },
+              'c = new OfflineAudioContext(' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(
+ context.destination.channelCount, 'c.destination.channelCount')
+ .beEqualTo(options.numberOfChannels);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html
new file mode 100644
index 0000000000..ee976f7f72
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<title>Test currentTime at completion of OfflineAudioContext rendering</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+  // sampleRate is a power of two so that the expected time can be
+  // represented exactly as a double in currentTime.
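+  // An OfflineAudioContext renders in fixed 128-frame render quanta, so even
+  // though only 1 frame is requested, currentTime advances by one full
+  // quantum (128 / sampleRate) by the time rendering completes.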
+ var context = new OfflineAudioContext(1, 1, 65536);
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, 1, "buffer length");
+ assert_equals(context.currentTime, 128 / context.sampleRate,
+ "currentTime at completion");
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/offlineaudiocontext-detached-execution-context.html b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/offlineaudiocontext-detached-execution-context.html
new file mode 100644
index 0000000000..6eafd15fd2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/offlineaudiocontext-detached-execution-context.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+      Testing behavior of OfflineAudioContext after its execution context is detached
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ const audit = Audit.createTaskRunner();
+
+ audit.define('decoding-on-detached-iframe', (task, should) => {
+ const iframe =
+ document.createElementNS("http://www.w3.org/1999/xhtml", "iframe");
+ document.body.appendChild(iframe);
+
+      // Use the smallest valid values to keep the test fast.
+ let context =
+ new iframe.contentWindow.OfflineAudioContext(1, 1, 8000);
+
+ document.body.removeChild(iframe);
+
+ return should(context.decodeAudioData(new ArrayBuffer(1)),
+ 'decodeAudioData() upon a detached iframe')
+            .beRejectedWith('InvalidStateError')
+            .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/startrendering-after-discard.html b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/startrendering-after-discard.html
new file mode 100644
index 0000000000..dd610ec335
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/startrendering-after-discard.html
@@ -0,0 +1,24 @@
+<!doctype html>
+<title>Test for rejected promise from startRendering() on an
+ OfflineAudioContext in a discarded browsing context</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<body></body>
+<script>
+let context;
+let childDOMException;
+setup(() => {
+ const frame = document.createElement('iframe');
+ document.body.appendChild(frame);
+ context = new frame.contentWindow.OfflineAudioContext(
+ {length: 1, sampleRate: 48000});
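+  // Capture the frame's DOMException constructor before removing the frame;
+  // promise_rejects_dom() needs the constructor from the same global that
+  // produces the rejection.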
+ childDOMException = frame.contentWindow.DOMException;
+ frame.remove();
+});
+
+promise_test((t) => promise_rejects_dom(
+ t, 'InvalidStateError', childDOMException, context.startRendering()),
+ 'startRendering()');
+// decodeAudioData() is tested in
+// offlineaudiocontext-detached-execution-context.html
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/ctor-oscillator.html b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/ctor-oscillator.html
new file mode 100644
index 0000000000..bf50195a5b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/ctor-oscillator.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: Oscillator
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'OscillatorNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'OscillatorNode', context, {
+ prefix: prefix,
+ numberOfInputs: 0,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(
+ should, node, prefix,
+ [{name: 'type', value: 'sine'}, {name: 'frequency', value: 440}]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'OscillatorNode');
+ task.done();
+ });
+
+ audit.define('constructor options', (task, should) => {
+ let node;
+ let options = {type: 'sawtooth', detune: 7, frequency: 918};
+
+ should(
+ () => {
+ node = new OscillatorNode(context, options);
+ },
+ 'node1 = new OscillatorNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+
+ should(node.type, 'node1.type').beEqualTo(options.type);
+ should(node.detune.value, 'node1.detune.value')
+ .beEqualTo(options.detune);
+ should(node.frequency.value, 'node1.frequency.value')
+ .beEqualTo(options.frequency);
+
+ should(node.channelCount, 'node1.channelCount').beEqualTo(2);
+ should(node.channelCountMode, 'node1.channelCountMode')
+ .beEqualTo('max');
+ should(node.channelInterpretation, 'node1.channelInterpretation')
+ .beEqualTo('speakers');
+
+ // Test that type and periodicWave options work as described.
+ options = {
+ type: 'sine',
+ periodicWave: new PeriodicWave(context, {real: [1, 1]})
+ };
+ should(() => {
+ node = new OscillatorNode(context, options);
+ }, 'new OscillatorNode(c, ' + JSON.stringify(options) + ')').notThrow();
+
+ options = {type: 'custom'};
+ should(
+ () => {
+ node = new OscillatorNode(context, options);
+ },
+ 'new OscillatorNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+
+ options = {
+ type: 'custom',
+ periodicWave: new PeriodicWave(context, {real: [1, 1]})
+ };
+ should(() => {
+ node = new OscillatorNode(context, options);
+ }, 'new OscillatorNode(c, ' + JSON.stringify(options) + ')').notThrow();
+
+ should(
+ () => {
+ node = new OscillatorNode(context, {periodicWave: null});
+ },
+            'new OscillatorNode(c, {periodicWave: null})')
+            .throw(TypeError);
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-limiting.html b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-limiting.html
new file mode 100644
index 0000000000..81a1293d03
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-limiting.html
@@ -0,0 +1,154 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Oscillator Detune Limits
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const sampleRate = 44100;
+ const renderLengthSeconds = 0.125;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'detune limits',
+ description:
+ 'Oscillator with detune and frequency at Nyquist or above'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(
+ 2, renderLengthSeconds * sampleRate, sampleRate);
+
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
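+
+          // The reference oscillator feeds merger input 0 (channel 0 of the
+          // rendered buffer) and the test oscillator feeds input 1 (channel 1).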
+
+          // For the test oscillator, use a small frequency and a detune
+          // value large enough that the detuned frequency lands well above
+          // Nyquist.
+ const oscFrequency = 1;
+ const detunedFrequency = sampleRate;
+ const detuneValue = Math.fround(1200 * Math.log2(detunedFrequency));
+
+ let testOsc = new OscillatorNode(
+ context, {frequency: oscFrequency, detune: detuneValue});
+ testOsc.connect(merger, 0, 1);
+
+ // For the reference oscillator, determine the computed oscillator
+ // frequency using the values above and set that as the oscillator
+ // frequency.
+ let computedFreq = oscFrequency * Math.pow(2, detuneValue / 1200);
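+          // For example, at sampleRate = 44100, detuneValue is about
+          // 1200 * log2(44100) = 18514 cents, so computedFreq is about
+          // 2^(18514/1200) = 44100 Hz, twice the Nyquist frequency.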
+
+ let refOsc = new OscillatorNode(context, {frequency: computedFreq});
+ refOsc.connect(merger, 0, 0);
+
+ // Start 'em up and render
+ testOsc.start();
+ refOsc.start();
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let expected = renderedBuffer.getChannelData(0);
+ let actual = renderedBuffer.getChannelData(1);
+
+                // Report the sample rate so the following messages
+                // make more sense.
+ should(context.sampleRate, 'Context sample rate')
+ .beEqualTo(context.sampleRate);
+
+                // Since the frequency is at or above Nyquist, the reference
+                // oscillator output should be zero.
+ should(
+ refOsc.frequency.value, 'Reference oscillator frequency')
+ .beGreaterThanOrEqualTo(context.sampleRate / 2);
+ should(
+ expected, `Osc(freq: ${refOsc.frequency.value}) output`)
+ .beConstantValueOf(0);
+ // The output from each oscillator should be the same.
+ should(
+ actual,
+ 'Osc(freq: ' + oscFrequency + ', detune: ' + detuneValue +
+ ') output')
+ .beCloseToArray(expected, {absoluteThreshold: 0});
+
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'detune automation',
+ description:
+ 'Oscillator output with detune automation should be zero ' +
+ 'above Nyquist'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(
+ 1, renderLengthSeconds * sampleRate, sampleRate);
+
+ const baseFrequency = 1;
+ const rampEnd = renderLengthSeconds / 2;
+ const detuneEnd = 1e7;
+
+ let src = new OscillatorNode(context, {frequency: baseFrequency});
+ src.detune.linearRampToValueAtTime(detuneEnd, rampEnd);
+
+ src.connect(context.destination);
+
+ src.start();
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let audio = renderedBuffer.getChannelData(0);
+
+ // At some point, the computed oscillator frequency will go
+                // above Nyquist. Determine at what time this occurs. The
+ // computed frequency is f * 2^(d/1200) where |f| is the
+ // oscillator frequency and |d| is the detune value. Thus,
+ // find |d| such that Nyquist = f*2^(d/1200). That is, d =
+ // 1200*log2(Nyquist/f)
+ let criticalDetune =
+ 1200 * Math.log2(context.sampleRate / 2 / baseFrequency);
+
+ // Now figure out at what point on the linear ramp does the
+ // detune value reach this critical value. For a linear ramp:
+ //
+ // v(t) = V0+(V1-V0)*(t-T0)/(T1-T0)
+ //
+ // Thus,
+ //
+ // t = ((T1-T0)*v(t) + T0*V1 - T1*V0)/(V1-V0)
+ //
+ // In this test, T0 = 0, V0 = 0, T1 = rampEnd, V1 =
+ // detuneEnd, and v(t) = criticalDetune
+ let criticalTime = (rampEnd * criticalDetune) / detuneEnd;
+ let criticalFrame =
+ Math.ceil(criticalTime * context.sampleRate);
+
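+                  // This check always passes; it just records the critical
+                  // frame in the test output to simplify debugging. With the
+                  // constants above (sampleRate = 44100, rampEnd = 0.0625 s,
+                  // detuneEnd = 1e7), criticalDetune is about
+                  // 1200 * log2(22050) = 17314 cents, giving criticalTime of
+                  // roughly 1.08e-4 s and a criticalFrame of 5.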
+ should(
+ criticalFrame,
+ `Frame where detuned oscillator reaches Nyquist`)
+ .beEqualTo(criticalFrame);
+
+ should(
+ audio.slice(0, criticalFrame),
+ `osc[0:${criticalFrame - 1}]`)
+ .notBeConstantValueOf(0);
+
+ should(audio.slice(criticalFrame), `osc[${criticalFrame}:]`)
+ .beConstantValueOf(0);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-overflow.html b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-overflow.html
new file mode 100644
index 0000000000..28c28bc1db
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/detune-overflow.html
@@ -0,0 +1,41 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test Osc.detune Overflow</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const sampleRate = 44100;
+ const renderLengthFrames = RENDER_QUANTUM_FRAMES;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('detune overflow', async (task, should) => {
+ let context =
+ new OfflineAudioContext(1, renderLengthFrames, sampleRate);
+
+ // This value of frequency and detune results in a computed frequency of
+ // 440*2^(153600/1200) = 1.497e41. The frequency needs to be clamped to
+ // Nyquist. But a sine wave at Nyquist frequency is all zeroes. Verify
+ // the output is 0.
+ let osc = new OscillatorNode(context, {frequency: 440, detune: 153600});
+
+ osc.connect(context.destination);
+
+ let buffer = await context.startRendering();
+ let output = buffer.getChannelData(0);
+ should(output, 'Osc freq and detune outside nominal range')
+ .beConstantValueOf(0);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/osc-basic-waveform.html b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/osc-basic-waveform.html
new file mode 100644
index 0000000000..b34c96855f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/osc-basic-waveform.html
@@ -0,0 +1,229 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+      Test Basic Oscillator Waveforms
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Don't change the sample rate. The tests below depend on this sample
+ // rate to cover all the cases in Chrome's implementation. But the tests
+ // are general and apply to any browser.
+ const sampleRate = 44100;
+
+      // Only need a few samples for testing, so just use two render quanta.
+ const durationFrames = 2 * RENDER_QUANTUM_FRAMES;
+
+ let audit = Audit.createTaskRunner();
+
+ // The following tests verify that the oscillator produces the same
+ // results as the mathematical oscillators. We choose sine wave and a
+ // custom wave because we know they're bandlimited and won't change with
+ // the frequency.
+ //
+ // The tests for 1 and 2 Hz are intended to test Chrome's interpolation
+ // algorithm, but are still generally applicable to any browser.
+
+ audit.define(
+ {label: 'Test 0', description: 'Sine wave: 100 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = 100;
+
+ let src =
+ new OscillatorNode(context, {type: 'sine', frequency: freqHz});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 0,
+ b1: 1,
+ prefix: 'Sine',
+ threshold: 1.8045e-6,
+ snrThreshold: 118.91
+ });
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 1', description: 'Sine wave: -100 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = -100;
+
+ let src =
+ new OscillatorNode(context, {type: 'sine', frequency: freqHz});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 0,
+ b1: 1,
+ prefix: 'Sine',
+ threshold: 4.7684e-7,
+ snrThreshold: 130.95
+ });
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 2', description: 'Sine wave: 2 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = 2;
+
+ let src =
+ new OscillatorNode(context, {type: 'sine', frequency: freqHz});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 0,
+ b1: 1,
+ prefix: 'Sine',
+ threshold: 1.4516e-7,
+ snrThreshold: 119.93
+ });
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 3', description: 'Sine wave: 1 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = 1;
+
+ let src =
+ new OscillatorNode(context, {type: 'sine', frequency: freqHz});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 0,
+ b1: 1,
+ prefix: 'Sine',
+ threshold: 1.4157e-7,
+ snrThreshold: 112.22
+ });
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 4', description: 'Custom wave: 100 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = 100;
+
+ let wave = new PeriodicWave(
+ context,
+ {real: [0, 1], imag: [0, 1], disableNormalization: true});
+ let src = new OscillatorNode(
+ context,
+ {type: 'custom', frequency: freqHz, periodicWave: wave});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 1,
+ b1: 1,
+ prefix: 'Custom',
+ threshold: 1.8478e-6,
+ snrThreshold: 122.43
+ });
+ task.done();
+ });
+
+ audit.define(
+ {label: 'Test 5', description: 'Custom wave: 1 Hz'},
+ async (task, should) => {
+ let context = new OfflineAudioContext(
+ {length: durationFrames, sampleRate: sampleRate});
+
+ const freqHz = 1;
+
+ let wave = new PeriodicWave(
+ context,
+ {real: [0, 1], imag: [0, 1], disableNormalization: true});
+ let src = new OscillatorNode(
+ context,
+ {type: 'custom', frequency: freqHz, periodicWave: wave});
+ src.connect(context.destination);
+
+ src.start();
+
+ let renderedBuffer = await context.startRendering();
+ checkResult(should, renderedBuffer, context, {
+ freqHz: freqHz,
+ a1: 1,
+ b1: 1,
+ prefix: 'Custom',
+ threshold: 4.7684e-7,
+ snrThreshold: 138.76
+ });
+ task.done();
+ });
+
+ audit.run();
+
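+      // Reference waveform: a1*cos(omega*k) + b1*sin(omega*k) at freqHz.
+      // With normalization disabled, this matches a PeriodicWave constructed
+      // with real = [0, a1] and imag = [0, b1].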
+ function waveForm(context, freqHz, a1, b1, nsamples) {
+ let buffer =
+ new AudioBuffer({length: nsamples, sampleRate: context.sampleRate});
+ let signal = buffer.getChannelData(0);
+ const omega = 2 * Math.PI * freqHz / context.sampleRate;
+ for (let k = 0; k < nsamples; ++k) {
+ signal[k] = a1 * Math.cos(omega * k) + b1 * Math.sin(omega * k);
+ }
+
+ return buffer;
+ }
+
+ function checkResult(should, renderedBuffer, context, options) {
+ let {freqHz, a1, b1, prefix, threshold, snrThreshold} = options;
+
+ let actual = renderedBuffer.getChannelData(0);
+
+ let expected =
+ waveForm(context, freqHz, a1, b1, actual.length).getChannelData(0);
+
+ should(actual, `${prefix}: ${freqHz} Hz`).beCloseToArray(expected, {
+ absoluteThreshold: threshold
+ });
+
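+        // computeSNR() (from audit-util.js) returns the ratio of signal
+        // power to noise power; convert it to dB for the threshold check.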
+ let snr = 10 * Math.log10(computeSNR(actual, expected));
+
+ should(snr, `${prefix}: SNR (db)`).beGreaterThanOrEqualTo(snrThreshold);
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html
new file mode 100644
index 0000000000..8aa73552aa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html
@@ -0,0 +1,140 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Panner Node Automation</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Use a power-of-two to eliminate some round-off; otherwise, this isn't
+ // really important.
+ const sampleRate = 16384;
+
+ // Render enough for the test; we don't need a lot.
+ const renderFrames = 2048;
+
+ // Initial panner positionX and final positionX for listener.
+ const positionX = 2000;
+
+ const audit = Audit.createTaskRunner();
+
+ // Test that listener.positionX.value setter does the right thing.
+ audit.define('Set Listener.positionX.value', (task, should) => {
+ const context = new OfflineAudioContext(2, renderFrames, sampleRate);
+
+ createGraph(context);
+
+ // Frame at which the listener instantaneously moves to a new location.
+ const moveFrame = 512;
+
+ context.suspend(moveFrame / context.sampleRate)
+ .then(() => {
+ context.listener.positionX.value = positionX;
+ })
+ .then(() => context.resume());
+
+        verifyOutput(context, moveFrame, should, 'listener.positionX.value')
+ .then(() => task.done());
+ });
+
+ // Test that listener.positionX.setValueAtTime() does the right thing.
+ audit.define('Listener.positionX.setValue', (task, should) => {
+ const context = new OfflineAudioContext(2, renderFrames, sampleRate);
+
+ createGraph(context);
+
+ // Frame at which the listener instantaneously moves to a new location.
+ const moveFrame = 512;
+
+ context.listener.positionX.setValueAtTime(
+ positionX, moveFrame / context.sampleRate);
+
+ verifyOutput(
+            context, moveFrame, should, 'listener.positionX.setValueAtTime')
+ .then(() => task.done());
+ });
+
+ // Test that listener.setPosition() does the right thing.
+ audit.define('Listener.setPosition', (task, should) => {
+ const context = new OfflineAudioContext(2, renderFrames, sampleRate);
+
+ createGraph(context);
+
+ // Frame at which the listener instantaneously moves to a new location.
+ const moveFrame = 512;
+
+ context.suspend(moveFrame / context.sampleRate)
+ .then(() => {
+ context.listener.setPosition(positionX, 0, 0);
+ })
+ .then(() => context.resume());
+
+        verifyOutput(context, moveFrame, should, 'listener.setPosition')
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+
+      // Create the basic graph for testing, which consists of an oscillator
+      // node connected to a panner node.
+ function createGraph(context) {
+ const listener = context.listener;
+
+ listener.positionX.value = 0;
+ listener.positionY.value = 0;
+ listener.positionZ.value = 0;
+
+ const src = new OscillatorNode(context);
+
+ const panner = new PannerNode(context, {
+ distanceModel: 'linear',
+ refDistance: 1,
+ maxDistance: 3000,
+ positionX: positionX,
+ positionY: 0,
+ positionZ: 0
+ });
+ src.connect(panner).connect(context.destination);
+
+ src.start();
+ }
+
+
+ // Verify the output from the panner is correct.
+ function verifyOutput(context, moveFrame, should, prefix) {
+ return context.startRendering().then(resultBuffer => {
+ // Get the outputs (left and right)
+ const c0 = resultBuffer.getChannelData(0);
+ const c1 = resultBuffer.getChannelData(1);
+
+        // The src/listener setup is such that audio should only come
+        // from the right until |moveFrame|. Hence the left channel
+        // should be 0 (or very nearly 0).
+ const zero = new Float32Array(moveFrame);
+
+ should(
+ c0.slice(0, moveFrame), `${prefix}: output0[0:${moveFrame - 1}]`)
+ .beCloseToArray(zero, {absoluteThreshold: 1e-16});
+ should(
+ c1.slice(0, moveFrame), `${prefix}: output1[0:${moveFrame - 1}]`)
+ .notBeConstantValueOf(0);
+
+ // At |moveFrame| and beyond, the listener and source are at the
+ // same position, so the outputs from the left and right should be
+ // identical, and the left channel should not be 0 anymore.
+
+ should(c0.slice(moveFrame), `${prefix}: output0[${moveFrame}:]`)
+ .notBeConstantValueOf(0);
+ should(c1.slice(moveFrame), `${prefix}: output1[${moveFrame}:]`)
+ .beCloseToArray(c0.slice(moveFrame));
+ });
+ }
+ </script>
+ </body>
+</html>
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html
new file mode 100644
index 0000000000..c434aa8c6a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html
@@ -0,0 +1,468 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: Panner
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'PannerNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'PannerNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'panningModel', value: 'equalpower'},
+ {name: 'positionX', value: 0}, {name: 'positionY', value: 0},
+ {name: 'positionZ', value: 0}, {name: 'orientationX', value: 1},
+ {name: 'orientationY', value: 0}, {name: 'orientationZ', value: 0},
+ {name: 'distanceModel', value: 'inverse'},
+ {name: 'refDistance', value: 1}, {name: 'maxDistance', value: 10000},
+ {name: 'rolloffFactor', value: 1},
+ {name: 'coneInnerAngle', value: 360},
+ {name: 'coneOuterAngle', value: 360},
+ {name: 'coneOuterGain', value: 0}
+ ]);
+
+ // Test the listener too, while we're at it.
+ let listenerAttributes = [
+ {name: 'positionX', value: 0},
+ {name: 'positionY', value: 0},
+ {name: 'positionZ', value: 0},
+ {name: 'forwardX', value: 0},
+ {name: 'forwardY', value: 0},
+ {name: 'forwardZ', value: -1},
+ {name: 'upX', value: 0},
+ {name: 'upY', value: 1},
+ {name: 'upZ', value: 0},
+ ];
+
+ listenerAttributes.forEach((item) => {
+ should(
+ context.listener[item.name].value,
+ 'context.listener.' + item.name + '.value')
+ .beEqualTo(item.value);
+ });
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ // Can't use testAudioNodeOptions because the constraints for this node
+ // are not supported there.
+ let node;
+
+ // Test that we can set the channel count to 1 or 2.
+ let options = {channelCount: 1};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node1 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelCount, 'node1.channelCount')
+ .beEqualTo(options.channelCount);
+
+ options = {channelCount: 2};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node2 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelCount, 'node2.channelCount')
+ .beEqualTo(options.channelCount);
+
+ // Test that other channel counts throw an error
+ options = {channelCount: 0};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.channelCount = options.channelCount;
+ },
+ `node.channelCount = ${options.channelCount}`)
+ .throw(DOMException, "NotSupportedError");
+ should(node.channelCount,
+ `node.channelCount after setting to ${options.channelCount}`)
+ .beEqualTo(2);
+
+ options = {channelCount: 3};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.channelCount = options.channelCount;
+ },
+ `node.channelCount = ${options.channelCount}`)
+ .throw(DOMException, "NotSupportedError");
+ should(node.channelCount,
+ `node.channelCount after setting to ${options.channelCount}`)
+ .beEqualTo(2);
+
+ options = {channelCount: 99};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.channelCount = options.channelCount;
+ },
+ `node.channelCount = ${options.channelCount}`)
+ .throw(DOMException, "NotSupportedError");
+ should(node.channelCount,
+ `node.channelCount after setting to ${options.channelCount}`)
+ .beEqualTo(2);
+
+ // Test channelCountMode. A mode of "max" is illegal, but others are
+ // ok.
+ options = {channelCountMode: 'clamped-max'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node3 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelCountMode, 'node3.channelCountMode')
+ .beEqualTo(options.channelCountMode);
+
+ options = {channelCountMode: 'explicit'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node4 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelCountMode, 'node4.channelCountMode')
+ .beEqualTo(options.channelCountMode);
+
+ options = {channelCountMode: 'max'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'NotSupportedError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.channelCountMode = options.channelCountMode;
+ },
+ `node.channelCountMode = ${options.channelCountMode}`)
+ .throw(DOMException, "NotSupportedError");
+ should(node.channelCountMode,
+ `node.channelCountMode after setting to ${options.channelCountMode}`)
+ .beEqualTo("clamped-max");
+
+ options = {channelCountMode: 'foobar'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+            'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.channelCountMode = options.channelCountMode;
+ },
+ `node.channelCountMode = ${options.channelCountMode}`)
+ .notThrow(); // Invalid assignment to enum-valued attrs does not throw.
+ should(node.channelCountMode,
+ `node.channelCountMode after setting to ${options.channelCountMode}`)
+ .beEqualTo("clamped-max");
+
+ // Test channelInterpretation.
+ options = {channelInterpretation: 'speakers'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node5 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelInterpretation, 'node5.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ options = {channelInterpretation: 'discrete'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node6 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.channelInterpretation, 'node6.channelInterpretation')
+ .beEqualTo(options.channelInterpretation);
+
+ options = {channelInterpretation: 'foobar'};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(TypeError);
+
+ // Test maxDistance
+ options = {maxDistance: -1};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(RangeError);
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.maxDistance = options.maxDistance;
+ },
+ `node.maxDistance = ${options.maxDistance}`)
+ .throw(RangeError);
+ should(node.maxDistance,
+ `node.maxDistance after setting to ${options.maxDistance}`)
+ .beEqualTo(10000);
+
+ options = {maxDistance: 100};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node7 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.maxDistance, 'node7.maxDistance')
+ .beEqualTo(options.maxDistance);
+
+ // Test rolloffFactor
+ options = {rolloffFactor: -1};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(RangeError);
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.rolloffFactor = options.rolloffFactor;
+ },
+ `node.rolloffFactor = ${options.rolloffFactor}`)
+ .throw(RangeError);
+ should(node.rolloffFactor,
+ `node.rolloffFactor after setting to ${options.rolloffFactor}`)
+ .beEqualTo(1);
+
+ options = {rolloffFactor: 0};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node8 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.rolloffFactor, 'node8.rolloffFactor')
+ .beEqualTo(options.rolloffFactor);
+
+ options = {rolloffFactor: 0.5};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node8 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.rolloffFactor, 'node8.rolloffFactor')
+ .beEqualTo(options.rolloffFactor);
+
+ options = {rolloffFactor: 100};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node8 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.rolloffFactor, 'node8.rolloffFactor')
+ .beEqualTo(options.rolloffFactor);
+
+ // Test coneOuterGain
+ options = {coneOuterGain: -1};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.coneOuterGain = options.coneOuterGain;
+ },
+ `node.coneOuterGain = ${options.coneOuterGain}`)
+ .throw(DOMException, 'InvalidStateError');
+ should(node.coneOuterGain,
+ `node.coneOuterGain after setting to ${options.coneOuterGain}`)
+ .beEqualTo(0);
+
+ options = {coneOuterGain: 1.1};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .throw(DOMException, 'InvalidStateError');
+ should(
+ () => {
+ node = new PannerNode(context);
+ node.coneOuterGain = options.coneOuterGain;
+ },
+ `node.coneOuterGain = ${options.coneOuterGain}`)
+ .throw(DOMException, 'InvalidStateError');
+ should(node.coneOuterGain,
+ `node.coneOuterGain after setting to ${options.coneOuterGain}`)
+ .beEqualTo(0);
+
+ options = {coneOuterGain: 0.0};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node9 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.coneOuterGain, 'node9.coneOuterGain')
+ .beEqualTo(options.coneOuterGain);
+ options = {coneOuterGain: 0.5};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node9 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.coneOuterGain, 'node9.coneOuterGain')
+ .beEqualTo(options.coneOuterGain);
+
+ options = {coneOuterGain: 1.0};
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node9 = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node.coneOuterGain, 'node9.coneOuterGain')
+ .beEqualTo(options.coneOuterGain);
+
+ task.done();
+ });
+
+ audit.define('constructor with options', (task, should) => {
+ let node;
+ let options = {
+ panningModel: 'HRTF',
+        // We use full double-precision values here to verify that the actual
+ // AudioParam value is properly rounded to a float. The actual value
+ // is immaterial as long as x != Math.fround(x).
+ positionX: Math.SQRT2,
+ positionY: 2 * Math.SQRT2,
+ positionZ: 3 * Math.SQRT2,
+ orientationX: -Math.SQRT2,
+ orientationY: -2 * Math.SQRT2,
+ orientationZ: -3 * Math.SQRT2,
+ distanceModel: 'linear',
+        // We use full double-precision values here to verify that the
+        // attribute really is a double. The actual value is immaterial as
+ // long as x != Math.fround(x).
+ refDistance: Math.PI,
+ maxDistance: 2 * Math.PI,
+ rolloffFactor: 3 * Math.PI,
+ coneInnerAngle: 4 * Math.PI,
+ coneOuterAngle: 5 * Math.PI,
+ coneOuterGain: 0.1 * Math.PI
+ };
+
+ should(
+ () => {
+ node = new PannerNode(context, options);
+ },
+ 'node = new PannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(node instanceof PannerNode, 'node instanceof PannerNode')
+ .beEqualTo(true);
+
+ should(node.panningModel, 'node.panningModel')
+ .beEqualTo(options.panningModel);
+ should(node.positionX.value, 'node.positionX.value')
+ .beEqualTo(Math.fround(options.positionX));
+ should(node.positionY.value, 'node.positionY.value')
+ .beEqualTo(Math.fround(options.positionY));
+ should(node.positionZ.value, 'node.positionZ.value')
+ .beEqualTo(Math.fround(options.positionZ));
+ should(node.orientationX.value, 'node.orientationX.value')
+ .beEqualTo(Math.fround(options.orientationX));
+ should(node.orientationY.value, 'node.orientationY.value')
+ .beEqualTo(Math.fround(options.orientationY));
+ should(node.orientationZ.value, 'node.orientationZ.value')
+ .beEqualTo(Math.fround(options.orientationZ));
+ should(node.distanceModel, 'node.distanceModel')
+ .beEqualTo(options.distanceModel);
+ should(node.refDistance, 'node.refDistance')
+ .beEqualTo(options.refDistance);
+ should(node.maxDistance, 'node.maxDistance')
+ .beEqualTo(options.maxDistance);
+ should(node.rolloffFactor, 'node.rolloffFactor')
+ .beEqualTo(options.rolloffFactor);
+ should(node.coneInnerAngle, 'node.coneInnerAngle')
+ .beEqualTo(options.coneInnerAngle);
+ should(node.coneOuterAngle, 'node.coneOuterAngle')
+ .beEqualTo(options.coneOuterAngle);
+ should(node.coneOuterGain, 'node.coneOuterGain')
+ .beEqualTo(options.coneOuterGain);
+
+ should(node.channelCount, 'node.channelCount').beEqualTo(2);
+ should(node.channelCountMode, 'node.channelCountMode')
+ .beEqualTo('clamped-max');
+ should(node.channelInterpretation, 'node.channelInterpretation')
+ .beEqualTo('speakers');
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-exponential.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-exponential.html
new file mode 100644
index 0000000000..383e2c67b6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-exponential.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ distance-exponential.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/distance-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'Exponential distance model for PannerNode'
+ },
+ (task, should) => {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(context, 'exponential', should)
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-inverse.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-inverse.html
new file mode 100644
index 0000000000..a4ff984e09
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-inverse.html
@@ -0,0 +1,28 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ distance-inverse.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/distance-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('test', (task, should) => {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(context, 'inverse', should).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-linear.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-linear.html
new file mode 100644
index 0000000000..812fea3eba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/distance-linear.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ distance-linear.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/distance-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {label: 'test', description: 'Linear distance model PannerNode'},
+ (task, should) => {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(context, 'linear', should).then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html
new file mode 100644
index 0000000000..5c3df0e6fd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html
@@ -0,0 +1,298 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Basic PannerNode with Automation Position Properties
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/panner-formulas.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+
+ // These tests are quite slow, so don't run for many frames. 256 frames
+ // should be enough to demonstrate that automations are working.
+ let renderFrames = 256;
+ let renderDuration = renderFrames / sampleRate;
+
+ let audit = Audit.createTaskRunner();
+
+ // Array of tests for setting the panner positions. These tests basically
+ // verify that the position setters for the panner and listener are
+ // working correctly.
+ let testConfig = [
+ {
+ setter: 'positionX',
+ },
+ {
+ setter: 'positionY',
+ },
+ {
+ setter: 'positionZ',
+ }
+ ];
+
+      // Create tests for the panner position setters. Both mono and stereo
+ // sources are tested.
+ for (let k = 0; k < testConfig.length; ++k) {
+ let config = testConfig[k];
+        // Factory returning the test function for audit.define().
+ let tester = function(config, channelCount) {
+ return (task, should) => {
+ let nodes = createGraph(channelCount);
+ let {context, source, panner} = nodes;
+
+ let message = channelCount == 1 ? 'Mono' : 'Stereo';
+ message += ' panner.' + config.setter;
+
+ testPositionSetter(should, {
+ nodes: nodes,
+ pannerSetter: panner[config.setter],
+ message: message
+ }).then(() => task.done());
+ }
+ };
+
+ audit.define('Stereo panner.' + config.setter, tester(config, 2));
+ audit.define('Mono panner.' + config.setter, tester(config, 1));
+ }
+
+      // Create tests for the listener position setters. Both mono and stereo
+ // sources are tested.
+ for (let k = 0; k < testConfig.length; ++k) {
+ let config = testConfig[k];
+        // Factory returning the test function for audit.define().
+ let tester = function(config, channelCount) {
+ return (task, should) => {
+ let nodes = createGraph(channelCount);
+ let {context, source, panner} = nodes;
+
+ let message = channelCount == 1 ? 'Mono' : 'Stereo';
+ message += ' listener.' + config.setter;
+
+ // Some relatively arbitrary (non-default) position for the source
+ // location.
+ panner.setPosition(1, 0, 1);
+
+ testPositionSetter(should, {
+ nodes: nodes,
+ pannerSetter: context.listener[config.setter],
+ message: message
+ }).then(() => task.done());
+ }
+ };
+
+ audit.define('Stereo listener.' + config.setter, tester(config, 2));
+ audit.define('Mono listener.' + config.setter, tester(config, 1));
+ }
+
+ // Test setPosition method.
+ audit.define('setPosition', (task, should) => {
+ let {context, panner, source} = createGraph(2);
+
+ // Initialize source position (values don't really matter).
+ panner.setPosition(1, 1, 1);
+
+        // After some (unimportant) time, move the panner to an arbitrary new
+        // location.
+ let suspendFrame = 128;
+ context.suspend(suspendFrame / sampleRate)
+ .then(function() {
+ panner.setPosition(-100, 2000, 8000);
+ })
+ .then(context.resume.bind(context));
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ verifyPannerOutputChanged(
+ should, resultBuffer,
+ {message: 'setPosition', suspendFrame: suspendFrame});
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('orientation setter', (task, should) => {
+ let {context, panner, source} = createGraph(2);
+
+ // For orientation to matter, we need to make the source directional,
+ // and also move away from the listener (because the default location is
+ // 0,0,0).
+ panner.setPosition(0, 0, 1);
+ panner.coneInnerAngle = 0;
+ panner.coneOuterAngle = 360;
+ panner.coneOuterGain = .001;
+
+ // After some (unimportant) time, change the panner orientation to a new
+ // orientation. The only constraint is that the orientation changes
+ // from before.
+ let suspendFrame = 128;
+ context.suspend(suspendFrame / sampleRate)
+ .then(function() {
+ panner.orientationX.value = -100;
+ panner.orientationY.value = 2000;
+ panner.orientationZ.value = 8000;
+ })
+ .then(context.resume.bind(context));
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ verifyPannerOutputChanged(should, resultBuffer, {
+ message: 'panner.orientation{XYZ}',
+ suspendFrame: suspendFrame
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('forward setter', (task, should) => {
+ let {context, panner, source} = createGraph(2);
+
+ // For orientation to matter, we need to make the source directional,
+ // and also move away from the listener (because the default location is
+ // 0,0,0).
+ panner.setPosition(0, 0, 1);
+ panner.coneInnerAngle = 0;
+ panner.coneOuterAngle = 360;
+ panner.coneOuterGain = .001;
+
+        // After some (unimportant) time, change the listener's forward
+        // direction. The only constraint is that it changes from before.
+ let suspendFrame = 128;
+ context.suspend(suspendFrame / sampleRate)
+ .then(function() {
+ context.listener.forwardX.value = -100;
+ context.listener.forwardY.value = 2000;
+ context.listener.forwardZ.value = 8000;
+ })
+ .then(context.resume.bind(context));
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ verifyPannerOutputChanged(should, resultBuffer, {
+ message: 'listener.forward{XYZ}',
+ suspendFrame: suspendFrame
+ });
+ })
+ .then(() => task.done());
+ });
+
+ audit.define('up setter', (task, should) => {
+ let {context, panner, source} = createGraph(2);
+
+ // For orientation to matter, we need to make the source directional,
+ // and also move away from the listener (because the default location is
+ // 0,0,0).
+ panner.setPosition(0, 0, 1);
+ panner.coneInnerAngle = 0;
+ panner.coneOuterAngle = 360;
+ panner.coneOuterGain = .001;
+ panner.setPosition(1, 0, 1);
+
+        // After some (unimportant) time, change the listener's up vector.
+        // The only constraint is that it changes from before.
+ let suspendFrame = 128;
+ context.suspend(suspendFrame / sampleRate)
+ .then(function() {
+ context.listener.upX.value = 100;
+ context.listener.upY.value = 100;
+ context.listener.upZ.value = 100;
+ })
+ .then(context.resume.bind(context));
+
+ context.startRendering()
+ .then(function(resultBuffer) {
+ verifyPannerOutputChanged(
+ should, resultBuffer,
+ {message: 'listener.up{XYZ}', suspendFrame: suspendFrame});
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+
+ function createGraph(channelCount) {
+ let context = new OfflineAudioContext(2, renderFrames, sampleRate);
+ let panner = context.createPanner();
+ let source = context.createBufferSource();
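+        // createConstantBuffer() (from audit-util.js) builds a one-frame
+        // buffer holding the given constant value(s); looping it yields a
+        // steady DC signal, mono or stereo depending on channelCount.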
+ source.buffer =
+ createConstantBuffer(context, 1, channelCount == 1 ? 1 : [1, 2]);
+ source.loop = true;
+
+ source.connect(panner);
+ panner.connect(context.destination);
+
+ source.start();
+ return {context: context, source: source, panner: panner};
+ }
+
+ function testPositionSetter(should, options) {
+ let {nodes, pannerSetter, message} = options;
+
+ let {context, source, panner} = nodes;
+
+        // Set the initial position. (The value doesn't matter.)
+ pannerSetter.value = 1;
+
+        // Wait a bit and set a new position. (The actual time and position
+        // don't matter.)
+ let suspendFrame = 128;
+ context.suspend(suspendFrame / sampleRate)
+ .then(function() {
+ pannerSetter.value = 10000;
+ })
+ .then(context.resume.bind(context));
+
+ return context.startRendering().then(function(resultBuffer) {
+ verifyPannerOutputChanged(
+ should, resultBuffer,
+ {message: message, suspendFrame: suspendFrame});
+ });
+ }
+
+ function verifyPannerOutputChanged(should, resultBuffer, options) {
+ let {message, suspendFrame} = options;
+ // Verify that the first part of output is constant. (Doesn't matter
+ // what.)
+ let data0 = resultBuffer.getChannelData(0);
+ let data1 = resultBuffer.getChannelData(1);
+
+ let middle = '[0, ' + suspendFrame + ') ';
+ should(
+ data0.slice(0, suspendFrame),
+ message + '.value frame ' + middle + 'channel 0')
+ .beConstantValueOf(data0[0]);
+ should(
+ data1.slice(0, suspendFrame),
+ message + '.value frame ' + middle + 'channel 1')
+ .beConstantValueOf(data1[0]);
+
+ // The rest after suspendTime should be constant and different from the
+ // first part.
+ middle = '[' + suspendFrame + ', ' + renderFrames + ') ';
+ should(
+ data0.slice(suspendFrame),
+ message + '.value frame ' + middle + 'channel 0')
+ .beConstantValueOf(data0[suspendFrame]);
+ should(
+ data1.slice(suspendFrame),
+ message + '.value frame ' + middle + 'channel 1')
+ .beConstantValueOf(data1[suspendFrame]);
+ should(
+ data0[suspendFrame],
+ message + ': Output at frame ' + suspendFrame + ' channel 0')
+ .notBeEqualTo(data0[0]);
+ should(
+ data1[suspendFrame],
+ message + ': Output at frame ' + suspendFrame + ' channel 1')
+ .notBeEqualTo(data1[0]);
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-equalpower-stereo.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-equalpower-stereo.html
new file mode 100644
index 0000000000..7afc9c2a39
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-equalpower-stereo.html
@@ -0,0 +1,47 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ panner-automation-equalpower-stereo.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/panner-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // To test the panner, we create a number of panner nodes
+ // equally spaced on a semicircle at unit distance. The
+ // semicircle covers the azimuth range from -90 to 90 deg,
+ // covering full left to full right. Each source is an impulse
+      // turning on at a different time, and we check that the rendered
+ // impulse has the expected gain.
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Equal-power panner model of AudioPannerNode with stereo source',
+ },
+ (task, should) => {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(
+ context, should, nodesToCreate, 2,
+ function(panner, x, y, z) {
+ panner.positionX.value = x;
+ panner.positionY.value = y;
+ panner.positionZ.value = z;
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html
new file mode 100644
index 0000000000..8e09e869ac
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html
@@ -0,0 +1,265 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Automation of PannerNode Positions
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/panner-formulas.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let sampleRate = 48000;
+ // These tests are quite slow, so don't run for many frames. 256 frames
+ // should be enough to demonstrate that automations are working.
+ let renderFrames = 256;
+ let renderDuration = renderFrames / sampleRate;
+
+ let context;
+ let panner;
+
+ let audit = Audit.createTaskRunner();
+
+ // Set of tests for the panner node with automations applied to the
+ // position of the source.
+ let testConfigs = [
+ {
+ // Distance model parameters for the panner
+ distanceModel: {model: 'inverse', rolloff: 1},
+ // Initial location of the source
+ startPosition: [0, 0, 1],
+          // Final position of the source. For this test, we only move along
+          // the z axis, which doesn't change the azimuth angle.
+          endPosition: [0, 0, 10000],
+ },
+ {
+ distanceModel: {model: 'inverse', rolloff: 1},
+ startPosition: [0, 0, 1],
+          // An essentially random end position, chosen so that the azimuth
+          // angle changes as we move from the start to the end.
+          endPosition: [20000, 30000, 10000],
+ errorThreshold: [
+ {
+ // Error threshold for 1-channel case
+ relativeThreshold: 4.8124e-7
+ },
+ {
+ // Error threshold for 2-channel case
+ relativeThreshold: 4.3267e-7
+ }
+ ],
+ },
+ {
+ distanceModel: {model: 'exponential', rolloff: 1.5},
+ startPosition: [0, 0, 1],
+ endPosition: [20000, 30000, 10000],
+ errorThreshold:
+ [{relativeThreshold: 5.0783e-7}, {relativeThreshold: 5.2180e-7}]
+ },
+ {
+ distanceModel: {model: 'linear', rolloff: 1},
+ startPosition: [0, 0, 1],
+ endPosition: [20000, 30000, 10000],
+ errorThreshold: [
+ {relativeThreshold: 6.5324e-6}, {relativeThreshold: 6.5756e-6}
+ ]
+ }
+ ];
+
+ for (let k = 0; k < testConfigs.length; ++k) {
+ let config = testConfigs[k];
+ let tester = function(c, channelCount) {
+ return (task, should) => {
+ runTest(should, c, channelCount).then(() => task.done());
+ }
+ };
+
+ let baseTestName = config.distanceModel.model +
+ ' rolloff: ' + config.distanceModel.rolloff;
+
+ // Define tasks for both 1-channel and 2-channel
+ audit.define(k + ': 1-channel ' + baseTestName, tester(config, 1));
+ audit.define(k + ': 2-channel ' + baseTestName, tester(config, 2));
+ }
+
+ audit.run();
+
+ function runTest(should, options, channelCount) {
+        // The output has 5 channels: channels 0 and 1 are the stereo output
+        // of the panner node. Channels 2-4 carry the automations of the x,
+        // y, and z coordinates, so that we have the actual coordinates used
+        // for the panner automation.
+ context = new OfflineAudioContext(5, renderFrames, sampleRate);
+
+        // Source (mono or stereo) for the panner.
+ let source = context.createBufferSource();
+ source.buffer = createConstantBuffer(
+ context, renderFrames, channelCount == 1 ? 1 : [1, 2]);
+
+ panner = context.createPanner();
+ panner.distanceModel = options.distanceModel.model;
+ panner.rolloffFactor = options.distanceModel.rolloff;
+ panner.panningModel = 'equalpower';
+
+        // Source and gain nodes for the coordinate calculations.
+ let dist = context.createBufferSource();
+ dist.buffer = createConstantBuffer(context, 1, 1);
+ dist.loop = true;
+ let gainX = context.createGain();
+ let gainY = context.createGain();
+ let gainZ = context.createGain();
+ dist.connect(gainX);
+ dist.connect(gainY);
+ dist.connect(gainZ);
+
+        // Set the gain automations to match the coordinate automations of
+        // the panner.
+
+ // End the automation some time before the end of the rendering so we
+ // can verify that automation has the correct end time and value.
+ let endAutomationTime = 0.75 * renderDuration;
+
+ gainX.gain.setValueAtTime(options.startPosition[0], 0);
+ gainX.gain.linearRampToValueAtTime(
+ options.endPosition[0], endAutomationTime);
+ gainY.gain.setValueAtTime(options.startPosition[1], 0);
+ gainY.gain.linearRampToValueAtTime(
+ options.endPosition[1], endAutomationTime);
+ gainZ.gain.setValueAtTime(options.startPosition[2], 0);
+ gainZ.gain.linearRampToValueAtTime(
+ options.endPosition[2], endAutomationTime);
+
+ dist.start();
+
+        // Splitter and merger to map the panner output and the coordinate
+        // automations to the correct channels in the destination.
+ let splitter = context.createChannelSplitter(2);
+ let merger = context.createChannelMerger(5);
+
+ source.connect(panner);
+ // Split the output of the panner to separate channels
+ panner.connect(splitter);
+
+        // Merge the panner outputs and the coordinate outputs to the
+        // correct destination channels.
+ splitter.connect(merger, 0, 0);
+ splitter.connect(merger, 1, 1);
+ gainX.connect(merger, 0, 2);
+ gainY.connect(merger, 0, 3);
+ gainZ.connect(merger, 0, 4);
+
+ merger.connect(context.destination);
+
+ // Initialize starting point of the panner.
+ panner.positionX.setValueAtTime(options.startPosition[0], 0);
+ panner.positionY.setValueAtTime(options.startPosition[1], 0);
+ panner.positionZ.setValueAtTime(options.startPosition[2], 0);
+
+        // Automate the coordinates to move from the start position to the
+        // end position.
+ panner.positionX.linearRampToValueAtTime(
+ options.endPosition[0], 0.75 * renderDuration);
+ panner.positionY.linearRampToValueAtTime(
+ options.endPosition[1], 0.75 * renderDuration);
+ panner.positionZ.linearRampToValueAtTime(
+ options.endPosition[2], 0.75 * renderDuration);
+
+ source.start();
+
+ // Go!
+ return context.startRendering().then(function(renderedBuffer) {
+ // Get the panner outputs
+ let data0 = renderedBuffer.getChannelData(0);
+ let data1 = renderedBuffer.getChannelData(1);
+ let xcoord = renderedBuffer.getChannelData(2);
+ let ycoord = renderedBuffer.getChannelData(3);
+ let zcoord = renderedBuffer.getChannelData(4);
+
+          // For a ramp along the z axis the equal-power panning gain stays
+          // constant and only the distance effect matters; when the end
+          // position moves off-axis, the azimuth changes too, so it is
+          // recomputed per frame below.
+
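+          // For reference, the spec's distance gains, with the distance d
+          // clamped below by refDistance (and above by maxDistance for the
+          // linear model) and r = rolloffFactor, are:
+          //   inverse:     refDistance / (refDistance + r * (d - refDistance))
+          //   linear:      1 - r * (d - refDistance) / (maxDistance - refDistance)
+          //   exponential: (d / refDistance)^(-r)
+          // The helpers from panner-formulas.js are assumed to implement
+          // these.
+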
+ // Compute the distance gain
+          let distanceGain = new Float32Array(xcoord.length);
+
+ if (panner.distanceModel === 'inverse') {
+ for (let k = 0; k < distanceGain.length; ++k) {
+              distanceGain[k] =
+                  inverseDistance(panner, xcoord[k], ycoord[k], zcoord[k]);
+ }
+ } else if (panner.distanceModel === 'linear') {
+ for (let k = 0; k < distanceGain.length; ++k) {
+              distanceGain[k] =
+                  linearDistance(panner, xcoord[k], ycoord[k], zcoord[k]);
+ }
+ } else if (panner.distanceModel === 'exponential') {
+ for (let k = 0; k < distanceGain.length; ++k) {
+              distanceGain[k] =
+                  exponentialDistance(panner, xcoord[k], ycoord[k], zcoord[k]);
+ }
+ }
+
+          // Compute the expected result: apply the equal-power panning gain
+          // for the per-frame azimuth, then apply the distance gain.
+ let buffer0 = source.buffer.getChannelData(0);
+ let buffer1 =
+ channelCount == 2 ? source.buffer.getChannelData(1) : buffer0;
+
+ let azimuth = new Float32Array(buffer0.length);
+
+ for (let k = 0; k < data0.length; ++k) {
+ azimuth[k] = calculateAzimuth(
+ [xcoord[k], ycoord[k], zcoord[k]],
+ [
+ context.listener.positionX.value,
+ context.listener.positionY.value,
+ context.listener.positionZ.value
+ ],
+ [
+ context.listener.forwardX.value,
+ context.listener.forwardY.value,
+ context.listener.forwardZ.value
+ ],
+ [
+ context.listener.upX.value, context.listener.upY.value,
+ context.listener.upZ.value
+ ]);
+ }
+
+ let expected = applyPanner(azimuth, buffer0, buffer1, channelCount);
+ let expected0 = expected.left;
+ let expected1 = expected.right;
+
+ for (let k = 0; k < expected0.length; ++k) {
+ expected0[k] *= distanceGain[k];
+ expected1[k] *= distanceGain[k];
+ }
+
+ let info = options.distanceModel.model +
+ ', rolloff: ' + options.distanceModel.rolloff;
+ let prefix = channelCount + '-channel ' +
+ '[' + options.startPosition[0] + ', ' + options.startPosition[1] +
+ ', ' + options.startPosition[2] + '] -> [' +
+ options.endPosition[0] + ', ' + options.endPosition[1] + ', ' +
+ options.endPosition[2] + ']: ';
+
+        let errorThreshold = {absoluteThreshold: 0};
+
+        if (options.errorThreshold)
+          errorThreshold = options.errorThreshold[channelCount - 1];
+
+        should(data0, prefix + 'distanceModel: ' + info + ', left channel')
+            .beCloseToArray(expected0, errorThreshold);
+        should(data1, prefix + 'distanceModel: ' + info + ', right channel')
+            .beCloseToArray(expected1, errorThreshold);
+ });
+ }
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-azimuth.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-azimuth.html
new file mode 100644
index 0000000000..d09f2ec352
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-azimuth.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>Test Panner Azimuth Calculation</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ const audit = Audit.createTaskRunner();
+
+ // Fairly arbitrary sample rate
+ const sampleRate = 16000;
+
+ audit.define('Azimuth calculation', (task, should) => {
+ // Two channels for the context so we can see each channel of the
+ // panner node.
+ let context = new OfflineAudioContext(2, sampleRate, sampleRate);
+
+ let src = new ConstantSourceNode(context);
+ let panner = new PannerNode(context);
+
+ src.connect(panner).connect(context.destination);
+
+      // The source is directly above the listener, so the azimuth is 0 and
+      // the audio should be the same in both the left and right channels.
+ panner.positionY.value = 1;
+
+ src.start();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ // The left and right channels should contain the same signal.
+ let c0 = audioBuffer.getChannelData(0);
+ let c1 = audioBuffer.getChannelData(1);
+
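+              // The equal-power model computes gainL = cos(x * PI / 2) and
+              // gainR = sin(x * PI / 2) with x = (azimuth + 90) / 180; an
+              // azimuth of 0 gives x = 0.5, so both gains are sqrt(1/2).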
+ let expected = Math.fround(Math.SQRT1_2);
+
+ should(c0, 'Left channel').beConstantValueOf(expected);
+              should(c1, 'Right channel').beConstantValueOf(expected);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-distance-clamping.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-distance-clamping.html
new file mode 100644
index 0000000000..78c1ec6dc2
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-distance-clamping.html
@@ -0,0 +1,227 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Clamping of Distance for PannerNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Arbitrary sample rate and render length.
+ let sampleRate = 48000;
+ let renderFrames = 128;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('ref-distance-error', (task, should) => {
+ testDistanceLimits(should, {name: 'refDistance', isZeroAllowed: true});
+ task.done();
+ });
+
+ audit.define('max-distance-error', (task, should) => {
+ testDistanceLimits(should, {name: 'maxDistance', isZeroAllowed: false});
+ task.done();
+ });
+
+ function testDistanceLimits(should, options) {
+        // Verify that exceptions are thrown for invalid values of the given
+        // distance attribute.
+ let context = new OfflineAudioContext(1, renderFrames, sampleRate);
+
+ let attrName = options.name;
+ let prefix = 'new PannerNode(c, {' + attrName + ': ';
+
+ should(function() {
+ let nodeOptions = {};
+ nodeOptions[attrName] = -1;
+ new PannerNode(context, nodeOptions);
+ }, prefix + '-1})').throw(RangeError);
+
+ if (options.isZeroAllowed) {
+ should(function() {
+ let nodeOptions = {};
+ nodeOptions[attrName] = 0;
+ new PannerNode(context, nodeOptions);
+ }, prefix + '0})').notThrow();
+ } else {
+ should(function() {
+ let nodeOptions = {};
+ nodeOptions[attrName] = 0;
+ new PannerNode(context, nodeOptions);
+ }, prefix + '0})').throw(RangeError);
+ }
+
+        // The smallest representable positive double-precision float.
+ let leastPositiveDoubleFloat = 4.9406564584124654e-324;
+
+ should(function() {
+ let nodeOptions = {};
+ nodeOptions[attrName] = leastPositiveDoubleFloat;
+ new PannerNode(context, nodeOptions);
+ }, prefix + leastPositiveDoubleFloat + '})').notThrow();
+
+ prefix = 'panner.' + attrName + ' = ';
+        let panner = new PannerNode(context);
+ should(function() {
+ panner[attrName] = -1;
+ }, prefix + '-1').throw(RangeError);
+
+ if (options.isZeroAllowed) {
+ should(function() {
+ panner[attrName] = 0;
+ }, prefix + '0').notThrow();
+ } else {
+ should(function() {
+ panner[attrName] = 0;
+ }, prefix + '0').throw(RangeError);
+ }
+
+ should(function() {
+ panner[attrName] = leastPositiveDoubleFloat;
+ }, prefix + leastPositiveDoubleFloat).notThrow();
+ }
+
+ audit.define('min-distance', async (task, should) => {
+        // Test clamping of the panner distance to refDistance for all of
+        // the distance models. The actual distance is arbitrary as long as
+        // it's less than refDistance. We test default and non-default
+        // values for the panner's refDistance and maxDistance.
+ await runTest(should, {
+ distance: 0.01,
+ distanceModel: 'linear',
+ });
+ await runTest(should, {
+ distance: 0.01,
+ distanceModel: 'exponential',
+ });
+ await runTest(should, {
+ distance: 0.01,
+ distanceModel: 'inverse',
+ });
+ await runTest(should, {
+ distance: 2,
+ distanceModel: 'linear',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ await runTest(should, {
+ distance: 2,
+ distanceModel: 'exponential',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ await runTest(should, {
+ distance: 2,
+ distanceModel: 'inverse',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ task.done();
+ });
+
+ audit.define('max-distance', async (task, should) => {
+ // Like the "min-distance" task, but for clamping to the max
+ // distance. The actual distance is again arbitrary as long as it is
+ // greater than maxDistance.
+ await runTest(should, {
+ distance: 20000,
+ distanceModel: 'linear',
+ });
+ await runTest(should, {
+ distance: 21000,
+ distanceModel: 'exponential',
+ });
+ await runTest(should, {
+ distance: 23000,
+ distanceModel: 'inverse',
+ });
+ await runTest(should, {
+ distance: 5000,
+ distanceModel: 'linear',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ await runTest(should, {
+ distance: 5000,
+ distanceModel: 'exponential',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ await runTest(should, {
+ distance: 5000,
+ distanceModel: 'inverse',
+ maxDistance: 1000,
+ refDistance: 10,
+ });
+ task.done();
+ });
+
+ function runTest(should, options) {
+ let context = new OfflineAudioContext(2, renderFrames, sampleRate);
+ let src = new OscillatorNode(context, {
+ type: 'sawtooth',
+ frequency: 20 * 440,
+ });
+
+ // Set panner options. Use a non-default rolloffFactor so that the
+ // various distance models look distinctly different.
+ let pannerOptions = {};
+ Object.assign(pannerOptions, options, {rolloffFactor: 0.5});
+
+ let pannerRef = new PannerNode(context, pannerOptions);
+ let pannerTest = new PannerNode(context, pannerOptions);
+
+ // Split the panner output so we can grab just one of the output
+ // channels.
+ let splitRef = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ let splitTest = new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ // Merge the panner outputs back into one stereo stream for the
+ // destination.
+ let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ src.connect(pannerTest).connect(splitTest).connect(merger, 0, 0);
+ src.connect(pannerRef).connect(splitRef).connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+        // Move the panner some distance away. Arbitrarily select the z
+        // direction. For the reference panner, manually clamp the distance.
+ // All models clamp the distance to a minimum of refDistance. Only the
+ // linear model also clamps to a maximum of maxDistance.
+ let xRef = Math.max(options.distance, pannerRef.refDistance);
+
+ if (pannerRef.distanceModel === 'linear') {
+ xRef = Math.min(xRef, pannerRef.maxDistance);
+ }
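+
+        // For example, with refDistance = 10 and maxDistance = 1000, a
+        // distance of 2 clamps up to 10 for every model, and a distance of
+        // 5000 clamps down to 1000 for the linear model only.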
+
+ let xTest = options.distance;
+
+ pannerRef.positionZ.setValueAtTime(xRef, 0);
+ pannerTest.positionZ.setValueAtTime(xTest, 0);
+
+ src.start();
+
+ return context.startRendering().then(function(resultBuffer) {
+ let actual = resultBuffer.getChannelData(0);
+ let expected = resultBuffer.getChannelData(1);
+
+ should(
+ xTest < pannerRef.refDistance || xTest > pannerRef.maxDistance,
+ 'Model: ' + options.distanceModel + ': Distance (' + xTest +
+ ') is outside the range [' + pannerRef.refDistance + ', ' +
+ pannerRef.maxDistance + ']')
+ .beEqualTo(true);
+ should(actual, 'Test panner output ' + JSON.stringify(options))
+ .beEqualToArray(expected);
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower-stereo.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower-stereo.html
new file mode 100644
index 0000000000..2a0225b3f6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower-stereo.html
@@ -0,0 +1,44 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ panner-equalpower-stereo.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/panner-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // To test the panner, we create a number of panner nodes
+ // equally spaced on a semicircle at unit distance. The
+ // semicircle covers the azimuth range from -90 to 90 deg,
+ // covering full left to full right. Each source is an impulse
+      // turning on at a different time, and we check that the rendered
+ // impulse has the expected gain.
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Equal-power panner model of AudioPannerNode with stereo source'
+ },
+ (task, should) => {
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(
+ context, should, nodesToCreate, 2,
+ function(panner, x, y, z) {
+ panner.setPosition(x, y, z);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower.html
new file mode 100644
index 0000000000..3ff21b651f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-equalpower.html
@@ -0,0 +1,139 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ panner-equalpower.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/panner-model-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ // To test the panner, we create a number of panner nodes
+ // equally spaced on a semicircle at unit distance. The
+ // semicircle covers the azimuth range from -90 to 90 deg,
+ // covering full left to full right. Each source is an impulse
+      // turning on at a different time, and we check that the rendered
+ // impulse has the expected gain.
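+      // For a mono source, the equal-power model maps the azimuth to
+      // x = (azimuth + 90) / 180 and applies gainL = cos(x * PI / 2),
+      // gainR = sin(x * PI / 2); panner-model-testing.js is assumed to use
+      // these gains when computing the expected impulse amplitudes.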
+ audit.define(
+ {
+ label: 'test',
+ description: 'Equal-power panner model of AudioPannerNode',
+ },
+ (task, should) => {
+ // Create offline audio context.
+ context = new OfflineAudioContext(
+ 2, sampleRate * renderLengthSeconds, sampleRate);
+
+ createTestAndRun(
+ context, should, nodesToCreate, 1,
+ function(panner, x, y, z) {
+ panner.setPosition(x, y, z);
+ })
+ .then(() => task.done());
+ });
+
+ // Test that a mono source plays out on both the left and right channels
+ // when the source and listener positions are the same.
+ audit.define(
+ {
+ label: 'mono source=listener',
+ description: 'Source and listener at the same position'
+ },
+ (task, should) => {
+            // Must be stereo to verify output and only need a short duration.
+ let context =
+ new OfflineAudioContext(2, 0.25 * sampleRate, sampleRate);
+
+            // Arbitrary position for the source and listener, just so we
+            // don't use the default positions.
+ let x = 1;
+ let y = 2;
+ let z = 3;
+
+ context.listener.setPosition(x, y, z);
+
+ let src = new OscillatorNode(context);
+ let panner = new PannerNode(context, {
+ panningModel: 'equalpower',
+ positionX: x,
+ positionY: y,
+ positionZ: z
+ });
+
+ src.connect(panner).connect(context.destination);
+
+ src.start();
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ // Verify that both channels have the same data because they
+ // should when the source and listener are at the same
+ // position
+ let c0 = renderedBuffer.getChannelData(0);
+ let c1 = renderedBuffer.getChannelData(1);
+ should(c0, 'Mono: Left and right channels').beEqualToArray(c1);
+ })
+ .then(() => task.done());
+ });
+
+ // Test that a stereo source plays out on both the left and right channels
+ // when the source and listener positions are the same.
+ audit.define(
+ {
+ label: 'stereo source=listener',
+ description: 'Source and listener at the same position'
+ },
+ (task, should) => {
+ // Must be stereo to verify output and only need a short duration.
+ let context =
+ new OfflineAudioContext(2, 0.25 * sampleRate, sampleRate);
+
+            // Arbitrary position for the source and listener, just so we
+            // don't use the default positions.
+ let x = 1;
+ let y = 2;
+ let z = 3;
+
+ context.listener.setPosition(x, y, z);
+
+ let src = new OscillatorNode(context);
+ let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+ let panner = new PannerNode(context, {
+ panningModel: 'equalpower',
+ positionX: x,
+ positionY: y,
+ positionZ: z
+ });
+
+ // Make the oscillator a stereo signal (with identical signals on
+ // each channel).
+ src.connect(merger, 0, 0);
+ src.connect(merger, 0, 1);
+
+ merger.connect(panner).connect(context.destination);
+
+ src.start();
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ // Verify that both channels have the same data because they
+ // should when the source and listener are at the same
+ // position.
+ let c0 = renderedBuffer.getChannelData(0);
+ let c1 = renderedBuffer.getChannelData(1);
+ should(c0, 'Stereo: Left and right channels').beEqualToArray(c1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-rolloff-clamping.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-rolloff-clamping.html
new file mode 100644
index 0000000000..387f873010
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/panner-rolloff-clamping.html
@@ -0,0 +1,98 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Clamping of PannerNode rolloffFactor
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Fairly arbitrary sample rate and render frames.
+ let sampleRate = 16000;
+ let renderFrames = 2048;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'linear-clamp-high',
+ description: 'rolloffFactor clamping for linear distance model'
+ },
+ (task, should) => {
+ runTest(should, {
+ distanceModel: 'linear',
+ // Fairly arbitrary value outside the nominal range
+ rolloffFactor: 2,
+ clampedRolloff: 1
+ }).then(() => task.done());
+ });
+
+ // Test clamping of the rolloffFactor. The test is done by comparing the
+ // output of a panner with the rolloffFactor set outside the nominal range
+ // against the output of a panner with the rolloffFactor clamped to the
+ // nominal range. The outputs should be the same.
+ //
+ // The |options| dictionary should contain the members
+ // distanceModel - The distance model to use for the panners
+ // rolloffFactor - The desired rolloffFactor. Should be outside the
+ // nominal range of the distance model.
+ // clampedRolloff - The rolloffFactor (above) clamped to the nominal
+ // range for the given distance model.
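+      //
+      // Per the spec, only the linear model clamps rolloffFactor to [0, 1];
+      // for the other models, only negative values are clamped (to 0).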
+ function runTest(should, options) {
+ // Offline context with two channels. The first channel is the panner
+ // node under test. The second channel is the reference panner node.
+ let context = new OfflineAudioContext(2, renderFrames, sampleRate);
+
+ // The source for the panner nodes. This is fairly arbitrary.
+ let src = new OscillatorNode(context, {type: 'sawtooth'});
+
+ // Create the test panner with the specified rolloff factor. The
+ // position is fairly arbitrary, but something that is not the default
+ // is good to show the distance model had some effect.
+ let pannerTest = new PannerNode(context, {
+ rolloffFactor: options.rolloffFactor,
+ distanceModel: options.distanceModel,
+ positionX: 5000
+ });
+
+ // Create the reference panner with the rolloff factor clamped to the
+ // appropriate limit.
+ let pannerRef = new PannerNode(context, {
+ rolloffFactor: options.clampedRolloff,
+ distanceModel: options.distanceModel,
+ positionX: 5000
+ });
+
+        // Connect the source through the panners to the destination.
+ let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ src.connect(pannerTest).connect(merger, 0, 0);
+ src.connect(pannerRef).connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ src.start();
+
+ return context.startRendering().then(function(resultBuffer) {
+ // The two channels should be the same due to the clamping. Verify
+ // that they are the same.
+ let actual = resultBuffer.getChannelData(0);
+ let expected = resultBuffer.getChannelData(1);
+
+ let message = 'Panner distanceModel: "' + options.distanceModel +
+ '", rolloffFactor: ' + options.rolloffFactor;
+
+ should(actual, message).beEqualToArray(expected);
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.window.js b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.window.js
new file mode 100644
index 0000000000..298fce0f20
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.window.js
@@ -0,0 +1,71 @@
+test((t) => {
+ const context = new AudioContext();
+ const source = new ConstantSourceNode(context);
+ const panner = new PannerNode(context);
+ source.connect(panner).connect(context.destination);
+
+ // Basic parameters
+  assert_equals(panner.numberOfInputs, 1);
+  assert_equals(panner.numberOfOutputs, 1);
+ assert_equals(panner.refDistance, 1);
+ panner.refDistance = 270.5;
+ assert_equals(panner.refDistance, 270.5);
+ assert_equals(panner.maxDistance, 10000);
+ panner.maxDistance = 100.5;
+ assert_equals(panner.maxDistance, 100.5);
+ assert_equals(panner.rolloffFactor, 1);
+ panner.rolloffFactor = 0.75;
+ assert_equals(panner.rolloffFactor, 0.75);
+ assert_equals(panner.coneInnerAngle, 360);
+ panner.coneInnerAngle = 240.5;
+ assert_equals(panner.coneInnerAngle, 240.5);
+ assert_equals(panner.coneOuterAngle, 360);
+ panner.coneOuterAngle = 166.5;
+ assert_equals(panner.coneOuterAngle, 166.5);
+ assert_equals(panner.coneOuterGain, 0);
+ panner.coneOuterGain = 0.25;
+ assert_equals(panner.coneOuterGain, 0.25);
+ assert_equals(panner.panningModel, 'equalpower');
+ assert_equals(panner.distanceModel, 'inverse');
+
+ // Position/orientation AudioParams
+ assert_equals(panner.positionX.value, 0);
+ assert_equals(panner.positionY.value, 0);
+ assert_equals(panner.positionZ.value, 0);
+ assert_equals(panner.orientationX.value, 1);
+ assert_equals(panner.orientationY.value, 0);
+ assert_equals(panner.orientationZ.value, 0);
+
+ // AudioListener
+ assert_equals(context.listener.positionX.value, 0);
+ assert_equals(context.listener.positionY.value, 0);
+ assert_equals(context.listener.positionZ.value, 0);
+ assert_equals(context.listener.forwardX.value, 0);
+ assert_equals(context.listener.forwardY.value, 0);
+ assert_equals(context.listener.forwardZ.value, -1);
+ assert_equals(context.listener.upX.value, 0);
+ assert_equals(context.listener.upY.value, 1);
+ assert_equals(context.listener.upZ.value, 0);
+
+ panner.panningModel = 'equalpower';
+ assert_equals(panner.panningModel, 'equalpower');
+ panner.panningModel = 'HRTF';
+ assert_equals(panner.panningModel, 'HRTF');
+ panner.panningModel = 'invalid';
+ assert_equals(panner.panningModel, 'HRTF');
+
+  // Check that numerical values are no longer supported. Setting one
+  // shouldn't throw, and the value shouldn't change.
+ panner.panningModel = 1;
+ assert_equals(panner.panningModel, 'HRTF');
+
+ panner.distanceModel = 'linear';
+ assert_equals(panner.distanceModel, 'linear');
+ panner.distanceModel = 'inverse';
+ assert_equals(panner.distanceModel, 'inverse');
+ panner.distanceModel = 'exponential';
+ assert_equals(panner.distanceModel, 'exponential');
+
+ panner.distanceModel = 'invalid';
+ assert_equals(panner.distanceModel, 'exponential');
+}, 'Test the PannerNode interface');
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-setposition-throws.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-setposition-throws.html
new file mode 100644
index 0000000000..2053411943
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/pannernode-setposition-throws.html
@@ -0,0 +1,37 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test PannerNode.setPosition() and setOrientation() throw with parameters out of float range</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+// https://webaudio.github.io/web-audio-api/#dom-pannernode-setposition
+// setPosition(x, y, z) "is equivalent to setting positionX.value,
+// positionY.value, and positionZ.value directly with the given x, y, and z
+// values, respectively." setPosition() parameters are double, but the
+// AudioParam value setter has a float parameter, so out of range values
+// throw. The same reasoning applies to setOrientation().
+const FLT_MAX = 3.40282e+38;
+let panner;
+setup(() => {
+ const ctx = new OfflineAudioContext({length: 1, sampleRate: 24000});
+ panner = ctx.createPanner();
+});
+test(() => {
+ assert_throws_js(TypeError, () => panner.setPosition(2 * FLT_MAX, 0, 0));
+}, "setPosition x");
+test(() => {
+ assert_throws_js(TypeError, () => panner.setPosition(0, -2 * FLT_MAX, 0));
+}, "setPosition y");
+test(() => {
+ assert_throws_js(TypeError, () => panner.setPosition(0, 0, 2 * FLT_MAX));
+}, "setPosition z");
+test(() => {
+ assert_throws_js(TypeError, () => panner.setOrientation(-2 * FLT_MAX, 0, 0));
+}, "setOrientation x");
+test(() => {
+ assert_throws_js(TypeError, () => panner.setOrientation(0, 2 * FLT_MAX, 0));
+}, "setOrientation y");
+test(() => {
+ assert_throws_js(TypeError, () => panner.setOrientation(0, 0, -2 * FLT_MAX));
+}, "setOrientation z");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html
new file mode 100644
index 0000000000..ce474b10b5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html
@@ -0,0 +1,36 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test PannerNode position AudioParam automation</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+
+// This value is purposefully not aligned on a 128-frame block boundary so
+// that we test that the PannerNode position AudioParams are a-rate.
+const POSITION_CHANGE_FRAME = 1111;
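+// (1111 = 8 * 128 + 87, so the position change lands 87 frames into a
+// render quantum rather than on a block boundary.)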
+
+promise_test(function(t) {
+ var ac = new OfflineAudioContext(2, 2048, 44100);
+ var panner = ac.createPanner();
+ panner.positionX.value = -1;
+ panner.positionY.value = -1;
+ panner.positionZ.value = 1;
+ panner.positionX.setValueAtTime(1, POSITION_CHANGE_FRAME/ac.sampleRate);
+ var osc = ac.createOscillator();
+ osc.connect(panner);
+ panner.connect(ac.destination);
+  osc.start();
+ return ac.startRendering().then(function(buffer) {
+ var left = buffer.getChannelData(0);
+ var right = buffer.getChannelData(1);
+ for (var i = 0; i < 2048; ++i) {
+ if (i < POSITION_CHANGE_FRAME) {
+ assert_true(Math.abs(left[i]) >= Math.abs(right[i]), "index " + i + " should be on the left");
+ } else {
+ assert_true(Math.abs(left[i]) < Math.abs(right[i]), "index " + i + " should be on the right");
+ }
+ }
+ });
+}, "PannerNode AudioParam automation works properly");
+
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/createPeriodicWaveInfiniteValuesThrows.html b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/createPeriodicWaveInfiniteValuesThrows.html
new file mode 100644
index 0000000000..928f45bd8f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/createPeriodicWaveInfiniteValuesThrows.html
@@ -0,0 +1,22 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test AudioContext.createPeriodicWave when inputs contain Infinite values</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+let ctx;
+setup(() => {
+ ctx = new OfflineAudioContext({length: 1, sampleRate: 24000});
+});
+test(() => {
+ const real = new Float32Array([0, Infinity]);
+ const imag = new Float32Array([0, 1]);
+ assert_throws_js(TypeError, () => ctx.createPeriodicWave(real, imag));
+}, "createPeriodicWave with Infinity real values should throw");
+
+test(() => {
+ const real = new Float32Array([0, 1]);
+ const imag = new Float32Array([1, Infinity]);
+ assert_throws_js(TypeError, () => ctx.createPeriodicWave(real, imag));
+}, "createPeriodicWave with Infinity imag values should throw");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/periodicWave.html b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/periodicWave.html
new file mode 100644
index 0000000000..9048b7f5da
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/periodicWave.html
@@ -0,0 +1,130 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: PeriodicWave
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // real and imag are used in separate PeriodicWaves to make their peak values
+ // easy to determine.
+ const realMax = 99;
+ var real = new Float32Array(realMax + 1);
+ real[1] = 2.0; // fundamental
+ real[realMax] = 3.0;
+ const realPeak = real[1] + real[realMax];
+ const realFundamental = 19.0;
+ var imag = new Float32Array(4);
+ imag[0] = 6.0; // should be ignored.
+ imag[3] = 0.5;
+ const imagPeak = imag[3];
+ const imagFundamental = 551.0;
+
+ const testLength = 4096;
+ let context = new AudioContext();
+
+ let audit = Audit.createTaskRunner();
+
+ // Create with the factory method
+
+ audit.define('create with factory method', (task, should) => {
+ should(() => {
+ context.createPeriodicWave(new Float32Array(4096), new Float32Array(4096));
+ }, 'context.createPeriodicWave(new Float32Array(4096), ' +
+ 'new Float32Array(4096))').notThrow();
+ task.done();
+ });
+
+ audit.define('different length with factory method', (task, should) => {
+ should(() => {
+ context.createPeriodicWave(new Float32Array(512), new Float32Array(4));
+ }, 'context.createPeriodicWave(new Float32Array(512), ' +
+ 'new Float32Array(4))').throw(DOMException, "IndexSizeError");
+ task.done();
+ });
+
+ audit.define('too small with factory method', (task, should) => {
+ should(() => {
+ context.createPeriodicWave(new Float32Array(1), new Float32Array(1));
+ }, 'context.createPeriodicWave(new Float32Array(1), ' +
+ 'new Float32Array(1))').throw(DOMException, "IndexSizeError");
+ task.done();
+ });
+
+ // Create with the constructor
+
+ audit.define('create with constructor', (task, should) => {
+ should(() => {
+ new PeriodicWave(context, { real: new Float32Array(4096), imag: new Float32Array(4096) });
+ }, 'new PeriodicWave(context, { real : new Float32Array(4096), ' +
+ 'imag : new Float32Array(4096) })').notThrow();
+ task.done();
+ });
+
+ audit.define('different length with constructor', (task, should) => {
+ should(() => {
+ new PeriodicWave(context, { real: new Float32Array(4096), imag: new Float32Array(4) });
+ }, 'new PeriodicWave(context, { real : new Float32Array(4096), ' +
+ 'imag : new Float32Array(4) })').throw(DOMException, "IndexSizeError");
+ task.done();
+ });
+
+ audit.define('too small with constructor', (task, should) => {
+ should(() => {
+ new PeriodicWave(context, { real: new Float32Array(1), imag: new Float32Array(1) });
+ }, 'new PeriodicWave(context, { real : new Float32Array(1), ' +
+ 'imag : new Float32Array(1) })').throw(DOMException, "IndexSizeError");
+ task.done();
+ });
+
+ audit.define('output test', (task, should) => {
+ let context = new OfflineAudioContext(2, 4096, 44100);
+ // Create the expected output buffer
+ let expectations = context.createBuffer(2, testLength, context.sampleRate);
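+      // PeriodicWave normalization is on by default, so each rendered
+      // waveform peaks at 1; dividing by realPeak (the sum of the cosine
+      // amplitudes) and by imagPeak models that normalization.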
+ for (var i = 0; i < expectations.length; ++i) {
+
+ expectations.getChannelData(0)[i] = 1.0 / realPeak *
+ (real[1] * Math.cos(2 * Math.PI * realFundamental * i /
+ context.sampleRate) +
+ real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i /
+ context.sampleRate));
+
+ expectations.getChannelData(1)[i] = 1.0 / imagPeak *
+ imag[3] * Math.sin(2 * Math.PI * 3 * imagFundamental * i /
+ context.sampleRate);
+ }
+
+ // Create the real output buffer
+ let merger = context.createChannelMerger();
+
+ let osc1 = context.createOscillator();
+ let osc2 = context.createOscillator();
+
+ osc1.setPeriodicWave(context.createPeriodicWave(
+ real, new Float32Array(real.length)));
+ osc2.setPeriodicWave(context.createPeriodicWave(
+ new Float32Array(imag.length), imag));
+ osc1.frequency.value = realFundamental;
+ osc2.frequency.value = imagFundamental;
+
+ osc1.start();
+ osc2.start();
+
+ osc1.connect(merger, 0, 0);
+ osc2.connect(merger, 0, 1);
+
+ context.startRendering().then(reality => {
+ should(reality, 'rendering PeriodicWave').beEqualToArray(expectations);
+ task.done();
+ });
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/simple-input-output.html b/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/simple-input-output.html
new file mode 100644
index 0000000000..7fd20e67a7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/simple-input-output.html
@@ -0,0 +1,98 @@
+<!doctype html>
+<html>
+ <head>
+ <title>Test ScriptProcessorNode</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+
+ <body>
+ <script>
+ // Arbitrary sample rate
+ const sampleRate = 48000;
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description: 'ScriptProcessor with stopped input source'
+ },
+ (task, should) => {
+ // Two channels for testing. Channel 0 is the output of the
+ // scriptProcessor. Channel 1 is the oscillator so we can compare
+ // the outputs.
+ let context = new OfflineAudioContext({
+ numberOfChannels: 2,
+ length: sampleRate,
+ sampleRate: sampleRate
+ });
+
+          let merger = new ChannelMergerNode(
+              context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let src = new OscillatorNode(context);
+
+ // Arbitrary buffer size for the ScriptProcessorNode. Don't use 0;
+ // we need to know the actual size to know the latency of the node
+ // (easily).
+ const spnSize = 512;
+ let spn = context.createScriptProcessor(spnSize, 1, 1);
+
+ // Arrange for the ScriptProcessor to add |offset| to the input.
+ const offset = 1;
+ spn.onaudioprocess = (event) => {
+ let input = event.inputBuffer.getChannelData(0);
+ let output = event.outputBuffer.getChannelData(0);
+ for (let k = 0; k < output.length; ++k) {
+ output[k] = input[k] + offset;
+ }
+ };
+
+ src.connect(spn).connect(merger, 0, 0);
+ src.connect(merger, 0, 1);
+
+ // Start and stop the source. The stop time is fairly arbitrary,
+ // but use a render quantum boundary for simplicity.
+ const stopFrame = RENDER_QUANTUM_FRAMES;
+ src.start(0);
+ src.stop(stopFrame / context.sampleRate);
+
+ context.startRendering()
+ .then(buffer => {
+ let ch0 = buffer.getChannelData(0);
+ let ch1 = buffer.getChannelData(1);
+
+ let shifted = ch1.slice(0, stopFrame).map(x => x + offset);
+
+                // SPN has a basic latency of 2*|spnSize| frames, so the
+                // beginning is silent.
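+                // (One |spnSize| buffer must be filled before the first
+                // onaudioprocess event fires, and that output is played one
+                // buffer later, which is where 2*|spnSize| comes from.)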
+ should(
+ ch0.slice(0, 2 * spnSize - 1),
+ `ScriptProcessor output[0:${2 * spnSize - 1}]`)
+ .beConstantValueOf(0);
+
+ // For the middle section (after adding latency), the output
+ // should be the source shifted by |offset|.
+ should(
+ ch0.slice(2 * spnSize, 2 * spnSize + stopFrame),
+ `ScriptProcessor output[${2 * spnSize}:${
+ 2 * spnSize + stopFrame - 1}]`)
+ .beCloseToArray(shifted, {absoluteThreshold: 0});
+
+ // Output should be constant after the source has stopped.
+ // Include the latency introduced by the node.
+ should(
+ ch0.slice(2 * spnSize + stopFrame),
+ `ScriptProcessor output[${2 * spnSize + stopFrame}:]`)
+ .beConstantValueOf(offset);
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/ctor-stereopanner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/ctor-stereopanner.html
new file mode 100644
index 0000000000..9409f1ffce
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/ctor-stereopanner.html
@@ -0,0 +1,131 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: StereoPanner
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('invalid constructor', (task, should) => {
+ testInvalidConstructor(should, 'StereoPannerNode', context);
+ task.done();
+ });
+
+ audit.define('default constructor', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'StereoPannerNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'clamped-max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [{name: 'pan', value: 0}]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ // Can't use testAudioNodeOptions because the constraints for this node
+ // are not supported there.
+ let node;
+
+ // An array of tests.
+ [{
+ // Test that we can set the channel count to 1 or 2 and that other
+ // channel counts throw an error.
+ attribute: 'channelCount',
+ tests: [
+ {value: 1}, {value: 2}, {value: 0, error: 'NotSupportedError'},
+ {value: 3, error: 'NotSupportedError'},
+ {value: 99, error: 'NotSupportedError'}
+ ]
+ },
+ {
+          // Test channelCountMode. A mode of "max" is illegal, but the
+          // others are ok. An error is also thrown for unknown values.
+ attribute: 'channelCountMode',
+ tests: [
+ {value: 'clamped-max'}, {value: 'explicit'},
+ {value: 'max', error: 'NotSupportedError'},
+ {value: 'foobar', error: TypeError}
+ ]
+ },
+ {
+ // Test channelInterpretation can be set for valid values and an
+ // error is thrown for others.
+ attribute: 'channelInterpretation',
+ tests: [
+ {value: 'speakers'}, {value: 'discrete'},
+ {value: 'foobar', error: TypeError}
+ ]
+ }].forEach(entry => {
+ entry.tests.forEach(testItem => {
+ let options = {};
+ options[entry.attribute] = testItem.value;
+
+ const testFunction = () => {
+ node = new StereoPannerNode(context, options);
+ };
+ const testDescription =
+ `new StereoPannerNode(c, ${JSON.stringify(options)})`;
+
+ if (testItem.error) {
+ testItem.error === TypeError
+ ? should(testFunction, testDescription).throw(TypeError)
+ : should(testFunction, testDescription)
+ .throw(DOMException, 'NotSupportedError');
+ } else {
+ should(testFunction, testDescription).notThrow();
+ should(node[entry.attribute], `node.${entry.attribute}`)
+ .beEqualTo(options[entry.attribute]);
+ }
+ });
+ });
+
+ task.done();
+ });
+
+ audit.define('constructor with options', (task, should) => {
+ let node;
+ let options = {
+ pan: 0.75,
+ };
+
+ should(
+ () => {
+ node = new StereoPannerNode(context, options);
+ },
+          'node1 = new StereoPannerNode(c, ' + JSON.stringify(options) + ')')
+ .notThrow();
+ should(
+ node instanceof StereoPannerNode,
+ 'node1 instanceof StereoPannerNode')
+ .beEqualTo(true);
+
+ should(node.pan.value, 'node1.pan.value').beEqualTo(options.pan);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html
new file mode 100644
index 0000000000..355db8b9dc
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html
@@ -0,0 +1,261 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test StereoPannerNode Has No Dezippering
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // Arbitrary sample rate except that it should be a power of two to
+ // eliminate any round-off in computing frame boundaries.
+ let sampleRate = 16384;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test mono input',
+ description: 'Test StereoPanner with mono input has no dezippering'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(2, sampleRate, sampleRate);
+ let src = new ConstantSourceNode(context, {offset: 1});
+ let p = new StereoPannerNode(context, {pan: -1});
+
+ src.connect(p).connect(context.destination);
+ src.start();
+
+ // Frame at which to change pan value.
+ let panFrame = 256;
+ context.suspend(panFrame / context.sampleRate)
+ .then(() => p.pan.value = 1)
+ .then(() => context.resume());
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let c0 = renderedBuffer.getChannelData(0);
+ let c1 = renderedBuffer.getChannelData(1);
+
+ // The first part should be full left.
+ should(
+ c0.slice(0, panFrame), 'Mono: Left channel, pan = -1: ')
+ .beConstantValueOf(1);
+ should(
+ c1.slice(0, panFrame), 'Mono: Right channel, pan = -1:')
+ .beConstantValueOf(0);
+
+ // The second part should be full right, but due to roundoff,
+ // the left channel won't be exactly zero. Compare the left
+ // channel against zero with a threshold instead.
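+                  // (6.1233e-17 is roughly cos(PI/2) as computed in double
+                  // precision, i.e. the residual left gain at pan = 1.)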
+ let tail = c0.slice(panFrame);
+ let zero = new Float32Array(tail.length);
+
+ should(c0.slice(panFrame), 'Mono: Left channel, pan = 1: ')
+ .beCloseToArray(zero, {absoluteThreshold: 6.1233e-17});
+ should(c1.slice(panFrame), 'Mono: Right channel, pan = 1:')
+ .beConstantValueOf(1);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test stereo input',
+ description:
+ 'Test StereoPanner with stereo input has no dezippering'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(2, sampleRate, sampleRate);
+
+ // Create stereo source from two constant source nodes.
+ let s0 = new ConstantSourceNode(context, {offset: 1});
+ let s1 = new ConstantSourceNode(context, {offset: 2});
+ let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ s0.connect(merger, 0, 0);
+ s1.connect(merger, 0, 1);
+
+ let p = new StereoPannerNode(context, {pan: -1});
+
+ merger.connect(p).connect(context.destination);
+ s0.start();
+ s1.start();
+
+ // Frame at which to change pan value.
+ let panFrame = 256;
+ context.suspend(panFrame / context.sampleRate)
+ .then(() => p.pan.value = 1)
+ .then(() => context.resume());
+
+ context.startRendering()
+ .then(renderedBuffer => {
+ let c0 = renderedBuffer.getChannelData(0);
+ let c1 = renderedBuffer.getChannelData(1);
+
+ // The first part should be full left.
+ should(
+ c0.slice(0, panFrame), 'Stereo: Left channel, pan = -1: ')
+ .beConstantValueOf(3);
+ should(
+ c1.slice(0, panFrame), 'Stereo: Right channel, pan = -1:')
+ .beConstantValueOf(0);
+
+ // The second part should be full right, but due to roundoff,
+ // the left channel won't be exactly zero. Compare the left
+ // channel against zero with a threshold instead.
+ let tail = c0.slice(panFrame);
+ let zero = new Float32Array(tail.length);
+
+ should(c0.slice(panFrame), 'Stereo: Left channel, pan = 1: ')
+ .beCloseToArray(zero, {absoluteThreshold: 6.1233e-17});
+ should(c1.slice(panFrame), 'Stereo: Right channel, pan = 1:')
+ .beConstantValueOf(3);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test mono input setValue',
+ description: 'Test StereoPanner with mono input value setter ' +
+ 'vs setValueAtTime'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(4, sampleRate, sampleRate);
+
+ let src = new OscillatorNode(context);
+
+ src.start();
+ testWithSetValue(context, src, should, {
+ prefix: 'Mono'
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test stereo input setValue',
+            description: 'Test StereoPanner with stereo input value setter ' +
+                'vs setValueAtTime'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(4, sampleRate, sampleRate);
+
+ let src0 = new OscillatorNode(context, {frequency: 800});
+ let src1 = new OscillatorNode(context, {frequency: 250});
+          let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+
+ src0.start();
+ src1.start();
+
+ testWithSetValue(context, merger, should, {
+ prefix: 'Stereo'
+ }).then(() => task.done());
+ });
+
+ audit.define(
+ {
+            label: 'test stereo input automation',
+            description: 'Test StereoPanner with stereo input and automation'
+ },
+ (task, should) => {
+ let context = new OfflineAudioContext(4, sampleRate, sampleRate);
+
+ let src0 = new OscillatorNode(context, {frequency: 800});
+ let src1 = new OscillatorNode(context, {frequency: 250});
+          let merger = new ChannelMergerNode(context, {numberOfInputs: 2});
+
+ src0.connect(merger, 0, 0);
+ src1.connect(merger, 0, 1);
+
+ src0.start();
+ src1.start();
+
+ let mod = new OscillatorNode(context, {frequency: 100});
+ mod.start();
+
+ testWithSetValue(context, merger, should, {
+ prefix: 'Modulated Stereo',
+ modulator: (testNode, refNode) => {
+ mod.connect(testNode.pan);
+ mod.connect(refNode.pan);
+ }
+ }).then(() => task.done());
+ });
+
+
+ function testWithSetValue(context, src, should, options) {
+ let merger = new ChannelMergerNode(
+ context, {numberOfInputs: context.destination.channelCount});
+ merger.connect(context.destination);
+
+ let pannerRef = new StereoPannerNode(context, {pan: -0.3});
+ let pannerTest =
+ new StereoPannerNode(context, {pan: pannerRef.pan.value});
+
+ let refSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+ let testSplitter =
+ new ChannelSplitterNode(context, {numberOfOutputs: 2});
+
+ pannerRef.connect(refSplitter);
+ pannerTest.connect(testSplitter);
+
+ testSplitter.connect(merger, 0, 0);
+ testSplitter.connect(merger, 1, 1);
+ refSplitter.connect(merger, 0, 2);
+ refSplitter.connect(merger, 1, 3);
+
+ src.connect(pannerRef);
+ src.connect(pannerTest);
+
+ let changeTime = 3 * RENDER_QUANTUM_FRAMES / context.sampleRate;
+ // An arbitrary position, different from the default pan value.
+ let newPanPosition = .71;
+
+ pannerRef.pan.setValueAtTime(newPanPosition, changeTime);
+ context.suspend(changeTime)
+ .then(() => pannerTest.pan.value = newPanPosition)
+ .then(() => context.resume());
+
+ if (options.modulator) {
+ options.modulator(pannerTest, pannerRef);
+ }
+ return context.startRendering().then(renderedBuffer => {
+ let actual = new Array(2);
+ let expected = new Array(2);
+
+ actual[0] = renderedBuffer.getChannelData(0);
+ actual[1] = renderedBuffer.getChannelData(1);
+ expected[0] = renderedBuffer.getChannelData(2);
+ expected[1] = renderedBuffer.getChannelData(3);
+
+ let label = ['Left', 'Right'];
+
+ for (let k = 0; k < 2; ++k) {
+ let match =
+ should(
+ actual[k],
+ options.prefix + ' ' + label[k] + ' .value setter output')
+ .beCloseToArray(expected[k], {absoluteThreshold: 1.192094e-7});
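+          // (1.192094e-7 is about 2^-23, one single-precision ulp at 1.0,
+          // allowing for float rounding differences between the two paths.)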
+ should(
+ match,
+ options.prefix + ' ' + label[k] +
+ ' .value setter output matches setValueAtTime output')
+ .beTrue();
+ }
+
+ });
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-basic.html b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-basic.html
new file mode 100644
index 0000000000..48bacb08c6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-basic.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ stereopannernode-basic.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'Attributes and basic functionality of StereoPannerNode'
+ },
+ (task, should) => {
+
+ let context = new AudioContext();
+ let panner = context.createStereoPanner();
+
+ should(panner.numberOfInputs, 'panner.numberOfInputs').beEqualTo(1);
+ should(panner.numberOfOutputs, 'panner.numberOfOutputs')
+ .beEqualTo(1);
+ should(panner.pan.defaultValue, 'panner.pan.defaultValue')
+ .beEqualTo(0.0);
+ should(() => panner.pan.value = 1.0, 'panner.pan.value = 1.0')
+ .notThrow();
+ should(panner.pan.value, 'panner.pan.value').beEqualTo(1.0);
+
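+ // Per the Web Audio spec, a StereoPannerNode supports a channelCount of
+ // only 1 or 2 and disallows a channelCountMode of 'max', so the invalid
+ // settings below must throw.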
+ should(() => panner.channelCount = 1, 'panner.channelCount = 1')
+ .notThrow();
+ should(() => panner.channelCount = 3, 'panner.channelCount = 3')
+ .throw();
+ should(
+ () => panner.channelCountMode = 'explicit',
+ 'panner.channelCountMode = "explicit"')
+ .notThrow();
+ should(
+ () => panner.channelCountMode = 'max',
+ 'panner.channelCountMode = "max"')
+ .throw();
+
+ task.done();
+ });
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-panning.html b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-panning.html
new file mode 100644
index 0000000000..f683fd78bf
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-stereopanner-interface/stereopannernode-panning.html
@@ -0,0 +1,34 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ stereopannernode-panning.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/stereopanner-testing.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('mono-test', (task, should) => {
+ StereoPannerTest
+ .create(should, {numberOfInputChannels: 1, prefix: 'Mono: '})
+ .run()
+ .then(() => task.done());
+ });
+
+ audit.define('stereo-test', (task, should) => {
+ StereoPannerTest
+ .create(should, {numberOfInputChannels: 2, prefix: 'Stereo: '})
+ .run()
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/ctor-waveshaper.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/ctor-waveshaper.html
new file mode 100644
index 0000000000..7aa33ca5aa
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/ctor-waveshaper.html
@@ -0,0 +1,72 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test Constructor: WaveShaper
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ <script src="/webaudio/resources/audionodeoptions.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let context;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define('initialize', (task, should) => {
+ context = initializeContext(should);
+ task.done();
+ });
+
+ audit.define('incorrect construction', (task, should) => {
+ testInvalidConstructor(should, 'WaveShaperNode', context);
+ task.done();
+ });
+
+ audit.define('valid default construction', (task, should) => {
+ let prefix = 'node0';
+ let node = testDefaultConstructor(should, 'WaveShaperNode', context, {
+ prefix: prefix,
+ numberOfInputs: 1,
+ numberOfOutputs: 1,
+ channelCount: 2,
+ channelCountMode: 'max',
+ channelInterpretation: 'speakers'
+ });
+
+ testDefaultAttributes(should, node, prefix, [
+ {name: 'curve', value: null}, {name: 'oversample', value: 'none'}
+ ]);
+
+ task.done();
+ });
+
+ audit.define('test AudioNodeOptions', (task, should) => {
+ testAudioNodeOptions(should, context, 'WaveShaperNode');
+ task.done();
+ });
+
+ audit.define('valid non-default', (task, should) => {
+ // Construct a WaveShaperNode with options
+ let options = {curve: Float32Array.from([1, 2, 3]), oversample: '4x'};
+ let node;
+
+ let message =
+ 'node1 = new WaveShaperNode(context, ' + JSON.stringify(options) + ')';
+ should(() => {
+ node = new WaveShaperNode(context, options);
+ }, message).notThrow();
+ should(node.curve, 'node1.curve').beEqualToArray(options.curve);
+ should(node.oversample, 'node1.oversample')
+ .beEqualTo(options.oversample);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html
new file mode 100644
index 0000000000..d09cf78fd8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html
@@ -0,0 +1,184 @@
+<!doctype html>
+<html>
+<head>
+ <title>WaveShaperNode interface - Curve tests | WebAudio</title>
+
+ <script type="text/javascript" src="/resources/testharness.js"></script>
+ <script type="text/javascript" src="/resources/testharnessreport.js"></script>
+</head>
+<body>
+ <div id="log">
+ </div>
+
+ <script type="text/javascript">
+ var sampleRate=44100.0;
+ var tolerance=0.01;
+
+ /*
+ Testing that -1, 0 and +1 map correctly to curve (with 1:1 correlation)
+ =======================================================================
+ From the specification:
+ The input signal is nominally within the range -1 -> +1.
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ */
+ (function() {
+ var threeElementCurve=[2.0, -3.0, 4.0];
+ var inputData=[-1.0, 0, 1.0];
+ var expectedData=[2.0, -3.0, 4.0];
+ executeTest(threeElementCurve, inputData, expectedData, "Testing that -1, 0 and +1 map correctly to curve (with 1:1 correlation)");
+ })();
+
+ /*
+ Testing interpolation (where inputs don't correlate directly to curve elements)
+ ===============================================================================
+ From the specification:
+ The implementation must perform linear interpolation between adjacent points in the curve.
+ */
+ (function() {
+ var threeElementCurve=[2.0, -3.0, 4.0];
+ var inputData=[-0.5, +0.5, +0.75];
+ var expectedData=[-0.5, +0.5, +2.25];
+ executeTest(threeElementCurve, inputData, expectedData, "Testing interpolation (where inputs don't correlate directly to curve elements)");
+ })();
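+ /*
+ Worked example (arithmetic only, derived from the spec text above): with
+ the curve [2.0, -3.0, 4.0], an input of +0.5 falls at curve position
+ (curveLength - 1) / 2 * (input + 1) = 1.5, halfway between curve[1] (-3.0)
+ and curve[2] (4.0), so the interpolated output is -3.0 + 0.5 * 7.0 = +0.5.
+ */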
+
+ /*
+ Testing out-of-range inputs (should be mapped to the first/last elements of the curve)
+ ======================================================================================
+ From the specification:
+ Any sample value less than -1 will correspond to the first value in the curve array.
+ Any sample value greater than +1 will correspond to the last value in the curve array.
+ */
+ (function() {
+ var threeElementCurve=[2.0, -3.0, 4.0];
+ var inputData=[-1.5, +1.5];
+ var expectedData=[2.0, 4.0];
+ executeTest(threeElementCurve, inputData, expectedData, "Testing out-of-range inputs (should be mapped to the first/last elements of the curve)");
+ })();
+
+ /*
+ Testing a 2-element curve (does not have a middle element)
+ ==========================================================
+ From the specification:
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ The implementation must perform linear interpolation between adjacent points in the curve.
+ */
+ (function() {
+ var twoElementCurve=[2.0, -2.0];
+ var inputData=[-1.0, 0, 1.0];
+ var expectedData=[2.0, 0.0, -2.0];
+ executeTest(twoElementCurve, inputData, expectedData, "Testing a 2-element curve (does not have a middle element)");
+ })();
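+ /*
+ Sanity check (assuming the same position formula as above): a two-element
+ curve has no center element, so an input of 0 falls at position 0.5 and
+ interpolates to 2.0 + 0.5 * (-2.0 - 2.0) = 0.0.
+ */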
+
+ /*
+ Testing a 4-element curve (does not have a middle element)
+ ==========================================================
+ From the specification:
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ The implementation must perform linear interpolation between adjacent points in the curve.
+ */
+ (function() {
+ var fourElementCurve=[1.0, 2.0, 4.0, 7.0];
+ var inputData=[-1.0, 0, 1.0];
+ var expectedData=[1.0, 3.0, 7.0];
+ executeTest(fourElementCurve, inputData, expectedData, "Testing a 4-element curve (does not have a middle element)");
+ })();
+
+ /*
+ Testing a huge curve
+ ====================
+ From the specification:
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ */
+ (function() {
+ var bigCurve=[];
+ for(var i=0;i<=60000;i++) { bigCurve.push(i/3.5435); }
+ var inputData=[-1.0, 0, 1.0];
+ var expectedData=[bigCurve[0], bigCurve[30000], bigCurve[60000]];
+ executeTest(bigCurve, inputData, expectedData, "Testing a huge curve");
+ })();
+
+ /*
+ Testing single-element curve (boundary condition)
+ =================================================
+ From the specification:
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ Any sample value less than -1 will correspond to the first value in the curve array.
+ Any sample value greater than +1 will correspond to the last value in the curve array.
+ The implementation must perform linear interpolation between adjacent points in the curve.
+ Note: the current specification requires a curve of at least two elements
+ (setting a shorter one throws an InvalidStateError), so no rendering test
+ is performed here.
+ */
+
+ /*
+ Testing null curve (should return input values)
+ ===============================================
+ From the specification:
+ Initially the curve attribute is null, which means that the WaveShaperNode will pass its input to its output
+ without modification.
+ */
+ (function() {
+ var inputData=[-1.0, 0, 1.0, 2.0];
+ var expectedData=[-1.0, 0.0, 1.0, 2.0];
+ executeTest(null, inputData, expectedData, "Testing null curve (should return input values)");
+ })();
+
+ /**
+ * Function that does the actual testing (using an asynchronous test).
+ * @param {?Array.<number>} curveData - Array containing values for the WaveShaper curve.
+ * @param {!Array.<number>} inputData - Array containing values for the input stream.
+ * @param {!Array.<number>} expectedData - Array containing expected results for each of the corresponding inputs.
+ * @param {!string} testName - Name of the test case.
+ */
+ function executeTest(curveData, inputData, expectedData, testName) {
+ var stTest=async_test("WaveShaperNode - "+testName);
+ stTest.step(function() {
+
+ // Create offline audio context.
+ var ac=new OfflineAudioContext(1, inputData.length, sampleRate);
+
+ // Create the WaveShaper and its curve.
+ var waveShaper=ac.createWaveShaper();
+ if(curveData!=null) {
+ var curve=new Float32Array(curveData.length);
+ for(var i=0;i<curveData.length;i++) { curve[i]=curveData[i]; }
+ waveShaper.curve=curve;
+ }
+ waveShaper.connect(ac.destination);
+
+ // Create buffer containing the input values.
+ var inputBuffer=ac.createBuffer(1, Math.max(inputData.length, 2), sampleRate);
+ var d=inputBuffer.getChannelData(0);
+ for(var i=0;i<inputData.length;i++) { d[i]=inputData[i]; }
+
+ // Play the input buffer through the WaveShaper.
+ var src=ac.createBufferSource();
+ src.buffer=inputBuffer;
+ src.connect(waveShaper);
+ src.start();
+
+ // Test the outputs match the expected values.
+ ac.oncomplete=stTest.step_func_done(function(ev) {
+ var d=ev.renderedBuffer.getChannelData(0);
+
+ for(var i=0;i<expectedData.length;i++) {
+ var curveText="null";
+ if(curveData!=null) {
+ if(curveData.length<20) {
+ curveText=curveData.join(",");
+ } else {
+ curveText="TooBigToDisplay ("+(curveData.length-1)+" elements)";
+ }
+ }
+ var comment="Input="+inputData[i]+", Curve=["+curveText+"] >>> ";
+ assert_approx_equals(d[i], expectedData[i], tolerance, comment);
+ }
+ });
+ ac.startRendering();
+ });
+ }
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/silent-inputs.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/silent-inputs.html
new file mode 100644
index 0000000000..45d2c9ad4b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/silent-inputs.html
@@ -0,0 +1,103 @@
+<!doctype html>
+<html>
+ <head>
+ <title>
+ Test Silent Inputs to WaveShaperNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/resources/audit-util.js"></script>
+ <script src="/webaudio/resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+ let sampleRate = 16000;
+
+ // A constant, non-zero curve: every input value, including silence, maps
+ // to the curve's center value of 0.5.
+ let nonZeroCurve = [0.5, 0.5, 0.5];
+
+ audit.define(
+ {
+ label: 'test-0',
+ description: 'curve output is non-zero for silent inputs'
+ },
+ (task, should) => {
+ let {context, source, shaper} =
+ setupGraph(nonZeroCurve, sampleRate, sampleRate);
+
+ source.offset.setValueAtTime(0, 0);
+
+ context.startRendering()
+ .then(audioBuffer => {
+ should(
+ audioBuffer.getChannelData(0),
+ 'WaveShaper with silent inputs and curve ' +
+ JSON.stringify(shaper.curve))
+ .beConstantValueOf(0.5);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test-1',
+ description: '2x curve output is non-zero for silent inputs'
+ },
+ (task, should) => {
+ let {context, source, shaper} =
+ setupGraph(nonZeroCurve, sampleRate, sampleRate);
+
+ source.offset.setValueAtTime(0, 0);
+ shaper.oversample = '2x';
+
+ context.startRendering()
+ .then(audioBuffer => {
+ should(
+ audioBuffer.getChannelData(0),
+ 'WaveShaper with ' + shaper.oversample +
+ ' oversample, silent inputs, and curve ' +
+ JSON.stringify(shaper.curve))
+ .beConstantValueOf(0.5);
+ })
+ .then(() => task.done());
+ });
+
+ audit.define(
+ {
+ label: 'test-2',
+ description: 'curve output is non-zero for no inputs'
+ },
+ (task, should) => {
+ let {context, source, shaper} =
+ setupGraph(nonZeroCurve, sampleRate, sampleRate);
+
+ source.disconnect();
+
+ context.startRendering()
+ .then(audioBuffer => {
+ should(
+ audioBuffer.getChannelData(0),
+ 'WaveShaper with no inputs and curve ' +
+ JSON.stringify(shaper.curve))
+ .beConstantValueOf(0.5);
+ })
+ .then(() => task.done());
+ });
+
+ function setupGraph(curve, testFrames, sampleRate) {
+ let context = new OfflineAudioContext(1, testFrames, sampleRate);
+ let source = new ConstantSourceNode(context);
+ let shaper = new WaveShaperNode(context, {curve: curve});
+
+ source.connect(shaper).connect(context.destination);
+
+ return {context: context, source: source, shaper: shaper};
+ }
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html
new file mode 100644
index 0000000000..e897ac08a1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html
@@ -0,0 +1,100 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Test WaveShaper Copies Curve Data
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ // The sample rate and number of frames are fairly arbitrary, but at
+ // least 384 frames must be rendered: the curve is modified at frame 256,
+ // and at least one more 128-frame render quantum must follow. 1024 is a
+ // nice small value.
+ let sampleRate = 16000;
+ let renderFrames = 1024;
+
+ let audit = Audit.createTaskRunner();
+
+ audit.define(
+ {
+ label: 'test copying',
+ description: 'Modifying curve should not modify WaveShaper'
+ },
+ (task, should) => {
+ // Two-channel context; channel 0 contains the test data and channel
+ // 1 contains the expected result. Channel 1 has the normal
+ // WaveShaper output and channel 0 has the WaveShaper output with a
+ // modified curve.
+ let context = new OfflineAudioContext(2, renderFrames, sampleRate);
+
+ // Just use a default oscillator as the source. Doesn't really
+ // matter what we use.
+ let src = context.createOscillator();
+ src.type = 'sawtooth';
+
+ // Create the wave shapers: ws0 is the test shaper, and ws1 is the
+ // reference wave shaper.
+ let ws0 = context.createWaveShaper();
+ let ws1 = context.createWaveShaper();
+
+ // Wave shaper curves. Doesn't really matter what we use as long as
+ // it modifies the input in some way. Thus, keep it simple and just
+ // invert the input.
+ let desiredCurve = [1, 0, -1];
+ let curve0 = Float32Array.from(desiredCurve);
+ let curve1 = Float32Array.from(desiredCurve);
+
+ ws0.curve = curve0;
+ ws1.curve = curve1;
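+ // Per the spec, assigning to .curve copies the contents of the array, so
+ // later writes to curve0 must not change what ws0 renders.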
+
+ let merger = context.createChannelMerger(2);
+
+ // Connect the graph
+ src.connect(ws0);
+ src.connect(ws1);
+
+ ws0.connect(merger, 0, 0);
+ ws1.connect(merger, 0, 1);
+
+ merger.connect(context.destination);
+
+ // Let the context run for a bit and then modify the curve for ws0.
+ // Doesn't really matter what we modify the curve to as long as it's
+ // different.
+ context.suspend(256 / context.sampleRate)
+ .then(() => {
+ should(
+ () => {
+ curve0[0] = -0.5;
+ curve0[1] = 0.125;
+ curve0[2] = 0.75;
+ },
+ `Modifying curve array at time ${context.currentTime}`)
+ .notThrow();
+ })
+ .then(context.resume.bind(context));
+
+ src.start();
+
+ context.startRendering()
+ .then(function(renderedBuffer) {
+ let actual = renderedBuffer.getChannelData(0);
+ let expected = renderedBuffer.getChannelData(1);
+
+ // Modifying the wave shaper curve should not modify the
+ // output so the outputs from the two wave shaper nodes should
+ // be exactly identical.
+ should(actual, 'Output of WaveShaper with modified curve')
+ .beEqualToArray(expected);
+
+ })
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-limits.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-limits.html
new file mode 100644
index 0000000000..13e88be567
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-limits.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ waveshaper-limits.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let context;
+ let bufferData;
+ let outputData;
+ let reference;
+
+ let sampleRate = 48000;
+ // Must be odd so we have an exact middle point.
+ let testFrames = 23;
+ let scale = 1 / ((testFrames - 1) / 2 - 1);
+ // Number of decimal digits to print
+ let decimals = 6;
+ // Required accuracy
+ let diffThreshold = Math.pow(10, -decimals);
+
+ // Generate reference data
+ function generateReference() {
+ // The curve data is 0, 1, 0, and the input data is a ramp from -1-scale
+ // to 1+scale. The output is then a ramp from 0 up to 1 and back down to
+ // 0, with the out-of-range endpoints mapped to the curve's end values.
+ let ref = new Float32Array(testFrames);
+ let midPoint = (testFrames - 1) / 2;
+ // First sample is below -1 at -1-scale.
+ ref[0] = 0;
+ // Generate ramp up to the mid-point
+ for (let k = 0; k < midPoint; ++k) {
+ ref[k + 1] = k * scale;
+ }
+ // The value at the mid-point must be 1, from the curve
+ ref[midPoint] = 1;
+ // Generate a ramp from 1 down to 0
+ for (let k = midPoint; k < testFrames - 1; ++k) {
+ ref[k + 1] = 2 - k * scale;
+ }
+ // The last sample is out of range at 1+scale
+ ref[testFrames - 1] = 0;
+ return ref;
+ }
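+ // For example, with testFrames = 23 (midPoint = 11, scale = 0.1), the
+ // reference ramps from 0 up to 1.0 at index 11 and back down to 0; the
+ // out-of-range first and last inputs (-1.1 and +1.1) clamp to the curve
+ // endpoints, which are both 0.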
+
+ function checkResult(renderedBuffer, should) {
+ outputData = renderedBuffer.getChannelData(0);
+ reference = generateReference();
+ // Verify that every output value matches our expected reference value.
+ for (let k = 0; k < outputData.length; ++k) {
+ let diff = outputData[k] - reference[k];
+ should(
+ Math.abs(diff),
+ 'Error mapping ' + bufferData[k].toFixed(decimals) + ' to ' +
+ outputData[k].toFixed(decimals))
+ .beLessThanOrEqualTo(diffThreshold);
+ }
+ }
+
+ audit.define(
+ {
+ label: 'test',
+ description:
+ 'WaveShaperNode including values outside the range of [-1,1]'
+ },
+ function(task, should) {
+ context = new OfflineAudioContext(1, testFrames, sampleRate);
+ // Create input values between -1.1 and 1.1
+ let buffer =
+ context.createBuffer(1, testFrames, context.sampleRate);
+ bufferData = new Float32Array(testFrames);
+ let start = -1 - scale;
+ for (let k = 0; k < testFrames; ++k) {
+ bufferData[k] = k * scale + start;
+ }
+ buffer.copyToChannel(bufferData, 0);
+
+ let source = context.createBufferSource();
+ source.buffer = buffer;
+
+ // Create simple waveshaper. It should map -1 to 0, 0 to 1, and +1
+ // to 0 and interpolate all points in between using a simple linear
+ // interpolator.
+ let shaper = context.createWaveShaper();
+ let curve = new Float32Array(3);
+ curve[0] = 0;
+ curve[1] = 1;
+ curve[2] = 0;
+ shaper.curve = curve;
+ source.connect(shaper);
+ shaper.connect(context.destination);
+
+ source.start();
+ context.startRendering()
+ .then(buffer => checkResult(buffer, should))
+ .then(() => task.done());
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-simple.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-simple.html
new file mode 100644
index 0000000000..affd0c58af
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-simple.html
@@ -0,0 +1,61 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ Simple Tests of WaveShaperNode
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ audit.define('simple', (task, should) => {
+ let context = new OfflineAudioContext(1, 1, 48000);
+ let shaper = context.createWaveShaper();
+
+ // Verify default values are correct.
+ should(shaper.curve, 'Initial WaveShaper.curve').beEqualTo(null);
+ should(shaper.oversample, 'Initial WaveShaper.oversample')
+ .beEqualTo('none');
+
+ // Set oversample and verify that it is set correctly.
+ should(() => shaper.oversample = '2x', 'Setting oversample to "2x"')
+ .notThrow();
+ should(shaper.oversample, 'Waveshaper.oversample = "2x"')
+ .beEqualTo('2x');
+
+ should(() => shaper.oversample = '4x', 'Setting oversample to "4x"')
+ .notThrow();
+ should(shaper.oversample, 'Waveshaper.oversample = "4x"')
+ .beEqualTo('4x');
+
+ should(
+ () => shaper.oversample = 'invalid',
+ 'Setting oversample to "invalid"')
+ .notThrow();
+ should(shaper.oversample, 'Waveshaper.oversample = "invalid"')
+ .beEqualTo('4x');
+
+ // Set the curve and verify that the returned curve is the same as what
+ // it was set to.
+ let curve = Float32Array.from([-1, 0.25, 0.75]);
+ should(() => shaper.curve = curve, 'Setting curve to [' + curve + ']')
+ .notThrow();
+ should(shaper.curve, 'WaveShaper.curve').beEqualToArray(curve);
+
+ // Verify setting the curve to null works.
+ should(() => shaper.curve = null, 'Setting curve back to null')
+ .notThrow();
+ should(shaper.curve, 'WaveShaper.curve = null').beEqualTo(null);
+
+ task.done();
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper.html
new file mode 100644
index 0000000000..8bfa009b18
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper.html
@@ -0,0 +1,127 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>
+ waveshaper.html
+ </title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="../../resources/audit-util.js"></script>
+ <script src="../../resources/audit.js"></script>
+ <script src="../../resources/buffer-loader.js"></script>
+ </head>
+ <body>
+ <script id="layout-test-code">
+ let audit = Audit.createTaskRunner();
+
+ let sampleRate = 44100;
+ let lengthInSeconds = 4;
+ let numberOfRenderFrames = sampleRate * lengthInSeconds;
+ let numberOfCurveFrames = 65536;
+ let inputBuffer;
+ let waveShapingCurve;
+
+ let context;
+
+ function generateInputBuffer() {
+ // Create mono input buffer.
+ let buffer =
+ context.createBuffer(1, numberOfRenderFrames, context.sampleRate);
+ let data = buffer.getChannelData(0);
+
+ // Generate an input vector with values from -1 -> +1 over a duration of
+ // lengthInSeconds. This exercises the full nominal input range and will
+ // touch every point of the shaping curve.
+ for (let i = 0; i < numberOfRenderFrames; ++i) {
+ let x = i / numberOfRenderFrames; // 0 -> 1
+ x = 2 * x - 1; // -1 -> +1
+ data[i] = x;
+ }
+
+ return buffer;
+ }
+
+ // Generates a symmetric curve: Math.atan(5 * x) / (0.5 * Math.PI)
+ // (with x == 0 corresponding to the center of the array)
+ // This curve is arbitrary, but would be useful in the real world.
+ // To some extent, the actual curve we choose is not important in this
+ // test, since the input vector walks through all possible curve values.
+ function generateWaveShapingCurve() {
+ let curve = new Float32Array(numberOfCurveFrames);
+
+ let n = numberOfCurveFrames;
+ let n2 = n / 2;
+
+ for (let i = 0; i < n; ++i) {
+ let x = (i - n2) / n2;
+ curve[i] = Math.atan(5 * x) / (0.5 * Math.PI);
+ }
+
+ return curve;
+ }
+
+ function checkShapedCurve(buffer, should) {
+ let inputData = inputBuffer.getChannelData(0);
+ let outputData = buffer.getChannelData(0);
+
+ let success = true;
+
+ // Go through every sample and make sure it has been shaped exactly
+ // according to the shaping curve we gave it.
+ for (let i = 0; i < buffer.length; ++i) {
+ let input = inputData[i];
+
+ // Calculate an index based on input -1 -> +1 with 0 being at the
+ // center of the curve data.
+ let index = Math.floor(numberOfCurveFrames * 0.5 * (input + 1));
+
+ // Clip index to the input range of the curve.
+ // This takes care of input outside of nominal range -1 -> +1
+ index = index < 0 ? 0 : index;
+ index =
+ index > numberOfCurveFrames - 1 ? numberOfCurveFrames - 1 : index;
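+ // This mirrors the spec's clamping: inputs below -1 use the first curve
+ // value and inputs above +1 use the last.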
+
+ let expectedOutput = waveShapingCurve[index];
+
+ let output = outputData[i];
+
+ if (output != expectedOutput) {
+ success = false;
+ break;
+ }
+ }
+
+ should(
+ success, 'WaveShaperNode applied non-linear distortion correctly')
+ .beTrue();
+ }
+
+ audit.define('test', function(task, should) {
+ // Create offline audio context.
+ context = new OfflineAudioContext(1, numberOfRenderFrames, sampleRate);
+
+ // source -> waveshaper -> destination
+ let source = context.createBufferSource();
+ let waveshaper = context.createWaveShaper();
+ source.connect(waveshaper);
+ waveshaper.connect(context.destination);
+
+ // Create an input test vector.
+ inputBuffer = generateInputBuffer();
+ source.buffer = inputBuffer;
+
+ // We'll apply non-linear distortion according to this shaping curve.
+ waveShapingCurve = generateWaveShapingCurve();
+ waveshaper.curve = waveShapingCurve;
+
+ source.start(0);
+
+ context.startRendering()
+ .then(buffer => checkShapedCurve(buffer, should))
+ .then(task.done.bind(task));
+ });
+
+ audit.run();
+ </script>
+ </body>
+</html>