Diffstat (limited to 'dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html')
-rw-r--r-- | dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html | 80 |
1 file changed, 80 insertions, 0 deletions
diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html
new file mode 100644
index 0000000000..5e9a9960b7
--- /dev/null
+++ b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html
@@ -0,0 +1,80 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <title>Test ScriptProcessorNode channel count handling</title>
+  <script src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="text/javascript" src="webaudio.js"></script>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+// We do not use our generic graph test framework here because
+// the testing logic here is sort of complicated, and would
+// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes
+// can experience delays.
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+  var context = new AudioContext();
+  var buffer = context.createBuffer(6, 2048, context.sampleRate);
+  for (var i = 0; i < 2048; ++i) {
+    for (var j = 0; j < 6; ++j) {
+      buffer.getChannelData(j)[i] = Math.sin(440 * j * Math.PI * i / context.sampleRate);
+    }
+  }
+
+  var monoBuffer = context.createBuffer(1, 2048, context.sampleRate);
+  for (var i = 0; i < 2048; ++i) {
+    monoBuffer.getChannelData(0)[i] = 1;
+  }
+
+  var source = context.createBufferSource();
+
+  var sp = context.createScriptProcessor(2048, 3);
+  expectException(function() { sp.channelCount = 2; },
+                  DOMException.NOT_SUPPORTED_ERR);
+  sp.channelCountMode = "explicit";
+  expectException(function() { sp.channelCountMode = "max"; },
+                  DOMException.NOT_SUPPORTED_ERR);
+  expectException(function() { sp.channelCountMode = "clamped-max"; },
+                  DOMException.NOT_SUPPORTED_ERR);
+  sp.channelInterpretation = "discrete";
+  source.start(0);
+  source.buffer = buffer;
+  source.connect(sp);
+  sp.connect(context.destination);
+
+  var monoSource = context.createBufferSource();
+  monoSource.buffer = monoBuffer;
+  monoSource.connect(sp);
+  monoSource.start(2048 / context.sampleRate);
+
+  sp.onaudioprocess = function(e) {
+    is(e.inputBuffer.numberOfChannels, 3, "Should be correctly down-mixed to three channels");
+    for (var i = 0; i < 3; ++i) {
+      compareChannels(e.inputBuffer.getChannelData(i), buffer.getChannelData(i));
+    }
+
+    // On the next block the mono source is playing: channel 0 carries its data,
+    sp.onaudioprocess = function(e) {
+      var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
+      is(e.inputBuffer.numberOfChannels, 3, "Should be correctly up-mixed to three channels");
+      compareChannels(e.inputBuffer.getChannelData(0), monoBuffer.getChannelData(0));
+      for (var i = 1; i < 3; ++i) {
+        compareChannels(e.inputBuffer.getChannelData(i), emptyBuffer.getChannelData(0));
+      }
+
+      sp.onaudioprocess = null;
+      sp.disconnect(context.destination);
+
+      SimpleTest.finish();
+    };
+  };
+});
+
+</script>
+</pre>
+</body>
+</html>
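
For context, the sketch below (not part of the patch, and written outside the SimpleTest harness) illustrates the ScriptProcessorNode behavior the test exercises: the node's channel configuration is locked, and with channelInterpretation set to "discrete" extra input channels are dropped on down-mix and missing channels are zero-filled on up-mix. The variable names here are illustrative; the expectException and compareChannels helpers used in the test itself are provided by the included webaudio.js support script.

// Illustrative sketch only; assumes a page with user-activated audio.
var ctx = new AudioContext();
var sp = ctx.createScriptProcessor(2048, 3);   // bufferSize 2048, 3 input channels

// ScriptProcessorNode locks its channel configuration: changing channelCount,
// or moving channelCountMode away from "explicit", is expected to throw.
try {
  sp.channelCount = 2;
} catch (e) {
  console.log(e.name);   // "NotSupportedError"
}
try {
  sp.channelCountMode = "max";
} catch (e) {
  console.log(e.name);   // "NotSupportedError"
}

// With "discrete" interpretation, a 6-channel input is down-mixed by dropping
// channels 3-5, and a mono input is up-mixed by zero-filling channels 1-2 --
// which is what the two onaudioprocess callbacks in the test verify.
sp.channelInterpretation = "discrete";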