<!DOCTYPE HTML>
<html>
<head>
<title>Test ScriptProcessorNode</title>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="text/javascript" src="webaudio.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
// We do not use our generic graph test framework here because
// the testing logic is fairly involved and would not map easily onto
// OfflineAudioContext, since ScriptProcessorNodes can experience delays.
SimpleTest.waitForExplicitFinish();
addLoadEvent(function() {
var context = new AudioContext();
var buffer = null;
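// With only a bufferSize argument, createScriptProcessor defaults to
// 2 input and 2 output channels; this node generates the test signal.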
var sourceSP = context.createScriptProcessor(2048);
sourceSP.addEventListener("audioprocess", function fillSourceBuffer(e) {
// generate the audio
for (var i = 0; i < 2048; ++i) {
// Make sure our first sample won't be zero
e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate);
e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate);
}
// Remember our generated audio
buffer = e.outputBuffer;
sourceSP.removeEventListener("audioprocess", fillSourceBuffer);
});
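// Per the Web Audio API, explicit ScriptProcessorNode buffer sizes must be
// powers of two between 256 and 16384, so these calls must throw
// INDEX_SIZE_ERR.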
expectException(function() {
context.createScriptProcessor(1);
}, DOMException.INDEX_SIZE_ERR);
expectException(function() {
context.createScriptProcessor(2);
}, DOMException.INDEX_SIZE_ERR);
expectException(function() {
context.createScriptProcessor(128);
}, DOMException.INDEX_SIZE_ERR);
expectException(function() {
context.createScriptProcessor(255);
}, DOMException.INDEX_SIZE_ERR);
is(sourceSP.channelCount, 2, "script processor node has 2 input channels by default");
is(sourceSP.channelCountMode, "explicit", "Correct channelCountMode for the script processor node");
is(sourceSP.channelInterpretation, "speakers", "Correct channelInterpretation for the script processor node");
function findFirstNonZeroSample(buffer) {
for (var i = 0; i < buffer.length; ++i) {
if (buffer.getChannelData(0)[i] != 0) {
return i;
}
}
return buffer.length;
}
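// The node under test: a second ScriptProcessorNode that receives the
// generated audio (sourceSP -> sp -> destination) and inspects it in its
// audioprocess events.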
var sp = context.createScriptProcessor(2048);
sourceSP.connect(sp);
sp.connect(context.destination);
var lastPlaybackTime = 0;
var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate);
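// Checks common to every AudioProcessingEvent we receive: event target,
// strictly increasing playbackTime, buffer geometry, and that the output
// buffer starts out silent (we never write to it in this test).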
function checkAudioProcessingEvent(e) {
is(e.target, sp, "Correct event target");
ok(e.playbackTime > lastPlaybackTime, "playbackTime correctly set");
lastPlaybackTime = e.playbackTime;
is(e.inputBuffer.numberOfChannels, 2, "Correct number of channels for the input buffer");
is(e.inputBuffer.length, 2048, "Correct length for the input buffer");
is(e.inputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the input buffer");
is(e.outputBuffer.numberOfChannels, 2, "Correct number of channels for the output buffer");
is(e.outputBuffer.length, 2048, "Correct length for the output buffer");
is(e.outputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the output buffer");
compareChannels(e.outputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
compareChannels(e.outputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
}
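// The onaudioprocess handler is replaced as the test progresses: the first
// callback should only see silence because of the buffering latency of the
// second ScriptProcessorNode; later callbacks look for the point where the
// generated tone starts showing up in the input.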
sp.onaudioprocess = function(e) {
isnot(buffer, null, "The audioprocess handler for sourceSP must be run at this point");
checkAudioProcessingEvent(e);
// Because of the initial latency added by the second script processor node,
// we will never see any generated audio frames in the first callback.
compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0));
compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0));
sp.onaudioprocess = function(e) {
checkAudioProcessingEvent(e);
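// Locate where the generated tone starts in the input buffer.  Any leading
// samples must be silent, and from firstNonZero onwards the input must match
// the beginning of the buffer captured from sourceSP.  (As used below,
// compareChannels from webaudio.js accepts optional trailing length,
// source-offset and destination-offset arguments.)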
var firstNonZero = findFirstNonZeroSample(e.inputBuffer);
ok(firstNonZero <= 2048, "First non-zero sample within range");
compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero);
compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero);
compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0);
compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0);
if (firstNonZero == 0) {
// If we did not experience any delays, the test is done!
sp.onaudioprocess = null;
SimpleTest.finish();
} else if (firstNonZero != 2048) {
// We saw a partially silent buffer, so the remaining generated samples
// should arrive at the start of the next callback; check them there.
sp.onaudioprocess = function(e) {
checkAudioProcessingEvent(e);
compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero);
compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero);
compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero);
compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero);
sp.onaudioprocess = null;
SimpleTest.finish();
};
}
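// Otherwise the buffer was entirely silent (firstNonZero == 2048); this
// handler stays installed and we simply try again on the next callback.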
};
};
});
</script>
</pre>
</body>
</html>