/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
const lazy = {};
ChromeUtils.defineESModuleGetters(lazy, {
EventEmitter: "resource://gre/modules/EventEmitter.sys.mjs",
});
XPCOMUtils.defineLazyServiceGetter(
lazy,
"IOUtil",
"@mozilla.org/io-util;1",
"nsIIOUtil"
);
ChromeUtils.defineLazyGetter(lazy, "ScriptableInputStream", () => {
return Components.Constructor(
"@mozilla.org/scriptableinputstream;1",
"nsIScriptableInputStream",
"init"
);
});
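// Size of the nsIBufferedOutputStream buffer used when wrapping an
// unbuffered output stream below (32 KiB).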
const BUFFER_SIZE = 0x8000;
/**
* This helper function (and its companion object) are used by bulk
* senders and receivers to read and write data in and out of other streams.
* Functions that make use of this tool are passed to callers when it is
* time to read or write bulk data. It is highly recommended to use these
* copier functions instead of the stream directly because the copier
* enforces the agreed upon length. Since bulk mode reuses an existing
* stream, the sender and receiver must write and read exactly the agreed
* upon amount of data, or else the entire transport will be left in an
* invalid state. Additionally, other methods of stream copying (such as
* NetUtil.asyncCopy) close the streams involved, which would terminate
* the debugging transport, and so it is avoided here.
*
* Overall, this *works*, but clearly the optimal solution would be
* to use the streams directly. If it were possible to fully
* implement nsIInputStream/nsIOutputStream in JS, wrapper streams could
* be created to enforce the length and avoid closing, and consumers could
* use familiar stream utilities like NetUtil.asyncCopy.
*
* The function takes two async streams and copies a precise number
* of bytes from one to the other. Copying begins immediately, but may
* complete at some future time depending on data size. Use the returned
* promise to know when it's complete.
*
* @param {nsIAsyncInputStream} input
* Stream to copy from.
* @param {nsIAsyncOutputStream} output
* Stream to copy to.
* @param {number} length
* Amount of data that needs to be copied.
*
* @returns {Promise}
* Promise is resolved when copying completes or rejected if any
* (unexpected) errors occur.
*/
function copyStream(input, output, length) {
let copier = new StreamCopier(input, output, length);
return copier.copy();
}
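// A minimal usage sketch (hypothetical caller code, not part of this module):
// `input`, `output`, and `byteLength` are assumed to be an nsIAsyncInputStream,
// an nsIAsyncOutputStream, and the agreed-upon byte count. The returned copier
// is awaitable (its `then` is wired to the internal promise) and, being an
// EventEmitter, also emits "progress" events:
//
//   const copier = StreamUtils.copyStream(input, output, byteLength);
//   copier.on("progress", ({ bytesSent, totalBytes }) => {
//     console.log(`Copied ${bytesSent} of ${totalBytes} bytes`);
//   });
//   await copier;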
/** @class */
function StreamCopier(input, output, length) {
lazy.EventEmitter.decorate(this);
this._id = StreamCopier._nextId++;
this.input = input;
// Save off the base output stream, since we know it's async as we've
// required
this.baseAsyncOutput = output;
if (lazy.IOUtil.outputStreamIsBuffered(output)) {
this.output = output;
} else {
this.output = Cc[
"@mozilla.org/network/buffered-output-stream;1"
].createInstance(Ci.nsIBufferedOutputStream);
this.output.init(output, BUFFER_SIZE);
}
this._length = length;
this._amountLeft = length;
// Create the deferred object before constructing the promise: the promise
// executor runs synchronously, so it must be able to attach resolve/reject
// to an already-existing object.
this._deferred = {};
this._deferred.promise = new Promise((resolve, reject) => {
this._deferred.resolve = resolve;
this._deferred.reject = reject;
});
this._copy = this._copy.bind(this);
this._flush = this._flush.bind(this);
this._destroy = this._destroy.bind(this);
// Copy promise's then method up to this object.
//
// Allows the copier to offer a promise interface for the simple succeed
// or fail scenarios, but also emit events (due to the EventEmitter)
// for other states, like progress.
this.then = this._deferred.promise.then.bind(this._deferred.promise);
this.then(this._destroy, this._destroy);
// Stream ready callback starts as |_copy|, but may switch to |_flush|
// at end if flushing would block the output stream.
this._streamReadyCallback = this._copy;
}
StreamCopier._nextId = 0;
StreamCopier.prototype = {
copy() {
// Dispatch to the next tick so that it's possible to attach a progress
// event listener, even for extremely fast copies (like when testing).
Services.tm.currentThread.dispatch(() => {
try {
this._copy();
} catch (e) {
this._deferred.reject(e);
}
}, 0);
return this;
},
_copy() {
let bytesAvailable = this.input.available();
let amountToCopy = Math.min(bytesAvailable, this._amountLeft);
this._debug("Trying to copy: " + amountToCopy);
let bytesCopied;
try {
bytesCopied = this.output.writeFrom(this.input, amountToCopy);
} catch (e) {
if (e.result == Cr.NS_BASE_STREAM_WOULD_BLOCK) {
this._debug("Base stream would block, will retry");
this._debug("Waiting for output stream");
this.baseAsyncOutput.asyncWait(this, 0, 0, Services.tm.currentThread);
return;
}
throw e;
}
this._amountLeft -= bytesCopied;
this._debug("Copied: " + bytesCopied + ", Left: " + this._amountLeft);
this._emitProgress();
if (this._amountLeft === 0) {
this._debug("Copy done!");
this._flush();
return;
}
this._debug("Waiting for input stream");
this.input.asyncWait(this, 0, 0, Services.tm.currentThread);
},
_emitProgress() {
this.emit("progress", {
bytesSent: this._length - this._amountLeft,
totalBytes: this._length,
});
},
_flush() {
try {
this.output.flush();
} catch (e) {
if (
e.result == Cr.NS_BASE_STREAM_WOULD_BLOCK ||
e.result == Cr.NS_ERROR_FAILURE
) {
this._debug("Flush would block, will retry");
this._streamReadyCallback = this._flush;
this._debug("Waiting for output stream");
this.baseAsyncOutput.asyncWait(this, 0, 0, Services.tm.currentThread);
return;
}
throw e;
}
this._deferred.resolve();
},
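// Clear stream references and bound callbacks once the copy has settled,
// so the copier does not keep them alive afterwards.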
_destroy() {
this._destroy = null;
this._copy = null;
this._flush = null;
this.input = null;
this.output = null;
},
// nsIInputStreamCallback
onInputStreamReady() {
this._streamReadyCallback();
},
// nsIOutputStreamCallback
onOutputStreamReady() {
this._streamReadyCallback();
},
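// Debug output is disabled by default; this hook is a no-op.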
_debug() {},
};
/**
* Read from a stream, one byte at a time, up to the next
* <var>delimiter</var> character, but stopping once we've read <var>count</var>
* bytes without finding it. Reading also terminates early if there are fewer
* than <var>count</var> bytes available on the stream. In that case,
* we only read as many bytes as the stream currently has to offer.
*
* @param {nsIInputStream} stream
* Input stream to read from.
* @param {string} delimiter
* Character we're trying to find.
* @param {number} count
* Max number of characters to read while searching.
*
* @returns {string}
* Collected data. If the delimiter was found, this string will
* end with it.
*/
// TODO: This implementation could be removed if bug 984651 is fixed,
// which provides a native version of the same idea.
function delimitedRead(stream, delimiter, count) {
let scriptableStream;
if (stream instanceof Ci.nsIScriptableInputStream) {
scriptableStream = stream;
} else {
scriptableStream = new lazy.ScriptableInputStream(stream);
}
let data = "";
// Don't exceed what's available on the stream
count = Math.min(count, stream.available());
if (count <= 0) {
return data;
}
let char;
while (char !== delimiter && count > 0) {
char = scriptableStream.readBytes(1);
count--;
data += char;
}
return data;
}
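// A minimal usage sketch (hypothetical caller code, not part of this module):
// scan at most 20 bytes of `stream` for a ":" delimiter, e.g. when parsing a
// "length:data" style header. The returned string ends with ":" only if the
// delimiter was found within that budget:
//
//   const header = StreamUtils.delimitedRead(stream, ":", 20);
//   if (header.endsWith(":")) {
//     const byteLength = parseInt(header.slice(0, -1), 10);
//     // ...read `byteLength` more bytes from the stream...
//   }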
export const StreamUtils = {
copyStream,
delimitedRead,
};