Diffstat (limited to 'testing/xpcshell')
-rw-r--r--  testing/xpcshell/README | 6
-rw-r--r--  testing/xpcshell/dbg-actors.js | 48
-rw-r--r--  testing/xpcshell/dns-packet/.editorconfig | 10
-rw-r--r--  testing/xpcshell/dns-packet/.eslintrc | 9
-rw-r--r--  testing/xpcshell/dns-packet/.gitignore | 4
-rw-r--r--  testing/xpcshell/dns-packet/.travis.yml | 11
-rw-r--r--  testing/xpcshell/dns-packet/CHANGELOG.md | 30
-rw-r--r--  testing/xpcshell/dns-packet/LICENSE | 21
-rw-r--r--  testing/xpcshell/dns-packet/README.md | 365
-rw-r--r--  testing/xpcshell/dns-packet/classes.js | 23
-rw-r--r--  testing/xpcshell/dns-packet/examples/doh.js | 52
-rw-r--r--  testing/xpcshell/dns-packet/examples/tcp.js | 52
-rw-r--r--  testing/xpcshell/dns-packet/examples/tls.js | 61
-rw-r--r--  testing/xpcshell/dns-packet/examples/udp.js | 28
-rw-r--r--  testing/xpcshell/dns-packet/index.js | 1841
-rw-r--r--  testing/xpcshell/dns-packet/opcodes.js | 50
-rw-r--r--  testing/xpcshell/dns-packet/optioncodes.js | 61
-rw-r--r--  testing/xpcshell/dns-packet/package.json | 48
-rw-r--r--  testing/xpcshell/dns-packet/rcodes.js | 50
-rw-r--r--  testing/xpcshell/dns-packet/test.js | 613
-rw-r--r--  testing/xpcshell/dns-packet/types.js | 105
-rw-r--r--  testing/xpcshell/example/moz.build | 12
-rw-r--r--  testing/xpcshell/example/unit/check_profile.js | 44
-rw-r--r--  testing/xpcshell/example/unit/file.txt | 1
-rw-r--r--  testing/xpcshell/example/unit/import_module.sys.mjs | 9
-rw-r--r--  testing/xpcshell/example/unit/load_subscript.js | 6
-rw-r--r--  testing/xpcshell/example/unit/location_load.js | 8
-rw-r--r--  testing/xpcshell/example/unit/prefs_test_common.js | 47
-rw-r--r--  testing/xpcshell/example/unit/subdir/file.txt | 1
-rw-r--r--  testing/xpcshell/example/unit/test_add_setup.js | 23
-rw-r--r--  testing/xpcshell/example/unit/test_check_nsIException.js | 10
-rw-r--r--  testing/xpcshell/example/unit/test_check_nsIException_failing.js | 10
-rw-r--r--  testing/xpcshell/example/unit/test_do_check_matches.js | 14
-rw-r--r--  testing/xpcshell/example/unit/test_do_check_matches_failing.js | 12
-rw-r--r--  testing/xpcshell/example/unit/test_do_check_null.js | 6
-rw-r--r--  testing/xpcshell/example/unit/test_do_check_null_failing.js | 6
-rw-r--r--  testing/xpcshell/example/unit/test_do_get_tempdir.js | 14
-rw-r--r--  testing/xpcshell/example/unit/test_execute_soon.js | 20
-rw-r--r--  testing/xpcshell/example/unit/test_fail.js | 8
-rw-r--r--  testing/xpcshell/example/unit/test_get_file.js | 31
-rw-r--r--  testing/xpcshell/example/unit/test_get_idle.js | 24
-rw-r--r--  testing/xpcshell/example/unit/test_import_module.js | 19
-rw-r--r--  testing/xpcshell/example/unit/test_load.js | 20
-rw-r--r--  testing/xpcshell/example/unit/test_load_httpd_js.js | 13
-rw-r--r--  testing/xpcshell/example/unit/test_location.js | 13
-rw-r--r--  testing/xpcshell/example/unit/test_multiple_setups.js | 13
-rw-r--r--  testing/xpcshell/example/unit/test_multiple_tasks.js | 20
-rw-r--r--  testing/xpcshell/example/unit/test_prefs_defaults.js | 18
-rw-r--r--  testing/xpcshell/example/unit/test_prefs_defaults_and_file.js | 42
-rw-r--r--  testing/xpcshell/example/unit/test_prefs_defaults_included.js | 16
-rw-r--r--  testing/xpcshell/example/unit/test_prefs_no_defaults.js | 15
-rw-r--r--  testing/xpcshell/example/unit/test_prefs_no_defaults_with_file.js | 15
-rw-r--r--  testing/xpcshell/example/unit/test_profile.js | 11
-rw-r--r--  testing/xpcshell/example/unit/test_profile_afterChange.js | 11
-rw-r--r--  testing/xpcshell/example/unit/test_sample.js | 21
-rw-r--r--  testing/xpcshell/example/unit/test_skip.js | 8
-rw-r--r--  testing/xpcshell/example/unit/test_tasks_skip.js | 21
-rw-r--r--  testing/xpcshell/example/unit/test_tasks_skipall.js | 23
-rw-r--r--  testing/xpcshell/example/unit/xpcshell-included-with-prefs.ini | 5
-rw-r--r--  testing/xpcshell/example/unit/xpcshell-with-prefs.ini | 16
-rw-r--r--  testing/xpcshell/example/unit/xpcshell.ini | 60
-rw-r--r--  testing/xpcshell/head.js | 1897
-rw-r--r--  testing/xpcshell/mach_commands.py | 278
-rw-r--r--  testing/xpcshell/mach_test_package_commands.py | 48
-rw-r--r--  testing/xpcshell/moz-http2/http2-cert.key | 28
-rw-r--r--  testing/xpcshell/moz-http2/http2-cert.key.keyspec | 1
-rw-r--r--  testing/xpcshell/moz-http2/http2-cert.pem | 19
-rw-r--r--  testing/xpcshell/moz-http2/http2-cert.pem.certspec | 4
-rw-r--r--  testing/xpcshell/moz-http2/moz-http2-child.js | 33
-rw-r--r--  testing/xpcshell/moz-http2/moz-http2.js | 2087
-rw-r--r--  testing/xpcshell/moz-http2/proxy-cert.key | 28
-rw-r--r--  testing/xpcshell/moz-http2/proxy-cert.key.keyspec | 1
-rw-r--r--  testing/xpcshell/moz-http2/proxy-cert.pem | 19
-rw-r--r--  testing/xpcshell/moz-http2/proxy-cert.pem.certspec | 4
-rw-r--r--  testing/xpcshell/moz.build | 15
-rw-r--r--  testing/xpcshell/node-http2/.gitignore | 7
-rw-r--r--  testing/xpcshell/node-http2/.travis.yml | 5
-rw-r--r--  testing/xpcshell/node-http2/HISTORY.md | 264
-rw-r--r--  testing/xpcshell/node-http2/LICENSE | 22
-rw-r--r--  testing/xpcshell/node-http2/README.md | 173
-rw-r--r--  testing/xpcshell/node-http2/example/client.js | 48
-rw-r--r--  testing/xpcshell/node-http2/example/localhost.crt | 14
-rw-r--r--  testing/xpcshell/node-http2/example/localhost.key | 15
-rw-r--r--  testing/xpcshell/node-http2/example/server.js | 67
-rw-r--r--  testing/xpcshell/node-http2/lib/http.js | 1276
-rw-r--r--  testing/xpcshell/node-http2/lib/index.js | 52
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/compressor.js | 1428
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/connection.js | 630
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/endpoint.js | 262
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/flow.js | 345
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/framer.js | 1166
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/index.js | 91
-rw-r--r--  testing/xpcshell/node-http2/lib/protocol/stream.js | 677
-rw-r--r--  testing/xpcshell/node-http2/package.json | 46
-rw-r--r--  testing/xpcshell/node-http2/test/compressor.js | 575
-rw-r--r--  testing/xpcshell/node-http2/test/connection.js | 237
-rw-r--r--  testing/xpcshell/node-http2/test/endpoint.js | 41
-rw-r--r--  testing/xpcshell/node-http2/test/flow.js | 260
-rw-r--r--  testing/xpcshell/node-http2/test/framer.js | 395
-rw-r--r--  testing/xpcshell/node-http2/test/http.js | 793
-rw-r--r--  testing/xpcshell/node-http2/test/stream.js | 413
-rw-r--r--  testing/xpcshell/node-http2/test/util.js | 89
-rw-r--r--  testing/xpcshell/node-ip/.gitignore | 2
-rw-r--r--  testing/xpcshell/node-ip/.jscsrc | 46
-rw-r--r--  testing/xpcshell/node-ip/.jshintrc | 89
-rw-r--r--  testing/xpcshell/node-ip/.travis.yml | 15
-rw-r--r--  testing/xpcshell/node-ip/README.md | 90
-rw-r--r--  testing/xpcshell/node-ip/lib/ip.js | 416
-rw-r--r--  testing/xpcshell/node-ip/package.json | 21
-rw-r--r--  testing/xpcshell/node-ip/test/api-test.js | 407
-rw-r--r--  testing/xpcshell/node-ws/.eslintrc.yaml | 19
-rw-r--r--  testing/xpcshell/node-ws/.gitignore | 4
-rw-r--r--  testing/xpcshell/node-ws/.npmrc | 1
-rw-r--r--  testing/xpcshell/node-ws/.prettierrc.yaml | 5
-rw-r--r--  testing/xpcshell/node-ws/LICENSE | 19
-rw-r--r--  testing/xpcshell/node-ws/README.md | 495
-rw-r--r--  testing/xpcshell/node-ws/SECURITY.md | 39
-rw-r--r--  testing/xpcshell/node-ws/bench/parser.benchmark.js | 95
-rw-r--r--  testing/xpcshell/node-ws/bench/sender.benchmark.js | 48
-rw-r--r--  testing/xpcshell/node-ws/bench/speed.js | 115
-rw-r--r--  testing/xpcshell/node-ws/browser.js | 8
-rw-r--r--  testing/xpcshell/node-ws/doc/ws.md | 669
-rw-r--r--  testing/xpcshell/node-ws/examples/express-session-parse/index.js | 101
-rw-r--r--  testing/xpcshell/node-ws/examples/express-session-parse/package.json | 11
-rw-r--r--  testing/xpcshell/node-ws/examples/express-session-parse/public/app.js | 67
-rw-r--r--  testing/xpcshell/node-ws/examples/express-session-parse/public/index.html | 24
-rw-r--r--  testing/xpcshell/node-ws/examples/server-stats/index.js | 33
-rw-r--r--  testing/xpcshell/node-ws/examples/server-stats/package.json | 9
-rw-r--r--  testing/xpcshell/node-ws/examples/server-stats/public/index.html | 63
-rw-r--r--  testing/xpcshell/node-ws/examples/ssl.js | 37
-rw-r--r--  testing/xpcshell/node-ws/index.js | 13
-rw-r--r--  testing/xpcshell/node-ws/lib/buffer-util.js | 127
-rw-r--r--  testing/xpcshell/node-ws/lib/constants.js | 12
-rw-r--r--  testing/xpcshell/node-ws/lib/event-target.js | 266
-rw-r--r--  testing/xpcshell/node-ws/lib/extension.js | 203
-rw-r--r--  testing/xpcshell/node-ws/lib/limiter.js | 55
-rw-r--r--  testing/xpcshell/node-ws/lib/permessage-deflate.js | 511
-rw-r--r--  testing/xpcshell/node-ws/lib/receiver.js | 618
-rw-r--r--  testing/xpcshell/node-ws/lib/sender.js | 478
-rw-r--r--  testing/xpcshell/node-ws/lib/stream.js | 159
-rw-r--r--  testing/xpcshell/node-ws/lib/subprotocol.js | 62
-rw-r--r--  testing/xpcshell/node-ws/lib/validation.js | 125
-rw-r--r--  testing/xpcshell/node-ws/lib/websocket-server.js | 535
-rw-r--r--  testing/xpcshell/node-ws/lib/websocket.js | 1305
-rw-r--r--  testing/xpcshell/node-ws/package.json | 61
-rw-r--r--  testing/xpcshell/node-ws/test/autobahn-server.js | 17
-rw-r--r--  testing/xpcshell/node-ws/test/autobahn.js | 39
-rw-r--r--  testing/xpcshell/node-ws/test/buffer-util.test.js | 15
-rw-r--r--  testing/xpcshell/node-ws/test/create-websocket-stream.test.js | 598
-rw-r--r--  testing/xpcshell/node-ws/test/event-target.test.js | 253
-rw-r--r--  testing/xpcshell/node-ws/test/extension.test.js | 190
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/ca-certificate.pem | 12
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/ca-key.pem | 5
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/certificate.pem | 12
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/client-certificate.pem | 12
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/client-key.pem | 5
-rw-r--r--  testing/xpcshell/node-ws/test/fixtures/key.pem | 5
-rw-r--r--  testing/xpcshell/node-ws/test/limiter.test.js | 41
-rw-r--r--  testing/xpcshell/node-ws/test/permessage-deflate.test.js | 647
-rw-r--r--  testing/xpcshell/node-ws/test/receiver.test.js | 1086
-rw-r--r--  testing/xpcshell/node-ws/test/sender.test.js | 370
-rw-r--r--  testing/xpcshell/node-ws/test/subprotocol.test.js | 91
-rw-r--r--  testing/xpcshell/node-ws/test/validation.test.js | 52
-rw-r--r--  testing/xpcshell/node-ws/test/websocket-server.test.js | 1284
-rw-r--r--  testing/xpcshell/node-ws/test/websocket.integration.js | 55
-rw-r--r--  testing/xpcshell/node-ws/test/websocket.test.js | 4514
-rw-r--r--  testing/xpcshell/node-ws/wrapper.mjs | 8
-rw-r--r--  testing/xpcshell/odoh-wasm/Cargo.toml | 42
-rw-r--r--  testing/xpcshell/odoh-wasm/LICENSE_APACHE | 176
-rw-r--r--  testing/xpcshell/odoh-wasm/LICENSE_MIT | 25
-rw-r--r--  testing/xpcshell/odoh-wasm/README.md | 75
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/README.md | 69
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/odoh_wasm.d.ts | 16
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/odoh_wasm.js | 132
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm | bin 0 -> 165175 bytes
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm.d.ts | 7
-rw-r--r--  testing/xpcshell/odoh-wasm/pkg/package.json | 15
-rw-r--r--  testing/xpcshell/odoh-wasm/src/lib.rs | 158
-rw-r--r--  testing/xpcshell/remotexpcshelltests.py | 791
-rwxr-xr-x  testing/xpcshell/runxpcshelltests.py | 2250
-rwxr-xr-x  testing/xpcshell/selftest.py | 1474
-rw-r--r--  testing/xpcshell/xpcshellcommandline.py | 412
182 files changed, 40458 insertions, 0 deletions
diff --git a/testing/xpcshell/README b/testing/xpcshell/README
new file mode 100644
index 0000000000..28de62e607
--- /dev/null
+++ b/testing/xpcshell/README
@@ -0,0 +1,6 @@
+Simple xpcshell-based test harness
+
+converted from netwerk/test/unit
+
+Some documentation at https://developer.mozilla.org/en-US/docs/Mozilla/QA/Writing_xpcshell-based_unit_tests
+See also http://wiki.mozilla.org/SoftwareTesting:Tools:Simple_xpcshell_test_harness
diff --git a/testing/xpcshell/dbg-actors.js b/testing/xpcshell/dbg-actors.js
new file mode 100644
index 0000000000..f9a44f1295
--- /dev/null
+++ b/testing/xpcshell/dbg-actors.js
@@ -0,0 +1,48 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals require, exports, Services */
+
+"use strict";
+
+const { DevToolsServer } = require("devtools/server/devtools-server");
+const { RootActor } = require("devtools/server/actors/root");
+const { BrowserTabList } = require("devtools/server/actors/webbrowser");
+const { ProcessActorList } = require("devtools/server/actors/process");
+const {
+ ActorRegistry,
+} = require("devtools/server/actors/utils/actor-registry");
+
+/**
+ * xpcshell-test (XPCST) specific actors.
+ *
+ */
+
+/**
+ * Construct a root actor appropriate for use in a server running xpcshell
+ * tests. <snip boilerplate> :)
+ */
+function createRootActor(connection) {
+ let parameters = {
+ tabList: new XPCSTTabList(connection),
+ processList: new ProcessActorList(),
+ globalActorFactories: ActorRegistry.globalActorFactories,
+ onShutdown() {
+ // If the user never switches to the "debugger" tab we might get a
+ // shutdown before we've attached.
+ Services.obs.notifyObservers(null, "xpcshell-test-devtools-shutdown");
+ },
+ };
+ return new RootActor(connection, parameters);
+}
+exports.createRootActor = createRootActor;
+
+/**
+ * A "stub" TabList implementation that provides no tabs.
+ */
+class XPCSTTabList extends BrowserTabList {
+ getList() {
+ return Promise.resolve([]);
+ }
+}
diff --git a/testing/xpcshell/dns-packet/.editorconfig b/testing/xpcshell/dns-packet/.editorconfig
new file mode 100644
index 0000000000..aaaa7a4baa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.editorconfig
@@ -0,0 +1,10 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+tab_width = 2
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
diff --git a/testing/xpcshell/dns-packet/.eslintrc b/testing/xpcshell/dns-packet/.eslintrc
new file mode 100644
index 0000000000..d3ed05cf86
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.eslintrc
@@ -0,0 +1,9 @@
+root: true
+
+parserOptions:
+ ecmaVersion: 2015
+
+env:
+ node: true
+
+extends: standard
diff --git a/testing/xpcshell/dns-packet/.gitignore b/testing/xpcshell/dns-packet/.gitignore
new file mode 100644
index 0000000000..cea4849cd9
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.gitignore
@@ -0,0 +1,4 @@
+node_modules/
+.nyc_output/
+coverage/
+package-lock.json
diff --git a/testing/xpcshell/dns-packet/.travis.yml b/testing/xpcshell/dns-packet/.travis.yml
new file mode 100644
index 0000000000..e0211604d3
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.travis.yml
@@ -0,0 +1,11 @@
+language: node_js
+node_js:
+ - node
+ - lts/*
+install:
+- npm install
+- npm install coveralls
+script:
+- npm run coverage
+after_success:
+- npx nyc report --reporter=text-lcov | npx coveralls
diff --git a/testing/xpcshell/dns-packet/CHANGELOG.md b/testing/xpcshell/dns-packet/CHANGELOG.md
new file mode 100644
index 0000000000..6b714e04c9
--- /dev/null
+++ b/testing/xpcshell/dns-packet/CHANGELOG.md
@@ -0,0 +1,30 @@
+# Version 5.2.0 - 2019-02-21
+
+- Feature: Added support for de/encoding certain OPT options.
+
+# Version 5.1.0 - 2019-01-22
+
+- Feature: Added support for the RP record type.
+
+# Version 5.0.0 - 2018-06-01
+
+- Breaking: Node.js 6.0.0 or greater is now required.
+- Feature: Added support for DNSSEC record types.
+
+# Version 4.1.0 - 2018-02-11
+
+- Feature: Added support for the MX record type.
+
+# Version 4.0.0 - 2018-02-04
+
+- Feature: Added `streamEncode` and `streamDecode` methods for encoding TCP packets.
+- Breaking: Changed the decoded value of TXT records to an array of Buffers. This accommodates DNS-SD records, which rely on the record's individual strings staying separate.
+- Breaking: Renamed `flag_trunc` and `flag_auth` to `flag_tc` and `flag_aa` to match the flag names used in the DNS standards.
+
+# Version 3.0.0 - 2018-01-12
+
+- Breaking: The `class` option has been changed from integer to string.
+
+# Version 2.0.0 - 2018-01-11
+
+- Breaking: Converted the module to ES2015; Node.js 4.0 or greater is now required.
diff --git a/testing/xpcshell/dns-packet/LICENSE b/testing/xpcshell/dns-packet/LICENSE
new file mode 100644
index 0000000000..bae9da7bfa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mathias Buus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/testing/xpcshell/dns-packet/README.md b/testing/xpcshell/dns-packet/README.md
new file mode 100644
index 0000000000..2a729b3d10
--- /dev/null
+++ b/testing/xpcshell/dns-packet/README.md
@@ -0,0 +1,365 @@
+# dns-packet
+[![](https://img.shields.io/npm/v/dns-packet.svg?style=flat)](https://www.npmjs.org/package/dns-packet) [![](https://img.shields.io/npm/dm/dns-packet.svg)](https://www.npmjs.org/package/dns-packet) [![](https://api.travis-ci.org/mafintosh/dns-packet.svg?style=flat)](https://travis-ci.org/mafintosh/dns-packet) [![Coverage Status](https://coveralls.io/repos/github/mafintosh/dns-packet/badge.svg?branch=master)](https://coveralls.io/github/mafintosh/dns-packet?branch=master)
+
+An [abstract-encoding](https://github.com/mafintosh/abstract-encoding) compliant module for encoding / decoding DNS packets. Lifted out of [multicast-dns](https://github.com/mafintosh/multicast-dns) as a separate module.
+
+```
+npm install dns-packet
+```
+
+## UDP Usage
+
+``` js
+const dnsPacket = require('dns-packet')
+const dgram = require('dgram')
+
+const socket = dgram.createSocket('udp4')
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: 1,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+socket.on('message', message => {
+ console.log(dnsPacket.decode(message)) // prints out a response from google dns
+})
+
+socket.send(buf, 0, buf.length, 53, '8.8.8.8')
+```
+
+Also see [the UDP example](examples/udp.js).
+
+## TCP, TLS, HTTPS
+
+While DNS has traditionally been carried over a datagram transport, it is increasingly carried over TCP for larger responses (commonly DNSSEC responses), and over TLS or HTTPS for enhanced security. See the examples below, and the sketch that follows them, for how to use `dns-packet` to wrap DNS packets in these protocols:
+
+- [TCP](examples/tcp.js)
+- [DNS over TLS](examples/tls.js)
+- [DNS over HTTPS](examples/doh.js)
+
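+For instance, the payload produced by `streamEncode` (see the API below) is the UDP payload prefixed with its two-byte big-endian length, which is what allows it to be written directly to a connected TCP or TLS socket. A minimal sketch of that relationship (illustrative only, not part of the upstream examples; `example.com` is a placeholder):
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const query = {
+  type: 'query',
+  id: 1,
+  questions: [{ type: 'A', name: 'example.com' }]
+}
+
+const udpBuf = dnsPacket.encode(query)       // raw DNS message
+const tcpBuf = dnsPacket.streamEncode(query) // length-prefixed DNS message
+
+// The stream form is the UDP form preceded by a 2-byte big-endian length.
+console.log(tcpBuf.readUInt16BE(0) === udpBuf.length) // true
+console.log(tcpBuf.slice(2).equals(udpBuf))           // true
+```
+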
+## API
+
+#### `var buf = packets.encode(packet, [buf], [offset])`
+
+Encodes a DNS packet into a buffer containing a UDP payload.
+
+#### `var packet = packets.decode(buf, [offset])`
+
+Decodes a DNS packet from a buffer containing a UDP payload.
+
+#### `var buf = packets.streamEncode(packet, [buf], [offset])`
+
+Encodes a DNS packet into a buffer containing a TCP payload.
+
+#### `var packet = packets.streamDecode(buf, [offset])`
+
+Decodes a DNS packet from a buffer containing a TCP payload.
+
+#### `var len = packets.encodingLength(packet)`
+
+Returns how many bytes are needed to encode the DNS packet.
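+
+For example (an illustrative sketch, not part of the upstream docs; the names are placeholders), `encodingLength` can be used to pre-allocate a buffer that `encode` then writes into at an offset:
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const packet = {
+  type: 'query',
+  id: 42,
+  questions: [{ type: 'AAAA', name: 'example.com' }]
+}
+
+// Reserve exactly enough room for the packet, starting 10 bytes in.
+const buf = Buffer.alloc(dnsPacket.encodingLength(packet) + 10)
+dnsPacket.encode(packet, buf, 10)
+
+// Decoding from the same offset round-trips the packet.
+const decoded = dnsPacket.decode(buf, 10)
+console.log(decoded.id)                // 42
+console.log(decoded.questions[0].type) // 'AAAA'
+```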
+
+## Packets
+
+Packets look like this
+
+``` js
+{
+ type: 'query|response',
+ id: optionalIdNumber,
+ flags: optionalBitFlags,
+ questions: [...],
+ answers: [...],
+ additionals: [...],
+ authorities: [...]
+}
+```
+
+The bit flags available are
+
+``` js
+packet.RECURSION_DESIRED
+packet.RECURSION_AVAILABLE
+packet.TRUNCATED_RESPONSE
+packet.AUTHORITATIVE_ANSWER
+packet.AUTHENTIC_DATA
+packet.CHECKING_DISABLED
+```
+
+To use more than one flag, bitwise-or them together:
+
+``` js
+var flags = packet.RECURSION_DESIRED | packet.RECURSION_AVAILABLE
+```
+
+To check for a flag, use bitwise-and:
+
+``` js
+var isRecursive = message.flags & packet.RECURSION_DESIRED
+```
+
+A question looks like this
+
+``` js
+{
+ type: 'A', // or SRV, AAAA, etc
+ class: 'IN', // one of IN, CS, CH, HS, ANY. Default: IN
+ name: 'google.com' // which record are you looking for
+}
+```
+
+And an answer, additional, or authority looks like this
+
+``` js
+{
+ type: 'A', // or SRV, AAAA, etc
+ class: 'IN', // one of IN, CS, CH, HS
+ name: 'google.com', // which name is this record for
+ ttl: optionalTimeToLiveInSeconds,
+ (record specific data, see below)
+}
+```
+
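+Putting the shapes together, encoding a response with one answer looks like this (an illustrative sketch based on the fields above; the name and address are placeholders):
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const response = dnsPacket.encode({
+  type: 'response',
+  id: 1,
+  flags: dnsPacket.AUTHORITATIVE_ANSWER,
+  questions: [{ type: 'A', class: 'IN', name: 'example.com' }],
+  answers: [{
+    type: 'A',
+    class: 'IN',
+    name: 'example.com',
+    ttl: 300,
+    data: '192.0.2.1' // record-specific data for an A record, see below
+  }]
+})
+
+console.log(dnsPacket.decode(response).answers[0].data) // '192.0.2.1'
+```
+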
+## Supported record types
+
+#### `A`
+
+``` js
+{
+ data: 'IPv4 address' // e.g. 127.0.0.1
+}
+```
+
+#### `AAAA`
+
+``` js
+{
+ data: 'IPv6 address' // e.g. fe80::1
+}
+```
+
+#### `CAA`
+
+``` js
+{
+ flags: 128, // octet
+ tag: 'issue|issuewild|iodef',
+ value: 'ca.example.net',
+ issuerCritical: false
+}
+```
+
+#### `CNAME`
+
+``` js
+{
+ data: 'cname.to.another.record'
+}
+```
+
+#### `DNAME`
+
+``` js
+{
+ data: 'dname.to.another.record'
+}
+```
+
+#### `DNSKEY`
+
+``` js
+{
+ flags: 257, // 16 bits
+ algorithm: 1, // octet
+ key: Buffer
+}
+```
+
+#### `DS`
+
+``` js
+{
+ keyTag: 12345,
+ algorithm: 8,
+ digestType: 1,
+ digest: Buffer
+}
+```
+
+#### `HINFO`
+
+``` js
+{
+ data: {
+ cpu: 'cpu info',
+ os: 'os info'
+ }
+}
+```
+
+#### `MX`
+
+``` js
+{
+ preference: 10,
+ exchange: 'mail.example.net'
+}
+```
+
+#### `NS`
+
+``` js
+{
+ data: nameServer
+}
+```
+
+#### `NSEC`
+
+``` js
+{
+ nextDomain: 'a.domain',
+ rrtypes: ['A', 'TXT', 'RRSIG']
+}
+```
+
+#### `NSEC3`
+
+``` js
+{
+ algorithm: 1,
+ flags: 0,
+ iterations: 2,
+ salt: Buffer,
+ nextDomain: Buffer, // Hashed per RFC5155
+ rrtypes: ['A', 'TXT', 'RRSIG']
+}
+```
+
+#### `NULL`
+
+``` js
+{
+ data: Buffer('any binary data')
+}
+```
+
+#### `OPT`
+
+[EDNS0](https://tools.ietf.org/html/rfc6891) options.
+
+``` js
+{
+ type: 'OPT',
+ name: '.',
+ udpPayloadSize: 4096,
+ flags: packet.DNSSEC_OK,
+ options: [{
+ // pass in any code/data for generic EDNS0 options
+ code: 12,
+ data: Buffer.alloc(31)
+ }, {
+ // Several EDNS0 options have enhanced support
+ code: 'PADDING',
+ length: 31,
+ }, {
+ code: 'CLIENT_SUBNET',
+ family: 2, // 1 for IPv4, 2 for IPv6
+ sourcePrefixLength: 64, // used to truncate IP address
+ scopePrefixLength: 0,
+ ip: 'fe80::',
+ }, {
+ code: 'TCP_KEEPALIVE',
+ timeout: 150 // increments of 100ms. This means 15s.
+ }, {
+ code: 'KEY_TAG',
+ tags: [1, 2, 3],
+ }]
+}
+```
+
+The options `PADDING`, `CLIENT_SUBNET`, `TCP_KEEPALIVE` and `KEY_TAG` support enhanced de/encoding. See [optioncodes.js](https://github.com/mafintosh/dns-packet/blob/master/optioncodes.js) for all supported option codes. If the `data` property is present on an option, it takes precedence. On decoding, `data` will always be defined.
+
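+On the decode side, enhanced options come back with both the raw `data` bytes and the parsed fields. A small sketch (illustrative only; the name and subnet are placeholders):
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const buf = dnsPacket.encode({
+  type: 'query',
+  questions: [{ type: 'A', name: 'example.com' }],
+  additionals: [{
+    type: 'OPT',
+    name: '.',
+    options: [{ code: 'CLIENT_SUBNET', ip: '192.0.2.0', sourcePrefixLength: 24 }]
+  }]
+})
+
+const [option] = dnsPacket.decode(buf).additionals[0].options
+console.log(option.type)                  // 'CLIENT_SUBNET'
+console.log(option.sourcePrefixLength)    // 24
+console.log(Buffer.isBuffer(option.data)) // true
+```
+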
+#### `PTR`
+
+``` js
+{
+ data: 'points.to.another.record'
+}
+```
+
+#### `RP`
+
+``` js
+{
+ mbox: 'admin.example.com',
+ txt: 'txt.example.com'
+}
+```
+
+#### `RRSIG`
+
+``` js
+{
+ typeCovered: 'A',
+ algorithm: 8,
+ labels: 1,
+ originalTTL: 3600,
+ expiration: timestamp,
+ inception: timestamp,
+ keyTag: 12345,
+ signersName: 'a.name',
+ signature: Buffer
+}
+```
+
+#### `SOA`
+
+``` js
+{
+ data:
+ {
+ mname: domainName,
+ rname: mailbox,
+ serial: zoneSerial,
+ refresh: refreshInterval,
+ retry: retryInterval,
+ expire: expireInterval,
+ minimum: minimumTTL
+ }
+}
+```
+
+#### `SRV`
+
+``` js
+{
+ data: {
+ port: servicePort,
+ target: serviceHostName,
+ priority: optionalServicePriority,
+ weight: optionalServiceWeight
+ }
+}
+```
+
+#### `TXT`
+
+``` js
+{
+ data: 'text' || Buffer || [ Buffer || 'text' ]
+}
+```
+
+When encoding, scalar values are converted to an array and strings are converted to UTF-8-encoded Buffers. When decoding, the return value will always be an array of Buffers.
+
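+A quick round trip (an illustrative sketch; the name and text are placeholders) shows the normalization:
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const buf = dnsPacket.encode({
+  type: 'response',
+  answers: [{ type: 'TXT', name: 'example.com', data: 'hello' }]
+})
+
+// The single string comes back as an array containing one Buffer.
+console.log(dnsPacket.decode(buf).answers[0].data) // [ <Buffer 68 65 6c 6c 6f> ]
+```
+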
+If you need another record type, open an issue and we'll try to add it.
+
+## License
+
+MIT
diff --git a/testing/xpcshell/dns-packet/classes.js b/testing/xpcshell/dns-packet/classes.js
new file mode 100644
index 0000000000..9a3d9b1e8c
--- /dev/null
+++ b/testing/xpcshell/dns-packet/classes.js
@@ -0,0 +1,23 @@
+'use strict'
+
+exports.toString = function (klass) {
+ switch (klass) {
+ case 1: return 'IN'
+ case 2: return 'CS'
+ case 3: return 'CH'
+ case 4: return 'HS'
+ case 255: return 'ANY'
+ }
+ return 'UNKNOWN_' + klass
+}
+
+exports.toClass = function (name) {
+ switch (name.toUpperCase()) {
+ case 'IN': return 1
+ case 'CS': return 2
+ case 'CH': return 3
+ case 'HS': return 4
+ case 'ANY': return 255
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/examples/doh.js b/testing/xpcshell/dns-packet/examples/doh.js
new file mode 100644
index 0000000000..37ef19fc35
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/doh.js
@@ -0,0 +1,52 @@
+
+'use strict'
+
+/*
+ * Sample code to make DNS over HTTPS request using POST
+ * AUTHOR: Tom Pusateri <pusateri@bangj.com>
+ * DATE: March 17, 2018
+ * LICENSE: MIT
+ */
+
+const dnsPacket = require('..')
+const https = require('https')
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const options = {
+ hostname: 'dns.google.com',
+ port: 443,
+ path: '/experimental',
+ method: 'POST',
+ headers: {
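+    // 'application/dns-udpwireformat' was the draft-era DoH media type; the
+    // finished RFC 8484 uses 'application/dns-message'.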
+ 'Content-Type': 'application/dns-udpwireformat',
+ 'Content-Length': Buffer.byteLength(buf)
+ }
+}
+
+const request = https.request(options, (response) => {
+ console.log('statusCode:', response.statusCode)
+ console.log('headers:', response.headers)
+
+ response.on('data', (d) => {
+ console.log(dnsPacket.decode(d))
+ })
+})
+
+request.on('error', (e) => {
+ console.error(e)
+})
+request.write(buf)
+request.end()
diff --git a/testing/xpcshell/dns-packet/examples/tcp.js b/testing/xpcshell/dns-packet/examples/tcp.js
new file mode 100644
index 0000000000..b25c2c41cb
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/tcp.js
@@ -0,0 +1,52 @@
+'use strict'
+
+const dnsPacket = require('..')
+const net = require('net')
+
+var response = null
+var expectedLength = 0
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.streamEncode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const client = new net.Socket()
+client.connect(53, '8.8.8.8', function () {
+ console.log('Connected')
+ client.write(buf)
+})
+
+client.on('data', function (data) {
+ console.log('Received response: %d bytes', data.byteLength)
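+  // A DNS-over-TCP response starts with a 2-byte big-endian message length;
+  // accumulate chunks until the full message has arrived, then streamDecode it.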
+ if (response == null) {
+ if (data.byteLength > 1) {
+ const plen = data.readUInt16BE(0)
+ expectedLength = plen
+ if (plen < 12) {
+ throw new Error('below DNS minimum packet length')
+ }
+ response = Buffer.from(data)
+ }
+ } else {
+ response = Buffer.concat([response, data])
+ }
+
+ if (response.byteLength >= expectedLength) {
+ console.log(dnsPacket.streamDecode(response))
+ client.destroy()
+ }
+})
+
+client.on('close', function () {
+ console.log('Connection closed')
+})
diff --git a/testing/xpcshell/dns-packet/examples/tls.js b/testing/xpcshell/dns-packet/examples/tls.js
new file mode 100644
index 0000000000..694a4fecfa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/tls.js
@@ -0,0 +1,61 @@
+'use strict'
+
+const tls = require('tls')
+const dnsPacket = require('..')
+
+var response = null
+var expectedLength = 0
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.streamEncode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const context = tls.createSecureContext({
+ secureProtocol: 'TLSv1_2_method'
+})
+
+const options = {
+ port: 853,
+ host: 'getdnsapi.net',
+ secureContext: context
+}
+
+const client = tls.connect(options, () => {
+ console.log('client connected')
+ client.write(buf)
+})
+
+client.on('data', function (data) {
+ console.log('Received response: %d bytes', data.byteLength)
+ if (response == null) {
+ if (data.byteLength > 1) {
+ const plen = data.readUInt16BE(0)
+ expectedLength = plen
+ if (plen < 12) {
+ throw new Error('below DNS minimum packet length')
+ }
+ response = Buffer.from(data)
+ }
+ } else {
+ response = Buffer.concat([response, data])
+ }
+
+ if (response.byteLength >= expectedLength) {
+ console.log(dnsPacket.streamDecode(response))
+ client.destroy()
+ }
+})
+
+client.on('end', () => {
+ console.log('Connection ended')
+})
diff --git a/testing/xpcshell/dns-packet/examples/udp.js b/testing/xpcshell/dns-packet/examples/udp.js
new file mode 100644
index 0000000000..0f9df9d794
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/udp.js
@@ -0,0 +1,28 @@
+'use strict'
+
+const dnsPacket = require('..')
+const dgram = require('dgram')
+
+const socket = dgram.createSocket('udp4')
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+socket.on('message', function (message, rinfo) {
+ console.log(rinfo)
+ console.log(dnsPacket.decode(message)) // prints out a response from google dns
+ socket.close()
+})
+
+socket.send(buf, 0, buf.length, 53, '8.8.8.8')
diff --git a/testing/xpcshell/dns-packet/index.js b/testing/xpcshell/dns-packet/index.js
new file mode 100644
index 0000000000..26b214ef4e
--- /dev/null
+++ b/testing/xpcshell/dns-packet/index.js
@@ -0,0 +1,1841 @@
+'use strict'
+
+const types = require('./types')
+const rcodes = require('./rcodes')
+exports.rcodes = rcodes;
+const opcodes = require('./opcodes')
+const classes = require('./classes')
+const optioncodes = require('./optioncodes')
+const ip = require('../node-ip')
+
+const QUERY_FLAG = 0
+const RESPONSE_FLAG = 1 << 15
+const FLUSH_MASK = 1 << 15
+const NOT_FLUSH_MASK = ~FLUSH_MASK
+const QU_MASK = 1 << 15
+const NOT_QU_MASK = ~QU_MASK
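+// Multicast DNS reuses the top bit of the class field: in answers it is the
+// cache-flush bit and in questions it is the unicast-response (QU) bit, hence
+// the shared 1 << 15 masks above.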
+
+const name = exports.txt = exports.name = {}
+
+name.encode = function (str, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(name.encodingLength(str))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ // strip leading and trailing .
+ const n = str.replace(/^\.|\.$/gm, '')
+ if (n.length) {
+ const list = n.split('.')
+
+ for (let i = 0; i < list.length; i++) {
+ const len = buf.write(list[i], offset + 1)
+ buf[offset] = len
+ offset += len + 1
+ }
+ }
+
+ buf[offset++] = 0
+
+ name.encode.bytes = offset - oldOffset
+ return buf
+}
+
+name.encode.bytes = 0
+
+name.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const list = []
+ const oldOffset = offset
+ let len = buf[offset++]
+
+ if (len === 0) {
+ name.decode.bytes = 1
+ return '.'
+ }
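+  // A length byte with the top two bits set (>= 0xc0) is a DNS compression
+  // pointer: the low 14 bits of the two-byte field give the offset of the rest
+  // of the name elsewhere in the packet (RFC 1035, section 4.1.4).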
+ if (len >= 0xc0) {
+ const res = name.decode(buf, buf.readUInt16BE(offset - 1) - 0xc000)
+ name.decode.bytes = 2
+ return res
+ }
+
+ while (len) {
+ if (len >= 0xc0) {
+ list.push(name.decode(buf, buf.readUInt16BE(offset - 1) - 0xc000))
+ offset++
+ break
+ }
+
+ list.push(buf.toString('utf-8', offset, offset + len))
+ offset += len
+ len = buf[offset++]
+ }
+
+ name.decode.bytes = offset - oldOffset
+ return list.join('.')
+}
+
+name.decode.bytes = 0
+
+name.encodingLength = function (n) {
+ if (n === '.') return 1
+ return Buffer.byteLength(n) + 2
+}
+
+const string = {}
+
+string.encode = function (s, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(string.encodingLength(s))
+ if (!offset) offset = 0
+
+ const len = buf.write(s, offset + 1)
+ buf[offset] = len
+ string.encode.bytes = len + 1
+ return buf
+}
+
+string.encode.bytes = 0
+
+string.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf[offset]
+ const s = buf.toString('utf-8', offset + 1, offset + 1 + len)
+ string.decode.bytes = len + 1
+ return s
+}
+
+string.decode.bytes = 0
+
+string.encodingLength = function (s) {
+ return Buffer.byteLength(s) + 1
+}
+
+const header = {}
+
+header.encode = function (h, buf, offset) {
+ if (!buf) buf = header.encodingLength(h)
+ if (!offset) offset = 0
+
+ const flags = (h.flags || 0) & 32767
+ const type = h.type === 'response' ? RESPONSE_FLAG : QUERY_FLAG
+
+ buf.writeUInt16BE(h.id || 0, offset)
+ buf.writeUInt16BE(flags | type, offset + 2)
+ buf.writeUInt16BE(h.questions.length, offset + 4)
+ buf.writeUInt16BE(h.answers.length, offset + 6)
+ buf.writeUInt16BE(h.authorities.length, offset + 8)
+ buf.writeUInt16BE(h.additionals.length, offset + 10)
+
+ return buf
+}
+
+header.encode.bytes = 12
+
+header.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ if (buf.length < 12) throw new Error('Header must be 12 bytes')
+ const flags = buf.readUInt16BE(offset + 2)
+
+ return {
+ id: buf.readUInt16BE(offset),
+ type: flags & RESPONSE_FLAG ? 'response' : 'query',
+ flags: flags & 32767,
+ flag_qr: ((flags >> 15) & 0x1) === 1,
+ opcode: opcodes.toString((flags >> 11) & 0xf),
+ flag_aa: ((flags >> 10) & 0x1) === 1,
+ flag_tc: ((flags >> 9) & 0x1) === 1,
+ flag_rd: ((flags >> 8) & 0x1) === 1,
+ flag_ra: ((flags >> 7) & 0x1) === 1,
+ flag_z: ((flags >> 6) & 0x1) === 1,
+ flag_ad: ((flags >> 5) & 0x1) === 1,
+ flag_cd: ((flags >> 4) & 0x1) === 1,
+ rcode: rcodes.toString(flags & 0xf),
+ questions: new Array(buf.readUInt16BE(offset + 4)),
+ answers: new Array(buf.readUInt16BE(offset + 6)),
+ authorities: new Array(buf.readUInt16BE(offset + 8)),
+ additionals: new Array(buf.readUInt16BE(offset + 10))
+ }
+}
+
+header.decode.bytes = 12
+
+header.encodingLength = function () {
+ return 12
+}
+
+const runknown = exports.unknown = {}
+
+runknown.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(runknown.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(data.length, offset)
+ data.copy(buf, offset + 2)
+
+ runknown.encode.bytes = data.length + 2
+ return buf
+}
+
+runknown.encode.bytes = 0
+
+runknown.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ const data = buf.slice(offset + 2, offset + 2 + len)
+ runknown.decode.bytes = len + 2
+ return data
+}
+
+runknown.decode.bytes = 0
+
+runknown.encodingLength = function (data) {
+ return data.length + 2
+}
+
+const rns = exports.ns = {}
+
+rns.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rns.encodingLength(data))
+ if (!offset) offset = 0
+
+ name.encode(data, buf, offset + 2)
+ buf.writeUInt16BE(name.encode.bytes, offset)
+ rns.encode.bytes = name.encode.bytes + 2
+ return buf
+}
+
+rns.encode.bytes = 0
+
+rns.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ const dd = name.decode(buf, offset + 2)
+
+ rns.decode.bytes = len + 2
+ return dd
+}
+
+rns.decode.bytes = 0
+
+rns.encodingLength = function (data) {
+ return name.encodingLength(data) + 2
+}
+
+const rsoa = exports.soa = {}
+
+rsoa.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rsoa.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ name.encode(data.mname, buf, offset)
+ offset += name.encode.bytes
+ name.encode(data.rname, buf, offset)
+ offset += name.encode.bytes
+ buf.writeUInt32BE(data.serial || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.refresh || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.retry || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.expire || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.minimum || 0, offset)
+ offset += 4
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rsoa.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rsoa.encode.bytes = 0
+
+rsoa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.mname = name.decode(buf, offset)
+ offset += name.decode.bytes
+ data.rname = name.decode(buf, offset)
+ offset += name.decode.bytes
+ data.serial = buf.readUInt32BE(offset)
+ offset += 4
+ data.refresh = buf.readUInt32BE(offset)
+ offset += 4
+ data.retry = buf.readUInt32BE(offset)
+ offset += 4
+ data.expire = buf.readUInt32BE(offset)
+ offset += 4
+ data.minimum = buf.readUInt32BE(offset)
+ offset += 4
+
+ rsoa.decode.bytes = offset - oldOffset
+ return data
+}
+
+rsoa.decode.bytes = 0
+
+rsoa.encodingLength = function (data) {
+ return 22 + name.encodingLength(data.mname) + name.encodingLength(data.rname)
+}
+
+const rtxt = exports.txt = {}
+
+rtxt.encode = function (data, buf, offset) {
+ if (!Array.isArray(data)) data = [data]
+ for (let i = 0; i < data.length; i++) {
+ if (typeof data[i] === 'string') {
+ data[i] = Buffer.from(data[i])
+ }
+ if (!Buffer.isBuffer(data[i])) {
+ throw new Error('Must be a Buffer')
+ }
+ }
+
+ if (!buf) buf = Buffer.allocUnsafe(rtxt.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+
+ data.forEach(function (d) {
+ buf[offset++] = d.length
+ d.copy(buf, offset, 0, d.length)
+ offset += d.length
+ })
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rtxt.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rtxt.encode.bytes = 0
+
+rtxt.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+ let remaining = buf.readUInt16BE(offset)
+ offset += 2
+
+ let data = []
+ while (remaining > 0) {
+ const len = buf[offset++]
+ --remaining
+ if (remaining < len) {
+ throw new Error('Buffer overflow')
+ }
+ data.push(buf.slice(offset, offset + len))
+ offset += len
+ remaining -= len
+ }
+
+ rtxt.decode.bytes = offset - oldOffset
+ return data
+}
+
+rtxt.decode.bytes = 0
+
+rtxt.encodingLength = function (data) {
+ if (!Array.isArray(data)) data = [data]
+ let length = 2
+ data.forEach(function (buf) {
+ if (typeof buf === 'string') {
+ length += Buffer.byteLength(buf) + 1
+ } else {
+ length += buf.length + 1
+ }
+ })
+ return length
+}
+
+const rnull = exports.null = {}
+
+rnull.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnull.encodingLength(data))
+ if (!offset) offset = 0
+
+ if (typeof data === 'string') data = Buffer.from(data)
+ if (!data) data = Buffer.allocUnsafe(0)
+
+ const oldOffset = offset
+ offset += 2
+
+ const len = data.length
+ data.copy(buf, offset, 0, len)
+ offset += len
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rnull.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rnull.encode.bytes = 0
+
+rnull.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+ const len = buf.readUInt16BE(offset)
+
+ offset += 2
+
+ const data = buf.slice(offset, offset + len)
+ offset += len
+
+ rnull.decode.bytes = offset - oldOffset
+ return data
+}
+
+rnull.decode.bytes = 0
+
+rnull.encodingLength = function (data) {
+ if (!data) return 2
+ return (Buffer.isBuffer(data) ? data.length : Buffer.byteLength(data)) + 2
+}
+
+const rhinfo = exports.hinfo = {}
+
+rhinfo.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rhinfo.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ string.encode(data.cpu, buf, offset)
+ offset += string.encode.bytes
+ string.encode(data.os, buf, offset)
+ offset += string.encode.bytes
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rhinfo.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rhinfo.encode.bytes = 0
+
+rhinfo.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.cpu = string.decode(buf, offset)
+ offset += string.decode.bytes
+ data.os = string.decode(buf, offset)
+ offset += string.decode.bytes
+ rhinfo.decode.bytes = offset - oldOffset
+ return data
+}
+
+rhinfo.decode.bytes = 0
+
+rhinfo.encodingLength = function (data) {
+ return string.encodingLength(data.cpu) + string.encodingLength(data.os) + 2
+}
+
+const rptr = exports.ptr = {}
+const rcname = exports.cname = rptr
+const rdname = exports.dname = rptr
+
+rptr.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rptr.encodingLength(data))
+ if (!offset) offset = 0
+
+ name.encode(data, buf, offset + 2)
+ buf.writeUInt16BE(name.encode.bytes, offset)
+ rptr.encode.bytes = name.encode.bytes + 2
+ return buf
+}
+
+rptr.encode.bytes = 0
+
+rptr.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const data = name.decode(buf, offset + 2)
+ rptr.decode.bytes = name.decode.bytes + 2
+ return data
+}
+
+rptr.decode.bytes = 0
+
+rptr.encodingLength = function (data) {
+ return name.encodingLength(data) + 2
+}
+
+const rsrv = exports.srv = {}
+
+rsrv.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rsrv.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(data.priority || 0, offset + 2)
+ buf.writeUInt16BE(data.weight || 0, offset + 4)
+ buf.writeUInt16BE(data.port || 0, offset + 6)
+ name.encode(data.target, buf, offset + 8)
+
+ const len = name.encode.bytes + 6
+ buf.writeUInt16BE(len, offset)
+
+ rsrv.encode.bytes = len + 2
+ return buf
+}
+
+rsrv.encode.bytes = 0
+
+rsrv.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+
+ const data = {}
+ data.priority = buf.readUInt16BE(offset + 2)
+ data.weight = buf.readUInt16BE(offset + 4)
+ data.port = buf.readUInt16BE(offset + 6)
+ data.target = name.decode(buf, offset + 8)
+
+ rsrv.decode.bytes = len + 2
+ return data
+}
+
+rsrv.decode.bytes = 0
+
+rsrv.encodingLength = function (data) {
+ return 8 + name.encodingLength(data.target)
+}
+
+const rcaa = exports.caa = {}
+
+rcaa.ISSUER_CRITICAL = 1 << 7
+
+rcaa.encode = function (data, buf, offset) {
+ const len = rcaa.encodingLength(data)
+
+ if (!buf) buf = Buffer.allocUnsafe(rcaa.encodingLength(data))
+ if (!offset) offset = 0
+
+ if (data.issuerCritical) {
+ data.flags = rcaa.ISSUER_CRITICAL
+ }
+
+ buf.writeUInt16BE(len - 2, offset)
+ offset += 2
+ buf.writeUInt8(data.flags || 0, offset)
+ offset += 1
+ string.encode(data.tag, buf, offset)
+ offset += string.encode.bytes
+ buf.write(data.value, offset)
+ offset += Buffer.byteLength(data.value)
+
+ rcaa.encode.bytes = len
+ return buf
+}
+
+rcaa.encode.bytes = 0
+
+rcaa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ offset += 2
+
+ const oldOffset = offset
+ const data = {}
+ data.flags = buf.readUInt8(offset)
+ offset += 1
+ data.tag = string.decode(buf, offset)
+ offset += string.decode.bytes
+ data.value = buf.toString('utf-8', offset, oldOffset + len)
+
+ data.issuerCritical = !!(data.flags & rcaa.ISSUER_CRITICAL)
+
+ rcaa.decode.bytes = len + 2
+
+ return data
+}
+
+rcaa.decode.bytes = 0
+
+rcaa.encodingLength = function (data) {
+ return string.encodingLength(data.tag) + string.encodingLength(data.value) + 2
+}
+
+const rmx = exports.mx = {}
+
+rmx.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rmx.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ buf.writeUInt16BE(data.preference || 0, offset)
+ offset += 2
+ name.encode(data.exchange, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rmx.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rmx.encode.bytes = 0
+
+rmx.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.preference = buf.readUInt16BE(offset)
+ offset += 2
+ data.exchange = name.decode(buf, offset)
+ offset += name.decode.bytes
+
+ rmx.decode.bytes = offset - oldOffset
+ return data
+}
+
+rmx.encodingLength = function (data) {
+ return 4 + name.encodingLength(data.exchange)
+}
+
+const ra = exports.a = {}
+
+ra.encode = function (host, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(ra.encodingLength(host))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(4, offset)
+ offset += 2
+ ip.toBuffer(host, buf, offset)
+ ra.encode.bytes = 6
+ return buf
+}
+
+ra.encode.bytes = 0
+
+ra.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ offset += 2
+ const host = ip.toString(buf, offset, 4)
+ ra.decode.bytes = 6
+ return host
+}
+ra.decode.bytes = 0
+
+ra.encodingLength = function () {
+ return 6
+}
+
+const raaaa = exports.aaaa = {}
+
+raaaa.encode = function (host, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(raaaa.encodingLength(host))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(16, offset)
+ offset += 2
+ ip.toBuffer(host, buf, offset)
+ raaaa.encode.bytes = 18
+ return buf
+}
+
+raaaa.encode.bytes = 0
+
+raaaa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ offset += 2
+ const host = ip.toString(buf, offset, 16)
+ raaaa.decode.bytes = 18
+ return host
+}
+
+raaaa.decode.bytes = 0
+
+raaaa.encodingLength = function () {
+ return 18
+}
+
+const roption = exports.option = {}
+
+roption.encode = function (option, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(roption.encodingLength(option))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const code = optioncodes.toCode(option.code)
+ buf.writeUInt16BE(code, offset)
+ offset += 2
+ if (option.data) {
+ buf.writeUInt16BE(option.data.length, offset)
+ offset += 2
+ option.data.copy(buf, offset)
+ offset += option.data.length
+ } else {
+ switch (code) {
+ // case 3: NSID. No encode makes sense.
+ // case 5,6,7: Not implementable
+ case 8: // ECS
+ // note: do IP math before calling
+ const spl = option.sourcePrefixLength || 0
+ const fam = option.family || (ip.isV4Format(option.ip) ? 1 : 2)
+ const ipBuf = ip.toBuffer(option.ip)
+ const ipLen = Math.ceil(spl / 8)
+ buf.writeUInt16BE(ipLen + 4, offset)
+ offset += 2
+ buf.writeUInt16BE(fam, offset)
+ offset += 2
+ buf.writeUInt8(spl, offset++)
+ buf.writeUInt8(option.scopePrefixLength || 0, offset++)
+
+ ipBuf.copy(buf, offset, 0, ipLen)
+ offset += ipLen
+ break
+ // case 9: EXPIRE (experimental)
+ // case 10: COOKIE. No encode makes sense.
+ case 11: // KEEP-ALIVE
+ if (option.timeout) {
+ buf.writeUInt16BE(2, offset)
+ offset += 2
+ buf.writeUInt16BE(option.timeout, offset)
+ offset += 2
+ } else {
+ buf.writeUInt16BE(0, offset)
+ offset += 2
+ }
+ break
+ case 12: // PADDING
+ const len = option.length || 0
+ buf.writeUInt16BE(len, offset)
+ offset += 2
+ buf.fill(0, offset, offset + len)
+ offset += len
+ break
+ // case 13: CHAIN. Experimental.
+ case 14: // KEY-TAG
+ const tagsLen = option.tags.length * 2
+ buf.writeUInt16BE(tagsLen, offset)
+ offset += 2
+ for (const tag of option.tags) {
+ buf.writeUInt16BE(tag, offset)
+ offset += 2
+ }
+ break
+ case 15: // EDNS_ERROR
+ const text = option.text || "";
+ buf.writeUInt16BE(text.length + 2, offset)
+ offset += 2;
+ buf.writeUInt16BE(option.extended_error, offset)
+ offset += 2;
+ buf.write(text, offset);
+ offset += option.text.length;
+ break;
+ default:
+ throw new Error(`Unknown roption code: ${option.code}`)
+ }
+ }
+
+ roption.encode.bytes = offset - oldOffset
+ return buf
+}
+
+roption.encode.bytes = 0
+
+roption.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const option = {}
+ option.code = buf.readUInt16BE(offset)
+ option.type = optioncodes.toString(option.code)
+ offset += 2
+ const len = buf.readUInt16BE(offset)
+ offset += 2
+ option.data = buf.slice(offset, offset + len)
+ switch (option.code) {
+ // case 3: NSID. No decode makes sense.
+ case 8: // ECS
+ option.family = buf.readUInt16BE(offset)
+ offset += 2
+ option.sourcePrefixLength = buf.readUInt8(offset++)
+ option.scopePrefixLength = buf.readUInt8(offset++)
+ const padded = Buffer.alloc((option.family === 1) ? 4 : 16)
+ buf.copy(padded, 0, offset, offset + len - 4)
+ option.ip = ip.toString(padded)
+ break
+ // case 12: Padding. No decode makes sense.
+ case 11: // KEEP-ALIVE
+ if (len > 0) {
+ option.timeout = buf.readUInt16BE(offset)
+ offset += 2
+ }
+ break
+ case 14:
+ option.tags = []
+ for (let i = 0; i < len; i += 2) {
+ option.tags.push(buf.readUInt16BE(offset))
+ offset += 2
+ }
+ // don't worry about default. caller will use data if desired
+ }
+
+ roption.decode.bytes = len + 4
+ return option
+}
+
+roption.decode.bytes = 0
+
+roption.encodingLength = function (option) {
+ if (option.data) {
+ return option.data.length + 4
+ }
+ const code = optioncodes.toCode(option.code)
+ switch (code) {
+ case 8: // ECS
+ const spl = option.sourcePrefixLength || 0
+ return Math.ceil(spl / 8) + 8
+ case 11: // KEEP-ALIVE
+ return (typeof option.timeout === 'number') ? 6 : 4
+ case 12: // PADDING
+ return option.length + 4
+ case 14: // KEY-TAG
+ return 4 + (option.tags.length * 2)
+ case 15: // EDNS_ERROR
+ return 4 + 2 + option.text.length
+ }
+ throw new Error(`Unknown roption code: ${option.code}`)
+}
+
+const ropt = exports.opt = {}
+
+ropt.encode = function (options, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(ropt.encodingLength(options))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const rdlen = encodingLengthList(options, roption)
+ buf.writeUInt16BE(rdlen, offset)
+ offset = encodeList(options, roption, buf, offset + 2)
+
+ ropt.encode.bytes = offset - oldOffset
+ return buf
+}
+
+ropt.encode.bytes = 0
+
+ropt.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const options = []
+ let rdlen = buf.readUInt16BE(offset)
+ offset += 2
+ let o = 0
+ while (rdlen > 0) {
+ options[o++] = roption.decode(buf, offset)
+ offset += roption.decode.bytes
+ rdlen -= roption.decode.bytes
+ }
+ ropt.decode.bytes = offset - oldOffset
+ return options
+}
+
+ropt.decode.bytes = 0
+
+ropt.encodingLength = function (options) {
+ return 2 + encodingLengthList(options || [], roption)
+}
+
+const rdnskey = exports.dnskey = {}
+
+rdnskey.PROTOCOL_DNSSEC = 3
+rdnskey.ZONE_KEY = 0x80
+rdnskey.SECURE_ENTRYPOINT = 0x8000
+
+rdnskey.encode = function (key, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rdnskey.encodingLength(key))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const keydata = key.key
+ if (!Buffer.isBuffer(keydata)) {
+ throw new Error('Key must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(key.flags, offset)
+ offset += 2
+ buf.writeUInt8(rdnskey.PROTOCOL_DNSSEC, offset)
+ offset += 1
+ buf.writeUInt8(key.algorithm, offset)
+ offset += 1
+ keydata.copy(buf, offset, 0, keydata.length)
+ offset += keydata.length
+
+ rdnskey.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rdnskey.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rdnskey.encode.bytes = 0
+
+rdnskey.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var key = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ key.flags = buf.readUInt16BE(offset)
+ offset += 2
+ if (buf.readUInt8(offset) !== rdnskey.PROTOCOL_DNSSEC) {
+ throw new Error('Protocol must be 3')
+ }
+ offset += 1
+ key.algorithm = buf.readUInt8(offset)
+ offset += 1
+ key.key = buf.slice(offset, oldOffset + length + 2)
+ offset += key.key.length
+ rdnskey.decode.bytes = offset - oldOffset
+ return key
+}
+
+rdnskey.decode.bytes = 0
+
+rdnskey.encodingLength = function (key) {
+ return 6 + Buffer.byteLength(key.key)
+}
+
+const rrrsig = exports.rrsig = {}
+
+rrrsig.encode = function (sig, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rrrsig.encodingLength(sig))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const signature = sig.signature
+ if (!Buffer.isBuffer(signature)) {
+ throw new Error('Signature must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(types.toType(sig.typeCovered), offset)
+ offset += 2
+ buf.writeUInt8(sig.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(sig.labels, offset)
+ offset += 1
+ buf.writeUInt32BE(sig.originalTTL, offset)
+ offset += 4
+ buf.writeUInt32BE(sig.expiration, offset)
+ offset += 4
+ buf.writeUInt32BE(sig.inception, offset)
+ offset += 4
+ buf.writeUInt16BE(sig.keyTag, offset)
+ offset += 2
+ name.encode(sig.signersName, buf, offset)
+ offset += name.encode.bytes
+ signature.copy(buf, offset, 0, signature.length)
+ offset += signature.length
+
+ rrrsig.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rrrsig.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rrrsig.encode.bytes = 0
+
+rrrsig.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var sig = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ sig.typeCovered = types.toString(buf.readUInt16BE(offset))
+ offset += 2
+ sig.algorithm = buf.readUInt8(offset)
+ offset += 1
+ sig.labels = buf.readUInt8(offset)
+ offset += 1
+ sig.originalTTL = buf.readUInt32BE(offset)
+ offset += 4
+ sig.expiration = buf.readUInt32BE(offset)
+ offset += 4
+ sig.inception = buf.readUInt32BE(offset)
+ offset += 4
+ sig.keyTag = buf.readUInt16BE(offset)
+ offset += 2
+ sig.signersName = name.decode(buf, offset)
+ offset += name.decode.bytes
+ sig.signature = buf.slice(offset, oldOffset + length + 2)
+ offset += sig.signature.length
+ rrrsig.decode.bytes = offset - oldOffset
+ return sig
+}
+
+rrrsig.decode.bytes = 0
+
+rrrsig.encodingLength = function (sig) {
+ return 20 +
+ name.encodingLength(sig.signersName) +
+ Buffer.byteLength(sig.signature)
+}
+
+const rrp = exports.rp = {}
+
+rrp.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rrp.encodingLength(data))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ offset += 2 // Leave space for length
+ name.encode(data.mbox || '.', buf, offset)
+ offset += name.encode.bytes
+ name.encode(data.txt || '.', buf, offset)
+ offset += name.encode.bytes
+ rrp.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rrp.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rrp.encode.bytes = 0
+
+rrp.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.mbox = name.decode(buf, offset) || '.'
+ offset += name.decode.bytes
+ data.txt = name.decode(buf, offset) || '.'
+ offset += name.decode.bytes
+ rrp.decode.bytes = offset - oldOffset
+ return data
+}
+
+rrp.decode.bytes = 0
+
+rrp.encodingLength = function (data) {
+ return 2 + name.encodingLength(data.mbox || '.') + name.encodingLength(data.txt || '.')
+}
+
+const typebitmap = {}
+
+typebitmap.encode = function (typelist, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(typebitmap.encodingLength(typelist))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var typesByWindow = []
+ for (var i = 0; i < typelist.length; i++) {
+ var typeid = types.toType(typelist[i])
+ if (typesByWindow[typeid >> 8] === undefined) {
+ typesByWindow[typeid >> 8] = []
+ }
+ typesByWindow[typeid >> 8][(typeid >> 3) & 0x1F] |= 1 << (7 - (typeid & 0x7))
+ }
+
+ for (i = 0; i < typesByWindow.length; i++) {
+ if (typesByWindow[i] !== undefined) {
+ var windowBuf = Buffer.from(typesByWindow[i])
+ buf.writeUInt8(i, offset)
+ offset += 1
+ buf.writeUInt8(windowBuf.length, offset)
+ offset += 1
+ windowBuf.copy(buf, offset)
+ offset += windowBuf.length
+ }
+ }
+
+ typebitmap.encode.bytes = offset - oldOffset
+ return buf
+}
+
+typebitmap.encode.bytes = 0
+
+typebitmap.decode = function (buf, offset, length) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var typelist = []
+ while (offset - oldOffset < length) {
+ var window = buf.readUInt8(offset)
+ offset += 1
+ var windowLength = buf.readUInt8(offset)
+ offset += 1
+ for (var i = 0; i < windowLength; i++) {
+ var b = buf.readUInt8(offset + i)
+ for (var j = 0; j < 8; j++) {
+ if (b & (1 << (7 - j))) {
+ var typeid = types.toString((window << 8) | (i << 3) | j)
+ typelist.push(typeid)
+ }
+ }
+ }
+ offset += windowLength
+ }
+
+ typebitmap.decode.bytes = offset - oldOffset
+ return typelist
+}
+
+typebitmap.decode.bytes = 0
+
+typebitmap.encodingLength = function (typelist) {
+ var extents = []
+ for (var i = 0; i < typelist.length; i++) {
+ var typeid = types.toType(typelist[i])
+ extents[typeid >> 8] = Math.max(extents[typeid >> 8] || 0, typeid & 0xFF)
+ }
+
+ var len = 0
+ for (i = 0; i < extents.length; i++) {
+ if (extents[i] !== undefined) {
+ len += 2 + Math.ceil((extents[i] + 1) / 8)
+ }
+ }
+
+ return len
+}
+
+const rnsec = exports.nsec = {}
+
+rnsec.encode = function (record, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnsec.encodingLength(record))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ offset += 2 // Leave space for length
+ name.encode(record.nextDomain, buf, offset)
+ offset += name.encode.bytes
+ typebitmap.encode(record.rrtypes, buf, offset)
+ offset += typebitmap.encode.bytes
+
+ rnsec.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rnsec.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rnsec.encode.bytes = 0
+
+rnsec.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var record = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ record.nextDomain = name.decode(buf, offset)
+ offset += name.decode.bytes
+ record.rrtypes = typebitmap.decode(buf, offset, length - (offset - oldOffset))
+ offset += typebitmap.decode.bytes
+
+ rnsec.decode.bytes = offset - oldOffset
+ return record
+}
+
+rnsec.decode.bytes = 0
+
+rnsec.encodingLength = function (record) {
+ return 2 +
+ name.encodingLength(record.nextDomain) +
+ typebitmap.encodingLength(record.rrtypes)
+}
+
+const rnsec3 = exports.nsec3 = {}
+
+rnsec3.encode = function (record, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnsec3.encodingLength(record))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const salt = record.salt
+ if (!Buffer.isBuffer(salt)) {
+ throw new Error('salt must be a Buffer')
+ }
+
+ const nextDomain = record.nextDomain
+ if (!Buffer.isBuffer(nextDomain)) {
+ throw new Error('nextDomain must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt8(record.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(record.flags, offset)
+ offset += 1
+ buf.writeUInt16BE(record.iterations, offset)
+ offset += 2
+ buf.writeUInt8(salt.length, offset)
+ offset += 1
+ salt.copy(buf, offset, 0, salt.length)
+ offset += salt.length
+ buf.writeUInt8(nextDomain.length, offset)
+ offset += 1
+ nextDomain.copy(buf, offset, 0, nextDomain.length)
+ offset += nextDomain.length
+ typebitmap.encode(record.rrtypes, buf, offset)
+ offset += typebitmap.encode.bytes
+
+ rnsec3.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rnsec3.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rnsec3.encode.bytes = 0
+
+rnsec3.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var record = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ record.algorithm = buf.readUInt8(offset)
+ offset += 1
+ record.flags = buf.readUInt8(offset)
+ offset += 1
+ record.iterations = buf.readUInt16BE(offset)
+ offset += 2
+ const saltLength = buf.readUInt8(offset)
+ offset += 1
+ record.salt = buf.slice(offset, offset + saltLength)
+ offset += saltLength
+ const hashLength = buf.readUInt8(offset)
+ offset += 1
+ record.nextDomain = buf.slice(offset, offset + hashLength)
+ offset += hashLength
+ record.rrtypes = typebitmap.decode(buf, offset, length - (offset - oldOffset))
+ offset += typebitmap.decode.bytes
+
+ rnsec3.decode.bytes = offset - oldOffset
+ return record
+}
+
+rnsec3.decode.bytes = 0
+
+rnsec3.encodingLength = function (record) {
+ return 8 +
+ record.salt.length +
+ record.nextDomain.length +
+ typebitmap.encodingLength(record.rrtypes)
+}
+
+const rds = exports.ds = {}
+
+rds.encode = function (digest, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rds.encodingLength(digest))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const digestdata = digest.digest
+ if (!Buffer.isBuffer(digestdata)) {
+ throw new Error('Digest must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(digest.keyTag, offset)
+ offset += 2
+ buf.writeUInt8(digest.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(digest.digestType, offset)
+ offset += 1
+ digestdata.copy(buf, offset, 0, digestdata.length)
+ offset += digestdata.length
+
+ rds.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rds.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rds.encode.bytes = 0
+
+rds.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var digest = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ digest.keyTag = buf.readUInt16BE(offset)
+ offset += 2
+ digest.algorithm = buf.readUInt8(offset)
+ offset += 1
+ digest.digestType = buf.readUInt8(offset)
+ offset += 1
+ digest.digest = buf.slice(offset, oldOffset + length + 2)
+ offset += digest.digest.length
+ rds.decode.bytes = offset - oldOffset
+ return digest
+}
+
+rds.decode.bytes = 0
+
+rds.encodingLength = function (digest) {
+ return 6 + Buffer.byteLength(digest.digest)
+}
+
+const svcparam = exports.svcparam = {}
+
+svcparam.keyToNumber = function(keyName) {
+ switch (keyName.toLowerCase()) {
+ case 'mandatory': return 0
+ case 'alpn' : return 1
+ case 'no-default-alpn' : return 2
+ case 'port' : return 3
+ case 'ipv4hint' : return 4
+ case 'echconfig' : return 5
+ case 'ipv6hint' : return 6
+ case 'odoh' : return 32769
+ case 'key65535' : return 65535
+ }
+ if (!keyName.startsWith('key')) {
+ throw new Error(`Name must start with key: ${keyName}`);
+ }
+
+ return Number.parseInt(keyName.substring(3));
+}
+
+svcparam.numberToKeyName = function(number) {
+ switch (number) {
+ case 0 : return 'mandatory'
+ case 1 : return 'alpn'
+ case 2 : return 'no-default-alpn'
+ case 3 : return 'port'
+ case 4 : return 'ipv4hint'
+ case 5 : return 'echconfig'
+ case 6 : return 'ipv6hint'
+ case 32769 : return 'odoh'
+ }
+
+ return `key${number}`;
+}
+
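+// On the wire each SvcParam is key (uint16) | length (uint16) | value; for
+// example svcparam.encode({ key: 'port', value: 443 }) yields
+// <Buffer 00 03 00 02 01 bb>. Note that svcparam.decode below returns the raw
+// value bytes as a utf-8 string rather than re-parsing them per key.
+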
+svcparam.encode = function(param, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(svcparam.encodingLength(param))
+ if (!offset) offset = 0
+
+ let key = param.key;
+ if (typeof param.key !== 'number') {
+ key = svcparam.keyToNumber(param.key);
+ }
+
+ buf.writeUInt16BE(key || 0, offset)
+ offset += 2;
+ svcparam.encode.bytes = 2;
+
+ if (key == 0) { // mandatory
+ let values = param.value;
+ if (!Array.isArray(values)) values = [values];
+ buf.writeUInt16BE(values.length*2, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let val of values) {
+ if (typeof val !== 'number') {
+ val = svcparam.keyToNumber(val);
+ }
+ buf.writeUInt16BE(val, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ }
+ } else if (key == 1) { // alpn
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ // Each alpn-id is prefixed by its length as a single byte, so the reduce
+ // initialValue is the number of ids (one length byte per id).
+ let total = val.reduce(function(result, id) {
+ return result += id.length;
+ }, val.length);
+
+ buf.writeUInt16BE(total, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let id of val) {
+ buf.writeUInt8(id.length, offset);
+ offset += 1;
+ svcparam.encode.bytes += 1;
+
+ buf.write(id, offset);
+ offset += id.length;
+ svcparam.encode.bytes += id.length;
+ }
+ } else if (key == 2) { // no-default-alpn
+ buf.writeUInt16BE(0, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ } else if (key == 3) { // port
+ buf.writeUInt16BE(2, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.writeUInt16BE(param.value || 0, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ } else if (key == 4) { //ipv4hint
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ buf.writeUInt16BE(val.length*4, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let host of val) {
+ ip.toBuffer(host, buf, offset)
+ offset += 4;
+ svcparam.encode.bytes += 4;
+ }
+ } else if (key == 5) { //echconfig
+ if (svcparam.ech) {
+ buf.writeUInt16BE(svcparam.ech.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ for (let i = 0; i < svcparam.ech.length; i++) {
+ buf.writeUInt8(svcparam.ech[i], offset);
+ offset++;
+ }
+ svcparam.encode.bytes += svcparam.ech.length;
+ svcparam.ech = null;
+ } else {
+ buf.writeUInt16BE(param.value.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.write(param.value, offset);
+ offset += param.value.length;
+ svcparam.encode.bytes += param.value.length;
+ }
+ } else if (key == 6) { //ipv6hint
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ buf.writeUInt16BE(val.length*16, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let host of val) {
+ ip.toBuffer(host, buf, offset)
+ offset += 16;
+ svcparam.encode.bytes += 16;
+ }
+ } else if (key == 32769) { //odoh
+ if (svcparam.odoh) {
+ buf.writeUInt16BE(svcparam.odoh.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ for (let i = 0; i < svcparam.odoh.length; i++) {
+ buf.writeUInt8(svcparam.odoh[i], offset);
+ offset++;
+ }
+ svcparam.encode.bytes += svcparam.odoh.length;
+ svcparam.odoh = null;
+ } else {
+ buf.writeUInt16BE(param.value.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.write(param.value, offset);
+ offset += param.value.length;
+ svcparam.encode.bytes += param.value.length;
+ }
+ } else {
+ // Unknown option
+ buf.writeUInt16BE(0, offset); // 0 length since we don't know how to encode
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ }
+ return buf;
+}
+
+svcparam.encode.bytes = 0;
+
+svcparam.decode = function (buf, offset) {
+ let param = {};
+ let id = buf.readUInt16BE(offset);
+ param.key = svcparam.numberToKeyName(id);
+ offset += 2;
+ svcparam.decode.bytes = 2;
+
+ let len = buf.readUInt16BE(offset);
+ offset += 2;
+ svcparam.decode.bytes += 2;
+
+ param.value = buf.toString('utf-8', offset, offset + len);
+ offset += len;
+ svcparam.decode.bytes += len;
+
+ return param;
+}
+
+svcparam.decode.bytes = 0;
+
+svcparam.encodingLength = function (param) {
+ // 2 bytes for type, 2 bytes for length, what's left for the value
+
+ switch (param.key) {
+ case 'mandatory' : return 4 + 2*(Array.isArray(param.value) ? param.value.length : 1)
+ case 'alpn' : {
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ let total = val.reduce(function(result, id) {
+ return result += id.length;
+ }, val.length);
+ return 4 + total;
+ }
+ case 'no-default-alpn' : return 4
+ case 'port' : return 4 + 2
+ case 'ipv4hint' : return 4 + 4 * (Array.isArray(param.value) ? param.value.length : 1)
+ case 'echconfig' : {
+ if (param.needBase64Decode) {
+ svcparam.ech = Buffer.from(param.value, "base64");
+ return 4 + svcparam.ech.length;
+ }
+ return 4 + param.value.length
+ }
+ case 'ipv6hint' : return 4 + 16 * (Array.isArray(param.value) ? param.value.length : 1)
+ case 'odoh' : {
+ if (param.needBase64Decode) {
+ svcparam.odoh = Buffer.from(param.value, "base64");
+ return 4 + svcparam.odoh.length;
+ }
+ return 4 + param.value.length
+ }
+ case 'key65535' : return 4
+ default: return 4 // unknown option
+ }
+}
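+
+// Note: for 'echconfig' and 'odoh' params with needBase64Decode set,
+// encodingLength stashes the base64-decoded bytes on the svcparam object so
+// encode can emit them verbatim. rhttpssvc.encode recomputes encodingLength
+// to write RDLENGTH, so the stash is repopulated right before the params are
+// encoded.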
+
+const rhttpssvc = exports.httpssvc = {}
+
+rhttpssvc.encode = function(data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rhttpssvc.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(rhttpssvc.encodingLength(data) - 2, offset);
+ offset += 2;
+
+ buf.writeUInt16BE(data.priority || 0, offset);
+ rhttpssvc.encode.bytes = 4;
+ offset += 2;
+ name.encode(data.name, buf, offset);
+ rhttpssvc.encode.bytes += name.encode.bytes;
+ offset += name.encode.bytes;
+
+ if (data.priority == 0) {
+ return buf;
+ }
+
+ for (let val of data.values) {
+ svcparam.encode(val, buf, offset);
+ offset += svcparam.encode.bytes;
+ rhttpssvc.encode.bytes += svcparam.encode.bytes;
+ }
+
+ return buf;
+}
+
+rhttpssvc.encode.bytes = 0;
+
+rhttpssvc.decode = function (buf, offset) {
+ let rdlen = buf.readUInt16BE(offset);
+ let oldOffset = offset;
+ offset += 2;
+ let record = {}
+ record.values = [];
+ record.priority = buf.readUInt16BE(offset);
+ offset += 2;
+ rhttpssvc.decode.bytes = 4;
+ record.name = name.decode(buf, offset);
+ offset += name.decode.bytes;
+ rhttpssvc.decode.bytes += name.decode.bytes;
+
+ while (rdlen > rhttpssvc.decode.bytes - 2) {
+ let rec1 = svcparam.decode(buf, offset);
+ offset += svcparam.decode.bytes;
+ rhttpssvc.decode.bytes += svcparam.decode.bytes;
+ record.values.push(rec1);
+ }
+
+ return record;
+}
+
+rhttpssvc.decode.bytes = 0;
+
+rhttpssvc.encodingLength = function (data) {
+ let len =
+ 2 + // rdlen
+ 2 + // priority
+ name.encodingLength(data.name);
+ len += data.values.map(svcparam.encodingLength).reduce((acc, len) => acc + len, 0);
+ return len;
+}
+
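+// The HTTPS/SVCB rdata handled above is an object of the form (the values
+// shown here are illustrative):
+//   { priority: 1, name: '.', values: [{ key: 'alpn', value: ['h2'] },
+//                                      { key: 'port', value: 443 }] }
+// A priority of 0 (AliasMode) encodes only the priority and the target name.
+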
+const renc = exports.record = function (type) {
+ switch (type.toUpperCase()) {
+ case 'A': return ra
+ case 'PTR': return rptr
+ case 'CNAME': return rcname
+ case 'DNAME': return rdname
+ case 'TXT': return rtxt
+ case 'NULL': return rnull
+ case 'AAAA': return raaaa
+ case 'SRV': return rsrv
+ case 'HINFO': return rhinfo
+ case 'CAA': return rcaa
+ case 'NS': return rns
+ case 'SOA': return rsoa
+ case 'MX': return rmx
+ case 'OPT': return ropt
+ case 'DNSKEY': return rdnskey
+ case 'RRSIG': return rrrsig
+ case 'RP': return rrp
+ case 'NSEC': return rnsec
+ case 'NSEC3': return rnsec3
+ case 'DS': return rds
+ case 'HTTPS': return rhttpssvc
+ }
+ return runknown
+}
+
+const answer = exports.answer = {}
+
+answer.encode = function (a, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(answer.encodingLength(a))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ name.encode(a.name, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(types.toType(a.type), offset)
+
+ if (a.type.toUpperCase() === 'OPT') {
+ if (a.name !== '.') {
+ throw new Error('OPT name must be root.')
+ }
+ buf.writeUInt16BE(a.udpPayloadSize || 4096, offset + 2)
+ buf.writeUInt8(a.extendedRcode || 0, offset + 4)
+ buf.writeUInt8(a.ednsVersion || 0, offset + 5)
+ buf.writeUInt16BE(a.flags || 0, offset + 6)
+
+ offset += 8
+ ropt.encode(a.options || [], buf, offset)
+ offset += ropt.encode.bytes
+ } else {
+ let klass = classes.toClass(a.class === undefined ? 'IN' : a.class)
+ if (a.flush) klass |= FLUSH_MASK // the 1st bit of the class is the flush bit
+ buf.writeUInt16BE(klass, offset + 2)
+ buf.writeUInt32BE(a.ttl || 0, offset + 4)
+
+ offset += 8
+ const enc = renc(a.type)
+ enc.encode(a.data, buf, offset)
+ offset += enc.encode.bytes
+ }
+
+ answer.encode.bytes = offset - oldOffset
+ return buf
+}
+
+answer.encode.bytes = 0
+
+answer.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const a = {}
+ const oldOffset = offset
+
+ a.name = name.decode(buf, offset)
+ offset += name.decode.bytes
+ a.type = types.toString(buf.readUInt16BE(offset))
+ if (a.type === 'OPT') {
+ a.udpPayloadSize = buf.readUInt16BE(offset + 2)
+ a.extendedRcode = buf.readUInt8(offset + 4)
+ a.ednsVersion = buf.readUInt8(offset + 5)
+ a.flags = buf.readUInt16BE(offset + 6)
+ a.flag_do = ((a.flags >> 15) & 0x1) === 1
+ a.options = ropt.decode(buf, offset + 8)
+ offset += 8 + ropt.decode.bytes
+ } else {
+ const klass = buf.readUInt16BE(offset + 2)
+ a.ttl = buf.readUInt32BE(offset + 4)
+ a.class = classes.toString(klass & NOT_FLUSH_MASK)
+ a.flush = !!(klass & FLUSH_MASK)
+
+ const enc = renc(a.type)
+ a.data = enc.decode(buf, offset + 8)
+ offset += 8 + enc.decode.bytes
+ }
+
+ answer.decode.bytes = offset - oldOffset
+ return a
+}
+
+answer.decode.bytes = 0
+
+answer.encodingLength = function (a) {
+ const data = (a.data !== null && a.data !== undefined) ? a.data : a.options
+ return name.encodingLength(a.name) + 8 + renc(a.type).encodingLength(data)
+}
+
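+// An answer is encoded as name | type | class+ttl (or the OPT header fields) |
+// rdata, e.g. (illustrative):
+//   { name: 'example.com', type: 'A', ttl: 60, class: 'IN', data: '127.0.0.1' }
+// OPT pseudo-records carry udpPayloadSize, extendedRcode, ednsVersion, flags
+// and options instead of class/ttl/data, and their name must be '.'.
+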
+const question = exports.question = {}
+
+question.encode = function (q, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(question.encodingLength(q))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ name.encode(q.name, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(types.toType(q.type), offset)
+ offset += 2
+
+ buf.writeUInt16BE(classes.toClass(q.class === undefined ? 'IN' : q.class), offset)
+ offset += 2
+
+ question.encode.bytes = offset - oldOffset
+ return buf
+}
+
+question.encode.bytes = 0
+
+question.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ const q = {}
+
+ q.name = name.decode(buf, offset)
+ offset += name.decode.bytes
+
+ q.type = types.toString(buf.readUInt16BE(offset))
+ offset += 2
+
+ q.class = classes.toString(buf.readUInt16BE(offset))
+ offset += 2
+
+ const qu = !!(q.class & QU_MASK)
+ if (qu) q.class &= NOT_QU_MASK
+
+ question.decode.bytes = offset - oldOffset
+ return q
+}
+
+question.decode.bytes = 0
+
+question.encodingLength = function (q) {
+ return name.encodingLength(q.name) + 4
+}
+
+exports.AUTHORITATIVE_ANSWER = 1 << 10
+exports.TRUNCATED_RESPONSE = 1 << 9
+exports.RECURSION_DESIRED = 1 << 8
+exports.RECURSION_AVAILABLE = 1 << 7
+exports.AUTHENTIC_DATA = 1 << 5
+exports.CHECKING_DISABLED = 1 << 4
+exports.DNSSEC_OK = 1 << 15
+
+exports.encode = function (result, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(exports.encodingLength(result))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ if (!result.questions) result.questions = []
+ if (!result.answers) result.answers = []
+ if (!result.authorities) result.authorities = []
+ if (!result.additionals) result.additionals = []
+
+ header.encode(result, buf, offset)
+ offset += header.encode.bytes
+
+ offset = encodeList(result.questions, question, buf, offset)
+ offset = encodeList(result.answers, answer, buf, offset)
+ offset = encodeList(result.authorities, answer, buf, offset)
+ offset = encodeList(result.additionals, answer, buf, offset)
+
+ exports.encode.bytes = offset - oldOffset
+
+ return buf
+}
+
+exports.encode.bytes = 0
+
+exports.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ const result = header.decode(buf, offset)
+ offset += header.decode.bytes
+
+ offset = decodeList(result.questions, question, buf, offset)
+ offset = decodeList(result.answers, answer, buf, offset)
+ offset = decodeList(result.authorities, answer, buf, offset)
+ offset = decodeList(result.additionals, answer, buf, offset)
+
+ exports.decode.bytes = offset - oldOffset
+
+ return result
+}
+
+exports.decode.bytes = 0
+
+exports.encodingLength = function (result) {
+ return header.encodingLength(result) +
+ encodingLengthList(result.questions || [], question) +
+ encodingLengthList(result.answers || [], answer) +
+ encodingLengthList(result.authorities || [], answer) +
+ encodingLengthList(result.additionals || [], answer)
+}
+
+exports.streamEncode = function (result) {
+ const buf = exports.encode(result)
+ const sbuf = Buffer.allocUnsafe(2)
+ sbuf.writeUInt16BE(buf.byteLength)
+ const combine = Buffer.concat([sbuf, buf])
+ exports.streamEncode.bytes = combine.byteLength
+ return combine
+}
+
+exports.streamEncode.bytes = 0
+
+exports.streamDecode = function (sbuf) {
+ const len = sbuf.readUInt16BE(0)
+ if (sbuf.byteLength < len + 2) {
+ // not enough data
+ return null
+ }
+ const result = exports.decode(sbuf.slice(2))
+ exports.streamDecode.bytes = exports.decode.bytes
+ return result
+}
+
+exports.streamDecode.bytes = 0
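+
+// streamEncode/streamDecode add and strip the two-byte big-endian length
+// prefix used when DNS messages travel over a stream transport (TCP, and TLS
+// per RFC 7858), i.e. streamEncode(msg) is the length prefix followed by
+// encode(msg).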
+
+function encodingLengthList (list, enc) {
+ let len = 0
+ for (let i = 0; i < list.length; i++) len += enc.encodingLength(list[i])
+ return len
+}
+
+function encodeList (list, enc, buf, offset) {
+ for (let i = 0; i < list.length; i++) {
+ enc.encode(list[i], buf, offset)
+ offset += enc.encode.bytes
+ }
+ return offset
+}
+
+function decodeList (list, enc, buf, offset) {
+ for (let i = 0; i < list.length; i++) {
+ list[i] = enc.decode(buf, offset)
+ offset += enc.decode.bytes
+ }
+ return offset
+}
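
For orientation, a minimal sketch of driving the codec end to end, in the same
style test.js uses further down (the query contents here are illustrative):

    const packet = require('./')

    // Encode a recursion-desired A query and decode it back.
    const buf = packet.encode({
      type: 'query',
      id: 1,
      flags: packet.RECURSION_DESIRED,
      questions: [{ type: 'A', name: 'example.com' }]
    })
    const msg = packet.decode(buf)
    // msg.type === 'query' and msg.questions[0].name === 'example.com'
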
diff --git a/testing/xpcshell/dns-packet/opcodes.js b/testing/xpcshell/dns-packet/opcodes.js
new file mode 100644
index 0000000000..32b0a1b4de
--- /dev/null
+++ b/testing/xpcshell/dns-packet/opcodes.js
@@ -0,0 +1,50 @@
+'use strict'
+
+/*
+ * Traditional DNS header OPCODEs (4 bits) defined by IANA in
+ * https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-5
+ */
+
+exports.toString = function (opcode) {
+ switch (opcode) {
+ case 0: return 'QUERY'
+ case 1: return 'IQUERY'
+ case 2: return 'STATUS'
+ case 3: return 'OPCODE_3'
+ case 4: return 'NOTIFY'
+ case 5: return 'UPDATE'
+ case 6: return 'OPCODE_6'
+ case 7: return 'OPCODE_7'
+ case 8: return 'OPCODE_8'
+ case 9: return 'OPCODE_9'
+ case 10: return 'OPCODE_10'
+ case 11: return 'OPCODE_11'
+ case 12: return 'OPCODE_12'
+ case 13: return 'OPCODE_13'
+ case 14: return 'OPCODE_14'
+ case 15: return 'OPCODE_15'
+ }
+ return 'OPCODE_' + opcode
+}
+
+exports.toOpcode = function (code) {
+ switch (code.toUpperCase()) {
+ case 'QUERY': return 0
+ case 'IQUERY': return 1
+ case 'STATUS': return 2
+ case 'OPCODE_3': return 3
+ case 'NOTIFY': return 4
+ case 'UPDATE': return 5
+ case 'OPCODE_6': return 6
+ case 'OPCODE_7': return 7
+ case 'OPCODE_8': return 8
+ case 'OPCODE_9': return 9
+ case 'OPCODE_10': return 10
+ case 'OPCODE_11': return 11
+ case 'OPCODE_12': return 12
+ case 'OPCODE_13': return 13
+ case 'OPCODE_14': return 14
+ case 'OPCODE_15': return 15
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/optioncodes.js b/testing/xpcshell/dns-packet/optioncodes.js
new file mode 100644
index 0000000000..a683ce81e6
--- /dev/null
+++ b/testing/xpcshell/dns-packet/optioncodes.js
@@ -0,0 +1,61 @@
+'use strict'
+
+exports.toString = function (type) {
+ switch (type) {
+ // list at
+ // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-11
+ case 1: return 'LLQ'
+ case 2: return 'UL'
+ case 3: return 'NSID'
+ case 5: return 'DAU'
+ case 6: return 'DHU'
+ case 7: return 'N3U'
+ case 8: return 'CLIENT_SUBNET'
+ case 9: return 'EXPIRE'
+ case 10: return 'COOKIE'
+ case 11: return 'TCP_KEEPALIVE'
+ case 12: return 'PADDING'
+ case 13: return 'CHAIN'
+ case 14: return 'KEY_TAG'
+ case 15: return 'EDNS_ERROR'
+ case 26946: return 'DEVICEID'
+ }
+ if (type < 0) {
+ return null
+ }
+ return `OPTION_${type}`
+}
+
+exports.toCode = function (name) {
+ if (typeof name === 'number') {
+ return name
+ }
+ if (!name) {
+ return -1
+ }
+ switch (name.toUpperCase()) {
+ case 'OPTION_0': return 0
+ case 'LLQ': return 1
+ case 'UL': return 2
+ case 'NSID': return 3
+ case 'OPTION_4': return 4
+ case 'DAU': return 5
+ case 'DHU': return 6
+ case 'N3U': return 7
+ case 'CLIENT_SUBNET': return 8
+ case 'EXPIRE': return 9
+ case 'COOKIE': return 10
+ case 'TCP_KEEPALIVE': return 11
+ case 'PADDING': return 12
+ case 'CHAIN': return 13
+ case 'KEY_TAG': return 14
+ case 'EDNS_ERROR': return 15
+ case 'DEVICEID': return 26946
+ case 'OPTION_65535': return 65535
+ }
+ const m = name.match(/_(\d+)$/)
+ if (m) {
+ return parseInt(m[1], 10)
+ }
+ return -1
+}
diff --git a/testing/xpcshell/dns-packet/package.json b/testing/xpcshell/dns-packet/package.json
new file mode 100644
index 0000000000..31a859fc2b
--- /dev/null
+++ b/testing/xpcshell/dns-packet/package.json
@@ -0,0 +1,48 @@
+{
+ "name": "dns-packet",
+ "version": "5.2.1",
+ "description": "An abstract-encoding compliant module for encoding / decoding DNS packets",
+ "author": "Mathias Buus",
+ "license": "MIT",
+ "repository": "mafintosh/dns-packet",
+ "homepage": "https://github.com/mafintosh/dns-packet",
+ "engines": {
+ "node": ">=6"
+ },
+ "scripts": {
+ "clean": "rm -rf coverage .nyc_output/",
+ "lint": "eslint --color *.js examples/*.js",
+ "pretest": "npm run lint",
+ "test": "tape test.js",
+ "coverage": "nyc -r html npm test"
+ },
+ "dependencies": {
+ "ip": "^1.1.5"
+ },
+ "devDependencies": {
+ "eslint": "^5.14.1",
+ "eslint-config-standard": "^12.0.0",
+ "eslint-plugin-import": "^2.16.0",
+ "eslint-plugin-node": "^8.0.1",
+ "eslint-plugin-promise": "^4.0.1",
+ "eslint-plugin-standard": "^4.0.0",
+ "nyc": "^13.3.0",
+ "tape": "^4.10.1"
+ },
+ "keywords": [
+ "dns",
+ "packet",
+ "encodings",
+ "encoding",
+ "encoder",
+ "abstract-encoding"
+ ],
+ "files": [
+ "index.js",
+ "types.js",
+ "rcodes.js",
+ "opcodes.js",
+ "classes.js",
+ "optioncodes.js"
+ ]
+}
diff --git a/testing/xpcshell/dns-packet/rcodes.js b/testing/xpcshell/dns-packet/rcodes.js
new file mode 100644
index 0000000000..0500887c2a
--- /dev/null
+++ b/testing/xpcshell/dns-packet/rcodes.js
@@ -0,0 +1,50 @@
+'use strict'
+
+/*
+ * Traditional DNS header RCODEs (4 bits) defined by IANA in
+ * https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
+ */
+
+exports.toString = function (rcode) {
+ switch (rcode) {
+ case 0: return 'NOERROR'
+ case 1: return 'FORMERR'
+ case 2: return 'SERVFAIL'
+ case 3: return 'NXDOMAIN'
+ case 4: return 'NOTIMP'
+ case 5: return 'REFUSED'
+ case 6: return 'YXDOMAIN'
+ case 7: return 'YXRRSET'
+ case 8: return 'NXRRSET'
+ case 9: return 'NOTAUTH'
+ case 10: return 'NOTZONE'
+ case 11: return 'RCODE_11'
+ case 12: return 'RCODE_12'
+ case 13: return 'RCODE_13'
+ case 14: return 'RCODE_14'
+ case 15: return 'RCODE_15'
+ }
+ return 'RCODE_' + rcode
+}
+
+exports.toRcode = function (code) {
+ switch (code.toUpperCase()) {
+ case 'NOERROR': return 0
+ case 'FORMERR': return 1
+ case 'SERVFAIL': return 2
+ case 'NXDOMAIN': return 3
+ case 'NOTIMP': return 4
+ case 'REFUSED': return 5
+ case 'YXDOMAIN': return 6
+ case 'YXRRSET': return 7
+ case 'NXRRSET': return 8
+ case 'NOTAUTH': return 9
+ case 'NOTZONE': return 10
+ case 'RCODE_11': return 11
+ case 'RCODE_12': return 12
+ case 'RCODE_13': return 13
+ case 'RCODE_14': return 14
+ case 'RCODE_15': return 15
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/test.js b/testing/xpcshell/dns-packet/test.js
new file mode 100644
index 0000000000..adf4757dae
--- /dev/null
+++ b/testing/xpcshell/dns-packet/test.js
@@ -0,0 +1,613 @@
+'use strict'
+
+const tape = require('tape')
+const packet = require('./')
+const rcodes = require('./rcodes')
+const opcodes = require('./opcodes')
+const optioncodes = require('./optioncodes')
+
+tape('unknown', function (t) {
+ testEncoder(t, packet.unknown, Buffer.from('hello world'))
+ t.end()
+})
+
+tape('txt', function (t) {
+ testEncoder(t, packet.txt, [])
+ testEncoder(t, packet.txt, ['hello world'])
+ testEncoder(t, packet.txt, ['hello', 'world'])
+ testEncoder(t, packet.txt, [Buffer.from([0, 1, 2, 3, 4, 5])])
+ testEncoder(t, packet.txt, ['a', 'b', Buffer.from([0, 1, 2, 3, 4, 5])])
+ testEncoder(t, packet.txt, ['', Buffer.allocUnsafe(0)])
+ t.end()
+})
+
+tape('txt-scalar-string', function (t) {
+ const buf = packet.txt.encode('hi')
+ const val = packet.txt.decode(buf)
+ t.ok(val.length === 1, 'array length')
+ t.ok(val[0].toString() === 'hi', 'data')
+ t.end()
+})
+
+tape('txt-scalar-buffer', function (t) {
+ const data = Buffer.from([0, 1, 2, 3, 4, 5])
+ const buf = packet.txt.encode(data)
+ const val = packet.txt.decode(buf)
+ t.ok(val.length === 1, 'array length')
+ t.ok(val[0].equals(data), 'data')
+ t.end()
+})
+
+tape('txt-invalid-data', function (t) {
+ t.throws(function () { packet.txt.encode(null) }, 'null')
+ t.throws(function () { packet.txt.encode(undefined) }, 'undefined')
+ t.throws(function () { packet.txt.encode(10) }, 'number')
+ t.end()
+})
+
+tape('null', function (t) {
+ testEncoder(t, packet.null, Buffer.from([0, 1, 2, 3, 4, 5]))
+ t.end()
+})
+
+tape('hinfo', function (t) {
+ testEncoder(t, packet.hinfo, { cpu: 'intel', os: 'best one' })
+ t.end()
+})
+
+tape('ptr', function (t) {
+ testEncoder(t, packet.ptr, 'hello.world.com')
+ t.end()
+})
+
+tape('cname', function (t) {
+ testEncoder(t, packet.cname, 'hello.cname.world.com')
+ t.end()
+})
+
+tape('dname', function (t) {
+ testEncoder(t, packet.dname, 'hello.dname.world.com')
+ t.end()
+})
+
+tape('srv', function (t) {
+ testEncoder(t, packet.srv, { port: 9999, target: 'hello.world.com' })
+ testEncoder(t, packet.srv, { port: 9999, target: 'hello.world.com', priority: 42, weight: 10 })
+ t.end()
+})
+
+tape('caa', function (t) {
+ testEncoder(t, packet.caa, { flags: 128, tag: 'issue', value: 'letsencrypt.org', issuerCritical: true })
+ testEncoder(t, packet.caa, { tag: 'issue', value: 'letsencrypt.org', issuerCritical: true })
+ testEncoder(t, packet.caa, { tag: 'issue', value: 'letsencrypt.org' })
+ t.end()
+})
+
+tape('mx', function (t) {
+ testEncoder(t, packet.mx, { preference: 10, exchange: 'mx.hello.world.com' })
+ testEncoder(t, packet.mx, { exchange: 'mx.hello.world.com' })
+ t.end()
+})
+
+tape('ns', function (t) {
+ testEncoder(t, packet.ns, 'ns.world.com')
+ t.end()
+})
+
+tape('soa', function (t) {
+ testEncoder(t, packet.soa, {
+ mname: 'hello.world.com',
+ rname: 'root.hello.world.com',
+ serial: 2018010400,
+ refresh: 14400,
+ retry: 3600,
+ expire: 604800,
+ minimum: 3600
+ })
+ t.end()
+})
+
+tape('a', function (t) {
+ testEncoder(t, packet.a, '127.0.0.1')
+ t.end()
+})
+
+tape('aaaa', function (t) {
+ testEncoder(t, packet.aaaa, 'fe80::1')
+ t.end()
+})
+
+tape('query', function (t) {
+ testEncoder(t, packet, {
+ type: 'query',
+ questions: [{
+ type: 'A',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'query',
+ id: 42,
+ questions: [{
+ type: 'A',
+ class: 'IN',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'query',
+ id: 42,
+ questions: [{
+ type: 'A',
+ class: 'CH',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ t.end()
+})
+
+tape('response', function (t) {
+ testEncoder(t, packet, {
+ type: 'response',
+ answers: [{
+ type: 'A',
+ class: 'IN',
+ flush: true,
+ name: 'hello.a.com',
+ data: '127.0.0.1'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ flags: packet.TRUNCATED_RESPONSE,
+ answers: [{
+ type: 'A',
+ class: 'IN',
+ name: 'hello.a.com',
+ data: '127.0.0.1'
+ }, {
+ type: 'SRV',
+ class: 'IN',
+ name: 'hello.srv.com',
+ data: {
+ port: 9090,
+ target: 'hello.target.com'
+ }
+ }, {
+ type: 'CNAME',
+ class: 'IN',
+ name: 'hello.cname.com',
+ data: 'hello.other.domain.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ id: 100,
+ flags: 0,
+ additionals: [{
+ type: 'AAAA',
+ name: 'hello.a.com',
+ data: 'fe80::1'
+ }, {
+ type: 'PTR',
+ name: 'hello.ptr.com',
+ data: 'hello.other.ptr.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com',
+ ttl: 42,
+ data: {
+ port: 9090,
+ target: 'hello.target.com'
+ }
+ }],
+ answers: [{
+ type: 'NULL',
+ name: 'hello.null.com',
+ data: Buffer.from([1, 2, 3, 4, 5])
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ answers: [{
+ type: 'TXT',
+ name: 'emptytxt.com',
+ data: ''
+ }]
+ })
+
+ t.end()
+})
+
+tape('rcode', function (t) {
+ const errors = ['NOERROR', 'FORMERR', 'SERVFAIL', 'NXDOMAIN', 'NOTIMP', 'REFUSED', 'YXDOMAIN', 'YXRRSET', 'NXRRSET', 'NOTAUTH', 'NOTZONE', 'RCODE_11', 'RCODE_12', 'RCODE_13', 'RCODE_14', 'RCODE_15']
+ for (const i in errors) {
+ const code = rcodes.toRcode(errors[i])
+ t.ok(errors[i] === rcodes.toString(code), 'rcode conversion from/to string matches: ' + rcodes.toString(code))
+ }
+
+ const ops = ['QUERY', 'IQUERY', 'STATUS', 'OPCODE_3', 'NOTIFY', 'UPDATE', 'OPCODE_6', 'OPCODE_7', 'OPCODE_8', 'OPCODE_9', 'OPCODE_10', 'OPCODE_11', 'OPCODE_12', 'OPCODE_13', 'OPCODE_14', 'OPCODE_15']
+ for (const j in ops) {
+ const ocode = opcodes.toOpcode(ops[j])
+ t.ok(ops[j] === opcodes.toString(ocode), 'opcode conversion from/to string matches: ' + opcodes.toString(ocode))
+ }
+
+ const buf = packet.encode({
+ type: 'response',
+ id: 45632,
+ flags: 0x8480,
+ answers: [{
+ type: 'A',
+ name: 'hello.example.net',
+ data: '127.0.0.1'
+ }]
+ })
+ const val = packet.decode(buf)
+ t.ok(val.type === 'response', 'decode type')
+ t.ok(val.opcode === 'QUERY', 'decode opcode')
+ t.ok(val.flag_qr === true, 'decode flag_qr')
+ t.ok(val.flag_aa === true, 'decode flag_aa')
+ t.ok(val.flag_tc === false, 'decode flag_tc')
+ t.ok(val.flag_rd === false, 'decode flag_rd')
+ t.ok(val.flag_ra === true, 'decode flag_ra')
+ t.ok(val.flag_z === false, 'decode flag_z')
+ t.ok(val.flag_ad === false, 'decode flag_ad')
+ t.ok(val.flag_cd === false, 'decode flag_cd')
+ t.ok(val.rcode === 'NOERROR', 'decode rcode')
+ t.end()
+})
+
+tape('name_encoding', function (t) {
+ let data = 'foo.example.com'
+ const buf = Buffer.allocUnsafe(255)
+ let offset = 0
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 17, 'name encoding length matches')
+ let dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = 'com'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 5, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = 'example.com.'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 13, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data.slice(0, -1) === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = '.'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 1, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ t.end()
+})
+
+tape('stream', function (t) {
+ const val = {
+ type: 'query',
+ id: 45632,
+ flags: 0x8480,
+ answers: [{
+ type: 'A',
+ name: 'test2.example.net',
+ data: '198.51.100.1'
+ }]
+ }
+ const buf = packet.streamEncode(val)
+ const val2 = packet.streamDecode(buf)
+
+ t.same(buf.length, packet.streamEncode.bytes, 'streamEncode.bytes was set correctly')
+ t.ok(compare(t, val2.type, val.type), 'streamDecoded type match')
+ t.ok(compare(t, val2.id, val.id), 'streamDecoded id match')
+ t.ok(parseInt(val2.flags) === parseInt(val.flags & 0x7FFF), 'streamDecoded flags match')
+ const answer = val.answers[0]
+ const answer2 = val2.answers[0]
+ t.ok(compare(t, answer.type, answer2.type), 'streamDecoded RR type match')
+ t.ok(compare(t, answer.name, answer2.name), 'streamDecoded RR name match')
+ t.ok(compare(t, answer.data, answer2.data), 'streamDecoded RR rdata match')
+ t.end()
+})
+
+tape('opt', function (t) {
+ const val = {
+ type: 'query',
+ questions: [{
+ type: 'A',
+ name: 'hello.a.com'
+ }],
+ additionals: [{
+ type: 'OPT',
+ name: '.',
+ udpPayloadSize: 1024
+ }]
+ }
+ testEncoder(t, packet, val)
+ let buf = packet.encode(val)
+ let val2 = packet.decode(buf)
+ const additional1 = val.additionals[0]
+ let additional2 = val2.additionals[0]
+ t.ok(compare(t, additional1.name, additional2.name), 'name matches')
+ t.ok(compare(t, additional1.udpPayloadSize, additional2.udpPayloadSize), 'udp payload size matches')
+ t.ok(compare(t, 0, additional2.flags), 'flags match')
+ additional1.flags = packet.DNSSEC_OK
+ additional1.extendedRcode = 0x80
+ additional1.options = [ {
+ code: 'CLIENT_SUBNET', // edns-client-subnet, see RFC 7871
+ ip: 'fe80::',
+ sourcePrefixLength: 64
+ }, {
+ code: 8, // still ECS
+ ip: '5.6.0.0',
+ sourcePrefixLength: 16,
+ scopePrefixLength: 16
+ }, {
+ code: 'padding',
+ length: 31
+ }, {
+ code: 'TCP_KEEPALIVE'
+ }, {
+ code: 'tcp_keepalive',
+ timeout: 150
+ }, {
+ code: 'KEY_TAG',
+ tags: [1, 82, 987]
+ }]
+ buf = packet.encode(val)
+ val2 = packet.decode(buf)
+ additional2 = val2.additionals[0]
+ t.ok(compare(t, 1 << 15, additional2.flags), 'DO bit set in flags')
+ t.ok(compare(t, true, additional2.flag_do), 'DO bit set')
+ t.ok(compare(t, additional1.extendedRcode, additional2.extendedRcode), 'extended rcode matches')
+ t.ok(compare(t, 8, additional2.options[0].code))
+ t.ok(compare(t, 'fe80::', additional2.options[0].ip))
+ t.ok(compare(t, 64, additional2.options[0].sourcePrefixLength))
+ t.ok(compare(t, '5.6.0.0', additional2.options[1].ip))
+ t.ok(compare(t, 16, additional2.options[1].sourcePrefixLength))
+ t.ok(compare(t, 16, additional2.options[1].scopePrefixLength))
+ t.ok(compare(t, additional1.options[2].length, additional2.options[2].data.length))
+ t.ok(compare(t, additional1.options[3].timeout, undefined))
+ t.ok(compare(t, additional1.options[4].timeout, additional2.options[4].timeout))
+ t.ok(compare(t, additional1.options[5].tags, additional2.options[5].tags))
+ t.end()
+})
+
+tape('dnskey', function (t) {
+ testEncoder(t, packet.dnskey, {
+ flags: packet.dnskey.SECURE_ENTRYPOINT | packet.dnskey.ZONE_KEY,
+ algorithm: 1,
+ key: Buffer.from([0, 1, 2, 3, 4, 5])
+ })
+ t.end()
+})
+
+tape('rrsig', function (t) {
+ const testRRSIG = {
+ typeCovered: 'A',
+ algorithm: 1,
+ labels: 2,
+ originalTTL: 3600,
+ expiration: 1234,
+ inception: 1233,
+ keyTag: 2345,
+ signersName: 'foo.com',
+ signature: Buffer.from([0, 1, 2, 3, 4, 5])
+ }
+ testEncoder(t, packet.rrsig, testRRSIG)
+
+ // Check the signature length is correct with extra junk at the end
+ const buf = Buffer.allocUnsafe(packet.rrsig.encodingLength(testRRSIG) + 4)
+ packet.rrsig.encode(testRRSIG, buf)
+ const val2 = packet.rrsig.decode(buf)
+ t.ok(compare(t, testRRSIG, val2))
+
+ t.end()
+})
+
+tape('rrp', function (t) {
+ testEncoder(t, packet.rp, {
+ mbox: 'foo.bar.com',
+ txt: 'baz.bar.com'
+ })
+ testEncoder(t, packet.rp, {
+ mbox: 'foo.bar.com'
+ })
+ testEncoder(t, packet.rp, {
+ txt: 'baz.bar.com'
+ })
+ testEncoder(t, packet.rp, {})
+ t.end()
+})
+
+tape('nsec', function (t) {
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT'] // 16
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TKEY'] // 249
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['RRSIG', 'NSEC']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT', 'RRSIG']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT', 'NSEC']
+ })
+
+ // Test with the sample NSEC from https://tools.ietf.org/html/rfc4034#section-4.3
+ var sampleNSEC = Buffer.from('003704686f7374076578616d706c6503636f6d00' +
+ '0006400100000003041b000000000000000000000000000000000000000000000' +
+ '000000020', 'hex')
+ var decoded = packet.nsec.decode(sampleNSEC)
+ t.ok(compare(t, decoded, {
+ nextDomain: 'host.example.com',
+ rrtypes: ['A', 'MX', 'RRSIG', 'NSEC', 'UNKNOWN_1234']
+ }))
+ var reencoded = packet.nsec.encode(decoded)
+ t.same(sampleNSEC.length, reencoded.length)
+ t.same(sampleNSEC, reencoded)
+ t.end()
+})
+
+tape('nsec3', function (t) {
+ testEncoder(t, packet.nsec3, {
+ algorithm: 1,
+ flags: 0,
+ iterations: 257,
+ salt: Buffer.from([42, 42, 42]),
+ nextDomain: Buffer.from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
+ rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
+ })
+ t.end()
+})
+
+tape('ds', function (t) {
+ testEncoder(t, packet.ds, {
+ keyTag: 1234,
+ algorithm: 1,
+ digestType: 1,
+ digest: Buffer.from([0, 1, 2, 3, 4, 5])
+ })
+ t.end()
+})
+
+tape('unpack', function (t) {
+ const buf = Buffer.from([
+ 0x00, 0x79,
+ 0xde, 0xad, 0x85, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x02, 0x00, 0x02, 0x02, 0x6f, 0x6a, 0x05,
+ 0x62, 0x61, 0x6e, 0x67, 0x6a, 0x03, 0x63, 0x6f,
+ 0x6d, 0x00, 0x00, 0x01, 0x00, 0x01, 0xc0, 0x0c,
+ 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10,
+ 0x00, 0x04, 0x81, 0xfa, 0x0b, 0xaa, 0xc0, 0x0f,
+ 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10,
+ 0x00, 0x05, 0x02, 0x63, 0x6a, 0xc0, 0x0f, 0xc0,
+ 0x0f, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x0e,
+ 0x10, 0x00, 0x02, 0xc0, 0x0c, 0xc0, 0x3a, 0x00,
+ 0x01, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10, 0x00,
+ 0x04, 0x45, 0x4d, 0x9b, 0x9c, 0xc0, 0x0c, 0x00,
+ 0x1c, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10, 0x00,
+ 0x10, 0x20, 0x01, 0x04, 0x18, 0x00, 0x00, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf9
+ ])
+ const val = packet.streamDecode(buf)
+ const answer = val.answers[0]
+ const authority = val.authorities[1]
+ t.ok(val.rcode === 'NOERROR', 'decode rcode')
+ t.ok(compare(t, answer.type, 'A'), 'streamDecoded RR type match')
+ t.ok(compare(t, answer.name, 'oj.bangj.com'), 'streamDecoded RR name match')
+ t.ok(compare(t, answer.data, '129.250.11.170'), 'streamDecoded RR rdata match')
+ t.ok(compare(t, authority.type, 'NS'), 'streamDecoded RR type match')
+ t.ok(compare(t, authority.name, 'bangj.com'), 'streamDecoded RR name match')
+ t.ok(compare(t, authority.data, 'oj.bangj.com'), 'streamDecoded RR rdata match')
+ t.end()
+})
+
+tape('optioncodes', function (t) {
+ const opts = [
+ [0, 'OPTION_0'],
+ [1, 'LLQ'],
+ [2, 'UL'],
+ [3, 'NSID'],
+ [4, 'OPTION_4'],
+ [5, 'DAU'],
+ [6, 'DHU'],
+ [7, 'N3U'],
+ [8, 'CLIENT_SUBNET'],
+ [9, 'EXPIRE'],
+ [10, 'COOKIE'],
+ [11, 'TCP_KEEPALIVE'],
+ [12, 'PADDING'],
+ [13, 'CHAIN'],
+ [14, 'KEY_TAG'],
+ [26946, 'DEVICEID'],
+ [65535, 'OPTION_65535'],
+ [64000, 'OPTION_64000'],
+ [65002, 'OPTION_65002'],
+ [-1, null]
+ ]
+ for (const [code, str] of opts) {
+ const s = optioncodes.toString(code)
+ t.ok(compare(t, s, str), `${code} => ${str}`)
+ t.ok(compare(t, optioncodes.toCode(s), code), `${str} => ${code}`)
+ }
+ t.ok(compare(t, optioncodes.toCode('INVALIDINVALID'), -1))
+ t.end()
+})
+
+function testEncoder (t, rpacket, val) {
+ const buf = rpacket.encode(val)
+ const val2 = rpacket.decode(buf)
+
+ t.same(buf.length, rpacket.encode.bytes, 'encode.bytes was set correctly')
+ t.same(buf.length, rpacket.encodingLength(val), 'encoding length matches')
+ t.ok(compare(t, val, val2), 'decoded object match')
+
+ const buf2 = rpacket.encode(val2)
+ const val3 = rpacket.decode(buf2)
+
+ t.same(buf2.length, rpacket.encode.bytes, 'encode.bytes was set correctly on re-encode')
+ t.same(buf2.length, rpacket.encodingLength(val), 'encoding length matches on re-encode')
+
+ t.ok(compare(t, val, val3), 'decoded object match on re-encode')
+ t.ok(compare(t, val2, val3), 're-encoded decoded object match on re-encode')
+
+ const bigger = Buffer.allocUnsafe(buf2.length + 10)
+
+ const buf3 = rpacket.encode(val, bigger, 10)
+ const val4 = rpacket.decode(buf3, 10)
+
+ t.ok(buf3 === bigger, 'echoes buffer on external buffer')
+ t.same(rpacket.encode.bytes, buf.length, 'encode.bytes is the same on external buffer')
+ t.ok(compare(t, val, val4), 'decoded object match on external buffer')
+}
+
+function compare (t, a, b) {
+ if (Buffer.isBuffer(a)) return a.toString('hex') === b.toString('hex')
+ if (typeof a === 'object' && a && b) {
+ const keys = Object.keys(a)
+ for (let i = 0; i < keys.length; i++) {
+ if (!compare(t, a[keys[i]], b[keys[i]])) {
+ return false
+ }
+ }
+ } else if (Array.isArray(b) && !Array.isArray(a)) {
+ // TXT always decode as array
+ return a.toString() === b[0].toString()
+ } else {
+ return a === b
+ }
+ return true
+}
diff --git a/testing/xpcshell/dns-packet/types.js b/testing/xpcshell/dns-packet/types.js
new file mode 100644
index 0000000000..110705b160
--- /dev/null
+++ b/testing/xpcshell/dns-packet/types.js
@@ -0,0 +1,105 @@
+'use strict'
+
+exports.toString = function (type) {
+ switch (type) {
+ case 1: return 'A'
+ case 10: return 'NULL'
+ case 28: return 'AAAA'
+ case 18: return 'AFSDB'
+ case 42: return 'APL'
+ case 257: return 'CAA'
+ case 60: return 'CDNSKEY'
+ case 59: return 'CDS'
+ case 37: return 'CERT'
+ case 5: return 'CNAME'
+ case 49: return 'DHCID'
+ case 32769: return 'DLV'
+ case 39: return 'DNAME'
+ case 48: return 'DNSKEY'
+ case 43: return 'DS'
+ case 55: return 'HIP'
+ case 13: return 'HINFO'
+ case 45: return 'IPSECKEY'
+ case 25: return 'KEY'
+ case 36: return 'KX'
+ case 29: return 'LOC'
+ case 15: return 'MX'
+ case 35: return 'NAPTR'
+ case 2: return 'NS'
+ case 47: return 'NSEC'
+ case 50: return 'NSEC3'
+ case 51: return 'NSEC3PARAM'
+ case 12: return 'PTR'
+ case 46: return 'RRSIG'
+ case 17: return 'RP'
+ case 24: return 'SIG'
+ case 6: return 'SOA'
+ case 99: return 'SPF'
+ case 33: return 'SRV'
+ case 44: return 'SSHFP'
+ case 32768: return 'TA'
+ case 249: return 'TKEY'
+ case 52: return 'TLSA'
+ case 250: return 'TSIG'
+ case 16: return 'TXT'
+ case 252: return 'AXFR'
+ case 251: return 'IXFR'
+ case 41: return 'OPT'
+ case 255: return 'ANY'
+ case 65: return 'HTTPS'
+ }
+ return 'UNKNOWN_' + type
+}
+
+exports.toType = function (name) {
+ switch (name.toUpperCase()) {
+ case 'A': return 1
+ case 'NULL': return 10
+ case 'AAAA': return 28
+ case 'AFSDB': return 18
+ case 'APL': return 42
+ case 'CAA': return 257
+ case 'CDNSKEY': return 60
+ case 'CDS': return 59
+ case 'CERT': return 37
+ case 'CNAME': return 5
+ case 'DHCID': return 49
+ case 'DLV': return 32769
+ case 'DNAME': return 39
+ case 'DNSKEY': return 48
+ case 'DS': return 43
+ case 'HIP': return 55
+ case 'HINFO': return 13
+ case 'IPSECKEY': return 45
+ case 'KEY': return 25
+ case 'KX': return 36
+ case 'LOC': return 29
+ case 'MX': return 15
+ case 'NAPTR': return 35
+ case 'NS': return 2
+ case 'NSEC': return 47
+ case 'NSEC3': return 50
+ case 'NSEC3PARAM': return 51
+ case 'PTR': return 12
+ case 'RRSIG': return 46
+ case 'RP': return 17
+ case 'SIG': return 24
+ case 'SOA': return 6
+ case 'SPF': return 99
+ case 'SRV': return 33
+ case 'SSHFP': return 44
+ case 'TA': return 32768
+ case 'TKEY': return 249
+ case 'TLSA': return 52
+ case 'TSIG': return 250
+ case 'TXT': return 16
+ case 'AXFR': return 252
+ case 'IXFR': return 251
+ case 'OPT': return 41
+ case 'ANY': return 255
+ case 'HTTPS': return 65
+ case '*': return 255
+ }
+ if (name.toUpperCase().startsWith('UNKNOWN_')) return parseInt(name.slice(8))
+ return 0
+}
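+
+// Unknown values round-trip through the 'UNKNOWN_<n>' form, e.g.
+// toString(1234) === 'UNKNOWN_1234' and toType('UNKNOWN_1234') === 1234.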
diff --git a/testing/xpcshell/example/moz.build b/testing/xpcshell/example/moz.build
new file mode 100644
index 0000000000..33b544a134
--- /dev/null
+++ b/testing/xpcshell/example/moz.build
@@ -0,0 +1,12 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a list of manifest files describing the tests to run.
+# Most likely, though, you won't need more than one or two manifests here.
+XPCSHELL_TESTS_MANIFESTS += [
+ "unit/xpcshell-with-prefs.ini",
+ "unit/xpcshell.ini",
+]
diff --git a/testing/xpcshell/example/unit/check_profile.js b/testing/xpcshell/example/unit/check_profile.js
new file mode 100644
index 0000000000..57ecf5fd55
--- /dev/null
+++ b/testing/xpcshell/example/unit/check_profile.js
@@ -0,0 +1,44 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function check_profile_dir(profd) {
+ Assert.ok(profd.exists());
+ Assert.ok(profd.isDirectory());
+ let profd2 = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ Assert.ok(profd2.exists());
+ Assert.ok(profd2.isDirectory());
+ // make sure we got the same thing back...
+ Assert.ok(profd.equals(profd2));
+}
+
+function check_do_get_profile(fireProfileAfterChange) {
+ const observedTopics = new Map([
+ ["profile-do-change", 0],
+ ["profile-after-change", 0],
+ ]);
+ const expectedTopics = new Map(observedTopics);
+
+ for (let [topic] of observedTopics) {
+ Services.obs.addObserver(() => {
+ let val = observedTopics.get(topic) + 1;
+ observedTopics.set(topic, val);
+ }, topic);
+ }
+
+ // Trigger profile creation.
+ let profd = do_get_profile();
+ check_profile_dir(profd);
+
+ // Check the observed topics
+ expectedTopics.set("profile-do-change", 1);
+ if (fireProfileAfterChange) {
+ expectedTopics.set("profile-after-change", 1);
+ }
+ Assert.deepEqual(observedTopics, expectedTopics);
+
+ // A second do_get_profile() should not trigger more notifications.
+ profd = do_get_profile();
+ check_profile_dir(profd);
+ Assert.deepEqual(observedTopics, expectedTopics);
+}
diff --git a/testing/xpcshell/example/unit/file.txt b/testing/xpcshell/example/unit/file.txt
new file mode 100644
index 0000000000..ce01362503
--- /dev/null
+++ b/testing/xpcshell/example/unit/file.txt
@@ -0,0 +1 @@
+hello
diff --git a/testing/xpcshell/example/unit/import_module.sys.mjs b/testing/xpcshell/example/unit/import_module.sys.mjs
new file mode 100644
index 0000000000..aba93afc86
--- /dev/null
+++ b/testing/xpcshell/example/unit/import_module.sys.mjs
@@ -0,0 +1,9 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Module used by test_import_module.js
+
+export const MODULE_IMPORTED = true;
+
+export const MODULE_URI = import.meta.url;
diff --git a/testing/xpcshell/example/unit/load_subscript.js b/testing/xpcshell/example/unit/load_subscript.js
new file mode 100644
index 0000000000..bb0c4400b3
--- /dev/null
+++ b/testing/xpcshell/example/unit/load_subscript.js
@@ -0,0 +1,6 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals subscriptLoaded:true */
+subscriptLoaded = true;
diff --git a/testing/xpcshell/example/unit/location_load.js b/testing/xpcshell/example/unit/location_load.js
new file mode 100644
index 0000000000..c198b2e7de
--- /dev/null
+++ b/testing/xpcshell/example/unit/location_load.js
@@ -0,0 +1,8 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals __LOCATION__ */
+
+// Gets loaded via test_location.js
+Assert.equal(__LOCATION__.leafName, "location_load.js");
diff --git a/testing/xpcshell/example/unit/prefs_test_common.js b/testing/xpcshell/example/unit/prefs_test_common.js
new file mode 100644
index 0000000000..e12d3d5298
--- /dev/null
+++ b/testing/xpcshell/example/unit/prefs_test_common.js
@@ -0,0 +1,47 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function isValidPref(prefName) {
+ return Services.prefs.getPrefType(prefName) !== Services.prefs.PREF_INVALID;
+}
+
+// Check a pref that appears in testing/profiles/xpcshell/user.js
+// but NOT in StaticPrefList.yaml, modules/libpref/init/all.js
+function has_pref_from_xpcshell_user_js() {
+ return isValidPref("extensions.webextensions.warnings-as-errors");
+}
+
+// Test pref from xpcshell-with-prefs.ini
+function has_pref_from_manifest_defaults() {
+ return isValidPref("dummy.pref.from.test.manifest");
+}
+
+// Test pref set in xpcshell.ini and xpcshell-with-prefs.ini
+function has_pref_from_manifest_file_section() {
+ return isValidPref("dummy.pref.from.test.file");
+}
+
+function check_common_xpcshell_with_prefs() {
+ Assert.ok(
+ has_pref_from_xpcshell_user_js(),
+ "Should have pref from xpcshell's user.js"
+ );
+
+ Assert.ok(
+ has_pref_from_manifest_defaults(),
+ "Should have pref from DEFAULTS in xpcshell-with-prefs.ini"
+ );
+}
+
+function check_common_xpcshell_without_prefs() {
+ Assert.ok(
+ has_pref_from_xpcshell_user_js(),
+ "Should have pref from xpcshell's user.js"
+ );
+
+ Assert.ok(
+ !has_pref_from_manifest_defaults(),
+ "xpcshell.ini did not set any prefs in DEFAULTS"
+ );
+}
diff --git a/testing/xpcshell/example/unit/subdir/file.txt b/testing/xpcshell/example/unit/subdir/file.txt
new file mode 100644
index 0000000000..c4f6b5f708
--- /dev/null
+++ b/testing/xpcshell/example/unit/subdir/file.txt
@@ -0,0 +1 @@
+subdir hello
diff --git a/testing/xpcshell/example/unit/test_add_setup.js b/testing/xpcshell/example/unit/test_add_setup.js
new file mode 100644
index 0000000000..a647f108ee
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_add_setup.js
@@ -0,0 +1,23 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+let someVar = 1;
+
+add_task(() => {
+ Assert.ok(false, "I should not be called!");
+});
+
+/* eslint-disable mozilla/reject-addtask-only */
+add_task(() => {
+ Assert.equal(
+ someVar,
+ 2,
+ "Setup should have run, even though this is the only test."
+ );
+}).only();
+
+add_setup(() => {
+ someVar = 2;
+});
diff --git a/testing/xpcshell/example/unit/test_check_nsIException.js b/testing/xpcshell/example/unit/test_check_nsIException.js
new file mode 100644
index 0000000000..23889d1a97
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_check_nsIException.js
@@ -0,0 +1,10 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* import-globals-from ../../head.js */
+
+function run_test() {
+ do_check_throws_nsIException(function() {
+ Services.env.QueryInterface(Ci.nsIFile);
+ }, "NS_NOINTERFACE");
+}
diff --git a/testing/xpcshell/example/unit/test_check_nsIException_failing.js b/testing/xpcshell/example/unit/test_check_nsIException_failing.js
new file mode 100644
index 0000000000..5f559247d9
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_check_nsIException_failing.js
@@ -0,0 +1,10 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* import-globals-from ../../head.js */
+
+function run_test() {
+ do_check_throws_nsIException(function() {
+ throw Error("I find your relaxed dishabille unpalatable");
+ }, "NS_NOINTERFACE");
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_matches.js b/testing/xpcshell/example/unit/test_do_check_matches.js
new file mode 100644
index 0000000000..44ef0096fc
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_matches.js
@@ -0,0 +1,14 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ Assert.deepEqual({ x: 1 }, { x: 1 });
+
+ // Property order is irrelevant.
+ Assert.deepEqual({ x: "foo", y: "bar" }, { y: "bar", x: "foo" }); // pass
+
+ // Patterns nest.
+ Assert.deepEqual({ a: 1, b: { c: 2, d: 3 } }, { a: 1, b: { c: 2, d: 3 } });
+
+ Assert.deepEqual([3, 4, 5], [3, 4, 5]);
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_matches_failing.js b/testing/xpcshell/example/unit/test_do_check_matches_failing.js
new file mode 100644
index 0000000000..c45ec3469b
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_matches_failing.js
@@ -0,0 +1,12 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ Assert.deepEqual({ x: 1 }, {}); // fail: all pattern props required
+ Assert.deepEqual({ x: 1 }, { x: 2 }); // fail: values must match
+ Assert.deepEqual({ x: undefined }, {});
+
+ // 'length' property counts, even if non-enumerable.
+ Assert.deepEqual([3, 4, 5], [3, 5, 5]); // fail; value doesn't match
+ Assert.deepEqual([3, 4, 5], [3, 4, 5, 6]); // fail; length doesn't match
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_null.js b/testing/xpcshell/example/unit/test_do_check_null.js
new file mode 100644
index 0000000000..97ad824353
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_null.js
@@ -0,0 +1,6 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ Assert.equal(null, null);
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_null_failing.js b/testing/xpcshell/example/unit/test_do_check_null_failing.js
new file mode 100644
index 0000000000..981f5c838c
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_null_failing.js
@@ -0,0 +1,6 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ Assert.equal(null, 0);
+}
diff --git a/testing/xpcshell/example/unit/test_do_get_tempdir.js b/testing/xpcshell/example/unit/test_do_get_tempdir.js
new file mode 100644
index 0000000000..31c061f741
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_get_tempdir.js
@@ -0,0 +1,14 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* This tests that do_get_tempdir returns a directory that we can write to. */
+
+function run_test() {
+ let tmpd = do_get_tempdir();
+ Assert.ok(tmpd.exists());
+ tmpd.append("testfile");
+ tmpd.create(Ci.nsIFile.NORMAL_FILE_TYPE, 0o600);
+ Assert.ok(tmpd.exists());
+}
diff --git a/testing/xpcshell/example/unit/test_execute_soon.js b/testing/xpcshell/example/unit/test_execute_soon.js
new file mode 100644
index 0000000000..d4fb954e46
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_execute_soon.js
@@ -0,0 +1,20 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ * ***** END LICENSE BLOCK ***** */
+
+var complete = false;
+
+function run_test() {
+ dump("Starting test\n");
+ registerCleanupFunction(function() {
+ dump("Checking test completed\n");
+ Assert.ok(complete);
+ });
+
+ executeSoon(function execute_soon_callback() {
+ dump("do_execute_soon callback\n");
+ complete = true;
+ });
+}
diff --git a/testing/xpcshell/example/unit/test_fail.js b/testing/xpcshell/example/unit/test_fail.js
new file mode 100644
index 0000000000..0c203cd82e
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_fail.js
@@ -0,0 +1,8 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ // This test expects to fail.
+ Assert.ok(false);
+}
diff --git a/testing/xpcshell/example/unit/test_get_file.js b/testing/xpcshell/example/unit/test_get_file.js
new file mode 100644
index 0000000000..213c9e7233
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_get_file.js
@@ -0,0 +1,31 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ var lf = do_get_file("file.txt");
+ Assert.ok(lf.exists());
+ Assert.ok(lf.isFile());
+ // check that allowNonexistent works
+ lf = do_get_file("file.txt.notfound", true);
+ Assert.ok(!lf.exists());
+ // check that we can get a file from a subdirectory
+ lf = do_get_file("subdir/file.txt");
+ Assert.ok(lf.exists());
+ Assert.ok(lf.isFile());
+ // and that we can get a handle to a directory itself
+ lf = do_get_file("subdir/");
+ Assert.ok(lf.exists());
+ Assert.ok(lf.isDirectory());
+ // check that we can go up a level
+ lf = do_get_file("..");
+ Assert.ok(lf.exists());
+ lf.append("unit");
+ lf.append("file.txt");
+ Assert.ok(lf.exists());
+ // check that do_get_cwd works
+ lf = do_get_cwd();
+ Assert.ok(lf.exists());
+ Assert.ok(lf.isDirectory());
+}
diff --git a/testing/xpcshell/example/unit/test_get_idle.js b/testing/xpcshell/example/unit/test_get_idle.js
new file mode 100644
index 0000000000..ea01ce0247
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_get_idle.js
@@ -0,0 +1,24 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+function run_test() {
+ print("Init the fake idle service and check its identity.");
+ let fakeIdleService = Cc["@mozilla.org/widget/useridleservice;1"].getService(
+ Ci.nsIUserIdleService
+ );
+ try {
+ fakeIdleService.QueryInterface(Ci.nsIFactory);
+ } catch (ex) {
+ do_throw("The fake idle service implements nsIFactory.");
+ }
+ // We need at least one PASS, thus sanity check the idle time.
+ Assert.equal(fakeIdleService.idleTime, 0);
+
+ print("Init the real idle service and check its identity.");
+ let realIdleService = do_get_idle();
+ try {
+ realIdleService.QueryInterface(Ci.nsIFactory);
+ do_throw("The real idle service does not implement nsIFactory.");
+ } catch (ex) {}
+}
diff --git a/testing/xpcshell/example/unit/test_import_module.js b/testing/xpcshell/example/unit/test_import_module.js
new file mode 100644
index 0000000000..089ec34f8d
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_import_module.js
@@ -0,0 +1,19 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Ensures that tests can import a module in the same folder through:
+ * ChromeUtils.importESModule("resource://test/module.sys.mjs");
+ */
+
+function run_test() {
+ Assert.ok(typeof this.MODULE_IMPORTED == "undefined");
+ Assert.ok(typeof this.MODULE_URI == "undefined");
+ let uri = "resource://test/import_module.sys.mjs";
+ let exports = ChromeUtils.importESModule(uri);
+ Assert.ok(exports.MODULE_URI == uri);
+ Assert.ok(exports.MODULE_IMPORTED);
+}
diff --git a/testing/xpcshell/example/unit/test_load.js b/testing/xpcshell/example/unit/test_load.js
new file mode 100644
index 0000000000..ec9b043ffd
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_load.js
@@ -0,0 +1,20 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var subscriptLoaded = false;
+
+function run_test() {
+ load("load_subscript.js");
+ Assert.ok(subscriptLoaded);
+ subscriptLoaded = false;
+ try {
+ load("file_that_does_not_exist.js");
+ subscriptLoaded = true;
+ } catch (ex) {
+ Assert.equal(ex.message.substring(0, 16), "cannot open file");
+ }
+ Assert.ok(!subscriptLoaded, "load() should throw an error");
+}
diff --git a/testing/xpcshell/example/unit/test_load_httpd_js.js b/testing/xpcshell/example/unit/test_load_httpd_js.js
new file mode 100644
index 0000000000..03a993730d
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_load_httpd_js.js
@@ -0,0 +1,13 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const { HttpServer } = ChromeUtils.import("resource://testing-common/httpd.js");
+
+function run_test() {
+ var httpserver = new HttpServer();
+ Assert.notEqual(httpserver, null);
+ Assert.notEqual(httpserver.QueryInterface(Ci.nsIHttpServer), null);
+}
diff --git a/testing/xpcshell/example/unit/test_location.js b/testing/xpcshell/example/unit/test_location.js
new file mode 100644
index 0000000000..3abd2f7910
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_location.js
@@ -0,0 +1,13 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* globals __LOCATION__ */
+
+function run_test() {
+ Assert.equal(__LOCATION__.leafName, "test_location.js");
+ // also check that __LOCATION__ works via load()
+ load("location_load.js");
+}
diff --git a/testing/xpcshell/example/unit/test_multiple_setups.js b/testing/xpcshell/example/unit/test_multiple_setups.js
new file mode 100644
index 0000000000..63d731c8a8
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_multiple_setups.js
@@ -0,0 +1,13 @@
+/* Any copyright is dedicated to the Public Domain.
+http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+let someVar = 0;
+
+add_setup(() => (someVar = 1));
+add_setup(() => (someVar = 2));
+
+add_task(async function test_setup_ordering() {
+ Assert.equal(someVar, 2, "Setups should have run in order.");
+});
diff --git a/testing/xpcshell/example/unit/test_multiple_tasks.js b/testing/xpcshell/example/unit/test_multiple_tasks.js
new file mode 100644
index 0000000000..46d1b21225
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_multiple_tasks.js
@@ -0,0 +1,20 @@
+/* Any copyright is dedicated to the Public Domain.
+http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+let someVar = 0;
+
+add_task(async function test_first() {
+ Assert.equal(someVar, 1, "I should run as the first test task.");
+ someVar++;
+});
+
+add_setup(function setup() {
+ Assert.equal(someVar, 0, "Should run setup first.");
+ someVar++;
+});
+
+add_task(async function test_second() {
+ Assert.equal(someVar, 2, "I should run as the second test task.");
+});
diff --git a/testing/xpcshell/example/unit/test_prefs_defaults.js b/testing/xpcshell/example/unit/test_prefs_defaults.js
new file mode 100644
index 0000000000..c6a93802e9
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_prefs_defaults.js
@@ -0,0 +1,18 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Bug 1781025 - ESLint's use of multi-ini doesn't fully cope with
+// mozilla-central's use of .ini files
+// eslint-disable-next-line no-unused-vars
+function run_test() {
+ /* import-globals-from prefs_test_common.js */
+ load("prefs_test_common.js");
+
+ check_common_xpcshell_with_prefs();
+
+ Assert.ok(
+ !has_pref_from_manifest_file_section(),
+ "Should not have pref that was only assigned to a different test"
+ );
+}
diff --git a/testing/xpcshell/example/unit/test_prefs_defaults_and_file.js b/testing/xpcshell/example/unit/test_prefs_defaults_and_file.js
new file mode 100644
index 0000000000..8a56d49c34
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_prefs_defaults_and_file.js
@@ -0,0 +1,42 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Bug 1781025 - ESLint's use of multi-ini doesn't fully cope with
+// mozilla-central's use of .ini files
+// eslint-disable-next-line no-unused-vars
+function run_test() {
+ /* import-globals-from prefs_test_common.js */
+ load("prefs_test_common.js");
+
+ check_common_xpcshell_with_prefs();
+
+ Assert.ok(
+ has_pref_from_manifest_file_section(),
+ "Should have pref set for file in xpcshell-with-prefs.ini"
+ );
+
+ Assert.equal(
+ Services.prefs.getIntPref("dummy.pref.from.test.file"),
+ 2,
+ "Value of pref that was set once at the file in xpcshell-with-prefs.ini"
+ );
+
+ Assert.equal(
+ Services.prefs.getStringPref("dummy.pref.from.test.duplicate"),
+ "final",
+ "The last pref takes precedence when duplicated"
+ );
+
+ Assert.equal(
+ Services.prefs.getIntPref("dummy.pref.from.test.manifest"),
+ 1337,
+ "File-specific pref takes precedence over manifest defaults"
+ );
+
+ Assert.equal(
+ Services.prefs.getStringPref("dummy.pref.from.test.ancestor"),
+ "Ancestor",
+ "Pref in manifest defaults without file-specific override should be set"
+ );
+}
diff --git a/testing/xpcshell/example/unit/test_prefs_defaults_included.js b/testing/xpcshell/example/unit/test_prefs_defaults_included.js
new file mode 100644
index 0000000000..c092faf5c3
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_prefs_defaults_included.js
@@ -0,0 +1,16 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ /* import-globals-from prefs_test_common.js */
+ load("prefs_test_common.js");
+
+ check_common_xpcshell_with_prefs();
+
+ Assert.equal(
+ Services.prefs.getStringPref("dummy.pref.from.test.ancestor"),
+ "ReplacedParent",
+ "Pref set in included test manifest takes precedence over ancestor"
+ );
+}
diff --git a/testing/xpcshell/example/unit/test_prefs_no_defaults.js b/testing/xpcshell/example/unit/test_prefs_no_defaults.js
new file mode 100644
index 0000000000..f4516158b3
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_prefs_no_defaults.js
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ /* import-globals-from prefs_test_common.js */
+ load("prefs_test_common.js");
+
+ check_common_xpcshell_without_prefs();
+
+ Assert.ok(
+ !has_pref_from_manifest_file_section(),
+ "Should not have pref that was only assigned to a different test"
+ );
+}
diff --git a/testing/xpcshell/example/unit/test_prefs_no_defaults_with_file.js b/testing/xpcshell/example/unit/test_prefs_no_defaults_with_file.js
new file mode 100644
index 0000000000..e098b5d05e
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_prefs_no_defaults_with_file.js
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ /* import-globals-from prefs_test_common.js */
+ load("prefs_test_common.js");
+
+ check_common_xpcshell_without_prefs();
+
+ Assert.ok(
+ has_pref_from_manifest_file_section(),
+ "Should have pref set for file in xpcshell.ini"
+ );
+}
diff --git a/testing/xpcshell/example/unit/test_profile.js b/testing/xpcshell/example/unit/test_profile.js
new file mode 100644
index 0000000000..f235eb72fa
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_profile.js
@@ -0,0 +1,11 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ /* import-globals-from check_profile.js */
+ load("check_profile.js");
+ check_do_get_profile(false);
+}
diff --git a/testing/xpcshell/example/unit/test_profile_afterChange.js b/testing/xpcshell/example/unit/test_profile_afterChange.js
new file mode 100644
index 0000000000..292cb00eba
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_profile_afterChange.js
@@ -0,0 +1,11 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ /* import-globals-from check_profile.js */
+ load("check_profile.js");
+ check_do_get_profile(true);
+}
diff --git a/testing/xpcshell/example/unit/test_sample.js b/testing/xpcshell/example/unit/test_sample.js
new file mode 100644
index 0000000000..f0aa3df7c6
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_sample.js
@@ -0,0 +1,21 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is the most basic testcase. It makes some trivial assertions,
+ * then sets a timeout, and exits the test harness when that timeout
+ * fires. This is meant to demonstrate that there is a complete event
+ * system available to test scripts.
+ * Available functions are described at:
+ * http://developer.mozilla.org/en/docs/Writing_xpcshell-based_unit_tests
+ */
+function run_test() {
+ Assert.equal(57, 57);
+ Assert.notEqual(1, 2);
+ Assert.ok(true);
+
+ do_test_pending();
+ do_timeout(100, do_test_finished);
+}
diff --git a/testing/xpcshell/example/unit/test_skip.js b/testing/xpcshell/example/unit/test_skip.js
new file mode 100644
index 0000000000..0c203cd82e
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_skip.js
@@ -0,0 +1,8 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ // This test expects to fail.
+ Assert.ok(false);
+}
diff --git a/testing/xpcshell/example/unit/test_tasks_skip.js b/testing/xpcshell/example/unit/test_tasks_skip.js
new file mode 100644
index 0000000000..99f3e8d2c2
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_tasks_skip.js
@@ -0,0 +1,21 @@
+"use strict";
+
+add_task(async function skipMeNot1() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMe1() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
+
+add_task(async function skipMeNot2() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMeNot3() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMe2() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
diff --git a/testing/xpcshell/example/unit/test_tasks_skipall.js b/testing/xpcshell/example/unit/test_tasks_skipall.js
new file mode 100644
index 0000000000..13290e58ba
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_tasks_skipall.js
@@ -0,0 +1,23 @@
+"use strict";
+
+/* eslint-disable mozilla/reject-addtask-only */
+
+add_task(async function skipMe1() {
+ Assert.ok(false, "Not skipped after all.");
+});
+
+add_task(async function skipMe2() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
+
+add_task(async function skipMe3() {
+ Assert.ok(false, "Not skipped after all.");
+}).only();
+
+add_task(async function skipMeNot() {
+ Assert.ok(true, "Well well well.");
+}).only();
+
+add_task(async function skipMe4() {
+ Assert.ok(false, "Not skipped after all.");
+});
diff --git a/testing/xpcshell/example/unit/xpcshell-included-with-prefs.ini b/testing/xpcshell/example/unit/xpcshell-included-with-prefs.ini
new file mode 100644
index 0000000000..dc18700c82
--- /dev/null
+++ b/testing/xpcshell/example/unit/xpcshell-included-with-prefs.ini
@@ -0,0 +1,5 @@
+# This file is included by xpcshell-with-prefs.ini
+[DEFAULT]
+prefs = dummy.pref.from.test.ancestor=ReplacedParent
+
+[test_prefs_defaults_included.js]
diff --git a/testing/xpcshell/example/unit/xpcshell-with-prefs.ini b/testing/xpcshell/example/unit/xpcshell-with-prefs.ini
new file mode 100644
index 0000000000..4d5944b272
--- /dev/null
+++ b/testing/xpcshell/example/unit/xpcshell-with-prefs.ini
@@ -0,0 +1,16 @@
+[DEFAULT]
+head =
+support-files = prefs_test_common.js
+prefs =
+ dummy.pref.from.test.ancestor=Ancestor
+ dummy.pref.from.test.manifest=1
+
+[test_prefs_defaults.js]
+[test_prefs_defaults_and_file.js]
+prefs = # Multiple prefs, for additional test coverage over xpcshell.ini
+ dummy.pref.from.test.file=2
+ dummy.pref.from.test.duplicate=first
+ dummy.pref.from.test.duplicate=final
+ dummy.pref.from.test.manifest=1337 # overrides manifest
+
+[include:xpcshell-included-with-prefs.ini]
diff --git a/testing/xpcshell/example/unit/xpcshell.ini b/testing/xpcshell/example/unit/xpcshell.ini
new file mode 100644
index 0000000000..38e3c98363
--- /dev/null
+++ b/testing/xpcshell/example/unit/xpcshell.ini
@@ -0,0 +1,60 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+[DEFAULT]
+head =
+support-files =
+ subdir/file.txt
+ file.txt
+ import_module.sys.mjs
+ load_subscript.js
+ location_load.js
+ check_profile.js
+ prefs_test_common.js
+# NOTE: Do NOT set prefs here. If you do, move test_prefs_no_defaults.js and
+# test_prefs_no_defaults_with_file.js to a new manifest without any pref definitions.
+
+[test_add_setup.js]
+[test_check_nsIException.js]
+skip-if = os == 'win' && debug
+[test_check_nsIException_failing.js]
+fail-if = true
+skip-if = os == 'win' && debug
+
+[test_do_get_tempdir.js]
+[test_execute_soon.js]
+[test_get_file.js]
+[test_get_idle.js]
+[test_import_module.js]
+[test_load.js]
+[test_load_httpd_js.js]
+[test_location.js]
+[test_multiple_setups.js]
+[test_multiple_tasks.js]
+[test_prefs_no_defaults.js]
+[test_prefs_no_defaults_with_file.js]
+prefs = dummy.pref.from.test.file=1
+[test_profile.js]
+[test_profile_afterChange.js]
+[test_sample.js]
+
+[test_fail.js]
+fail-if = true
+
+[test_skip.js]
+skip-if = true
+
+[test_do_check_null.js]
+skip-if = os == 'win' && debug
+
+[test_do_check_null_failing.js]
+fail-if = true
+skip-if = os == 'win' && debug
+
+[test_do_check_matches.js]
+[test_do_check_matches_failing.js]
+fail-if = true
+
+[test_tasks_skip.js]
+[test_tasks_skipall.js]
diff --git a/testing/xpcshell/head.js b/testing/xpcshell/head.js
new file mode 100644
index 0000000000..61f04a67bb
--- /dev/null
+++ b/testing/xpcshell/head.js
@@ -0,0 +1,1897 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file contains common code that is loaded before each test file(s).
+ * See https://developer.mozilla.org/en-US/docs/Mozilla/QA/Writing_xpcshell-based_unit_tests
+ * for more information.
+ */
+
+/* defined by the harness */
+/* globals _HEAD_FILES, _HEAD_JS_PATH, _JSDEBUGGER_PORT, _JSCOV_DIR,
+ _MOZINFO_JS_PATH, _TEST_FILE, _TEST_NAME, _TEST_CWD, _TESTING_MODULES_DIR:true,
+ _PREFS_FILE */
+
+/* defined by XPCShellImpl.cpp */
+/* globals load, sendCommand, changeTestShellDir */
+
+/* must be defined by tests using do_await_remote_message/do_send_remote_message */
+/* globals Cc, Ci */
+
+/* defined by this file, but exposed as read-only to tests */
+// eslint-disable-next-line no-redeclare
+/* globals runningInParent: true */
+
+/* may be defined in test files */
+/* globals run_test */
+
+var _quit = false;
+var _passed = true;
+var _tests_pending = 0;
+var _cleanupFunctions = [];
+var _pendingTimers = [];
+var _profileInitialized = false;
+var _fastShutdownDisabled = false;
+
+// Assigned in do_load_child_test_harness.
+var _XPCSHELL_PROCESS;
+
+// Register the testing-common resource protocol early, to have access to its
+// modules.
+let _Services = Services;
+_register_modules_protocol_handler();
+
+let { AppConstants: _AppConstants } = ChromeUtils.importESModule(
+ "resource://gre/modules/AppConstants.sys.mjs"
+);
+
+let { PromiseTestUtils: _PromiseTestUtils } = ChromeUtils.importESModule(
+ "resource://testing-common/PromiseTestUtils.sys.mjs"
+);
+
+let { NetUtil: _NetUtil } = ChromeUtils.import(
+ "resource://gre/modules/NetUtil.jsm"
+);
+
+let { XPCOMUtils: _XPCOMUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/XPCOMUtils.sys.mjs"
+);
+
+// Support a common assertion library, Assert.sys.mjs.
+var { Assert: AssertCls } = ChromeUtils.importESModule(
+ "resource://testing-common/Assert.sys.mjs"
+);
+
+// Pass a custom report function for xpcshell-test style reporting.
+var Assert = new AssertCls(function(err, message, stack) {
+ if (err) {
+ do_report_result(false, err.message, err.stack);
+ } else {
+ do_report_result(true, message, stack);
+ }
+}, true);
+
+// Bug 1506134 for followup. Some xpcshell tests use ContentTask.sys.mjs, which
+// expects browser-test.js to have set a testScope that includes record.
+function record(condition, name, diag, stack) {
+ do_report_result(condition, name, stack);
+}
+
+var _add_params = function(params) {
+ if (typeof _XPCSHELL_PROCESS != "undefined") {
+ params.xpcshell_process = _XPCSHELL_PROCESS;
+ }
+};
+
+var _dumpLog = function(raw_msg) {
+ dump("\n" + JSON.stringify(raw_msg) + "\n");
+};
+
+var { StructuredLogger: _LoggerClass } = ChromeUtils.importESModule(
+ "resource://testing-common/StructuredLog.sys.mjs"
+);
+var _testLogger = new _LoggerClass("xpcshell/head.js", _dumpLog, [_add_params]);
+
+// Disable automatic network detection, so tests work correctly when
+// not connected to a network.
+_Services.io.manageOfflineStatus = false;
+_Services.io.offline = false;
+
+// Determine if we're running on parent or child
+var runningInParent = true;
+try {
+ // Don't use Services.appinfo here as it disables replacing appinfo with stubs
+ // for test usage.
+ runningInParent =
+ // eslint-disable-next-line mozilla/use-services
+ Cc["@mozilla.org/xre/runtime;1"].getService(Ci.nsIXULRuntime).processType ==
+ Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT;
+} catch (e) {}
+
+// Only if building of places is enabled.
+if (runningInParent && "mozIAsyncHistory" in Ci) {
+ // Ensure places history is enabled for xpcshell-tests as some non-FF
+ // apps disable it.
+ _Services.prefs.setBoolPref("places.history.enabled", true);
+}
+
+// Configure crash reporting, if possible
+// We rely on the Python harness to set MOZ_CRASHREPORTER,
+// MOZ_CRASHREPORTER_NO_REPORT, and handle checking for minidumps.
+// Note that if we're in a child process, we don't want to init the
+// crashreporter component.
+try {
+ if (runningInParent && "@mozilla.org/toolkit/crash-reporter;1" in Cc) {
+    // Intentionally access the crash reporter service directly for this.
+ // eslint-disable-next-line mozilla/use-services
+ let crashReporter = Cc["@mozilla.org/toolkit/crash-reporter;1"].getService(
+ Ci.nsICrashReporter
+ );
+ crashReporter.UpdateCrashEventsDir();
+ crashReporter.minidumpPath = do_get_minidumpdir();
+ }
+} catch (e) {}
+
+if (runningInParent) {
+ _Services.prefs.setBoolPref("dom.push.connection.enabled", false);
+}
+
+// Configure a console listener so messages sent to it are logged as part
+// of the test.
+try {
+ let levelNames = {};
+ for (let level of ["debug", "info", "warn", "error"]) {
+ levelNames[Ci.nsIConsoleMessage[level]] = level;
+ }
+
+ let listener = {
+ QueryInterface: ChromeUtils.generateQI(["nsIConsoleListener"]),
+ observe(msg) {
+ if (typeof info === "function") {
+ info(
+ "CONSOLE_MESSAGE: (" +
+ levelNames[msg.logLevel] +
+ ") " +
+ msg.toString()
+ );
+ }
+ },
+ };
+ // Don't use _Services.console here as it causes one of the devtools tests
+ // to fail, probably due to initializing Services.console too early.
+ // eslint-disable-next-line mozilla/use-services
+ Cc["@mozilla.org/consoleservice;1"]
+ .getService(Ci.nsIConsoleService)
+ .registerListener(listener);
+} catch (e) {}
+/**
+ * Date.now() is not necessarily monotonically increasing (wall-clock times
+ * are not the right tool for measuring intervals of time, as robarnold can
+ * attest), so guard against timer error by padding delays by at least
+ * _timerFuzz ms.
+ */
+const _timerFuzz = 15;
+
+function _Timer(func, delay) {
+ delay = Number(delay);
+ if (delay < 0) {
+ do_throw("do_timeout() delay must be nonnegative");
+ }
+
+ if (typeof func !== "function") {
+ do_throw("string callbacks no longer accepted; use a function!");
+ }
+
+ this._func = func;
+ this._start = Date.now();
+ this._delay = delay;
+
+ var timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+ timer.initWithCallback(this, delay + _timerFuzz, timer.TYPE_ONE_SHOT);
+
+ // Keep timer alive until it fires
+ _pendingTimers.push(timer);
+}
+_Timer.prototype = {
+ QueryInterface: ChromeUtils.generateQI(["nsITimerCallback"]),
+
+ notify(timer) {
+ _pendingTimers.splice(_pendingTimers.indexOf(timer), 1);
+
+ // The current nsITimer implementation can undershoot, but even if it
+ // couldn't, paranoia is probably a virtue here given the potential for
+ // random orange on tinderboxen.
+ var end = Date.now();
+ var elapsed = end - this._start;
+ if (elapsed >= this._delay) {
+ try {
+ this._func.call(null);
+ } catch (e) {
+ do_throw("exception thrown from do_timeout callback: " + e);
+ }
+ return;
+ }
+
+ // Timer undershot, retry with a little overshoot to try to avoid more
+ // undershoots.
+ var newDelay = this._delay - elapsed;
+ do_timeout(newDelay, this._func);
+ },
+};
+
+function _isGenerator(val) {
+ return typeof val === "object" && val && typeof val.next === "function";
+}
+
+function _do_main() {
+ if (_quit) {
+ return;
+ }
+
+ _testLogger.info("running event loop");
+
+ var tm = Cc["@mozilla.org/thread-manager;1"].getService();
+
+ tm.spinEventLoopUntil("Test(xpcshell/head.js:_do_main)", () => _quit);
+
+ tm.spinEventLoopUntilEmpty();
+}
+
+function _do_quit() {
+ _testLogger.info("exiting test");
+ _quit = true;
+}
+
+// This is useless, except to the extent that it has the side-effect of
+// initializing the widget module, which some tests unfortunately
+// accidentally rely on.
+void Cc["@mozilla.org/widget/transferable;1"].createInstance();
+
+/**
+ * Overrides the idle service with a mock. Idle time is commonly used to
+ * schedule maintenance tasks, so if a test uses a service that relies on the
+ * idle service, that service would start handling them.
+ * This behaviour would cause random failures and slow down test execution,
+ * for example by running database vacuum or cleanups for each test.
+ *
+ * @note The idle service is overridden by default. If a test requires the
+ *       real service, it has to call do_get_idle() at least once before use.
+ */
+var _fakeIdleService = {
+ get registrar() {
+ delete this.registrar;
+ return (this.registrar = Components.manager.QueryInterface(
+ Ci.nsIComponentRegistrar
+ ));
+ },
+ contractID: "@mozilla.org/widget/useridleservice;1",
+ CID: Components.ID("{9163a4ae-70c2-446c-9ac1-bbe4ab93004e}"),
+
+ activate: function FIS_activate() {
+ if (!this.originalCID) {
+ this.originalCID = this.registrar.contractIDToCID(this.contractID);
+ // Replace with the mock.
+ this.registrar.registerFactory(
+ this.CID,
+ "Fake Idle Service",
+ this.contractID,
+ this.factory
+ );
+ }
+ },
+
+ deactivate: function FIS_deactivate() {
+ if (this.originalCID) {
+ // Unregister the mock.
+ this.registrar.unregisterFactory(this.CID, this.factory);
+ // Restore original factory.
+ this.registrar.registerFactory(
+ this.originalCID,
+ "Idle Service",
+ this.contractID,
+ null
+ );
+ delete this.originalCID;
+ }
+ },
+
+ factory: {
+ // nsIFactory
+ createInstance(aIID) {
+ return _fakeIdleService.QueryInterface(aIID);
+ },
+ QueryInterface: ChromeUtils.generateQI(["nsIFactory"]),
+ },
+
+ // nsIUserIdleService
+ get idleTime() {
+ return 0;
+ },
+ addIdleObserver() {},
+ removeIdleObserver() {},
+
+ // eslint-disable-next-line mozilla/use-chromeutils-generateqi
+ QueryInterface(aIID) {
+ // Useful for testing purposes, see test_get_idle.js.
+ if (aIID.equals(Ci.nsIFactory)) {
+ return this.factory;
+ }
+ if (aIID.equals(Ci.nsIUserIdleService) || aIID.equals(Ci.nsISupports)) {
+ return this;
+ }
+ throw Components.Exception("", Cr.NS_ERROR_NO_INTERFACE);
+ },
+};
+
+/**
+ * Restores the idle service factory if needed and returns the service's handle.
+ * @return A handle to the idle service.
+ */
+function do_get_idle() {
+ _fakeIdleService.deactivate();
+ return Cc[_fakeIdleService.contractID].getService(Ci.nsIUserIdleService);
+}
+
+// Map resource://test/ to current working directory and
+// resource://testing-common/ to the shared test modules directory.
+function _register_protocol_handlers() {
+ let protocolHandler = _Services.io
+ .getProtocolHandler("resource")
+ .QueryInterface(Ci.nsIResProtocolHandler);
+
+ let curDirURI = _Services.io.newFileURI(do_get_cwd());
+ protocolHandler.setSubstitution("test", curDirURI);
+
+ _register_modules_protocol_handler();
+}
+
+function _register_modules_protocol_handler() {
+ if (!_TESTING_MODULES_DIR) {
+ throw new Error(
+ "Please define a path where the testing modules can be " +
+ "found in a variable called '_TESTING_MODULES_DIR' before " +
+ "head.js is included."
+ );
+ }
+
+ let protocolHandler = _Services.io
+ .getProtocolHandler("resource")
+ .QueryInterface(Ci.nsIResProtocolHandler);
+
+ let modulesFile = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ modulesFile.initWithPath(_TESTING_MODULES_DIR);
+
+ if (!modulesFile.exists()) {
+ throw new Error(
+ "Specified modules directory does not exist: " + _TESTING_MODULES_DIR
+ );
+ }
+
+ if (!modulesFile.isDirectory()) {
+ throw new Error(
+ "Specified modules directory is not a directory: " + _TESTING_MODULES_DIR
+ );
+ }
+
+ let modulesURI = _Services.io.newFileURI(modulesFile);
+
+ protocolHandler.setSubstitution("testing-common", modulesURI);
+}
+
+/* Debugging support */
+// Used locally and by our self-tests.
+function _setupDevToolsServer(breakpointFiles, callback) {
+ // Always allow remote debugging.
+ _Services.prefs.setBoolPref("devtools.debugger.remote-enabled", true);
+
+ // for debugging-the-debugging, let an env var cause log spew.
+ if (_Services.env.get("DEVTOOLS_DEBUGGER_LOG")) {
+ _Services.prefs.setBoolPref("devtools.debugger.log", true);
+ }
+ if (_Services.env.get("DEVTOOLS_DEBUGGER_LOG_VERBOSE")) {
+ _Services.prefs.setBoolPref("devtools.debugger.log.verbose", true);
+ }
+
+ let require;
+ try {
+ ({ require } = ChromeUtils.importESModule(
+ "resource://devtools/shared/loader/Loader.sys.mjs"
+ ));
+ } catch (e) {
+ throw new Error(
+ "resource://devtools appears to be inaccessible from the " +
+ "xpcshell environment.\n" +
+ "This can usually be resolved by adding:\n" +
+ " firefox-appdir = browser\n" +
+ "to the xpcshell.ini manifest.\n" +
+ "It is possible for this to alter test behevior by " +
+ "triggering additional browser code to run, so check " +
+ "test behavior after making this change.\n" +
+ "See also https://bugzil.la/1215378."
+ );
+ }
+ let { DevToolsServer } = require("devtools/server/devtools-server");
+ DevToolsServer.init();
+ DevToolsServer.registerAllActors();
+ let { createRootActor } = require("resource://testing-common/dbg-actors.js");
+ DevToolsServer.setRootActor(createRootActor);
+ DevToolsServer.allowChromeProcess = true;
+
+ const TOPICS = [
+ // An observer notification that tells us when the thread actor is ready
+ // and can accept breakpoints.
+ "devtools-thread-ready",
+ // Or when devtools are destroyed and we should stop observing.
+ "xpcshell-test-devtools-shutdown",
+ ];
+ let observe = function(subject, topic, data) {
+ if (topic === "devtools-thread-ready") {
+ const threadActor = subject.wrappedJSObject;
+ threadActor.setBreakpointOnLoad(breakpointFiles);
+ }
+
+ for (let topicToRemove of TOPICS) {
+ _Services.obs.removeObserver(observe, topicToRemove);
+ }
+ callback();
+ };
+
+ for (let topic of TOPICS) {
+ _Services.obs.addObserver(observe, topic);
+ }
+
+ const { SocketListener } = require("devtools/shared/security/socket");
+
+ return { DevToolsServer, SocketListener };
+}
+
+function _initDebugging(port) {
+ let initialized = false;
+ const { DevToolsServer, SocketListener } = _setupDevToolsServer(
+ _TEST_FILE,
+ () => {
+ initialized = true;
+ }
+ );
+
+ info("");
+ info("*******************************************************************");
+ info("Waiting for the debugger to connect on port " + port);
+ info("");
+ info("To connect the debugger, open a Firefox instance, select 'Connect'");
+ info("from the Developer menu and specify the port as " + port);
+ info("*******************************************************************");
+ info("");
+
+ const AuthenticatorType = DevToolsServer.Authenticators.get("PROMPT");
+ const authenticator = new AuthenticatorType.Server();
+ authenticator.allowConnection = () => {
+ return DevToolsServer.AuthenticationResult.ALLOW;
+ };
+ const socketOptions = {
+ authenticator,
+ portOrPath: port,
+ };
+
+ const listener = new SocketListener(DevToolsServer, socketOptions);
+ listener.open();
+
+ // spin an event loop until the debugger connects.
+ const tm = Cc["@mozilla.org/thread-manager;1"].getService();
+ tm.spinEventLoopUntil("Test(xpcshell/head.js:_initDebugging)", () => {
+ if (initialized) {
+ return true;
+ }
+ info("Still waiting for debugger to connect...");
+ return false;
+ });
+ // NOTE: if you want to debug the harness itself, you can now add a 'debugger'
+ // statement anywhere and it will stop - but we've already added a breakpoint
+ // for the first line of the test scripts, so we just continue...
+ info("Debugger connected, starting test execution");
+}
+
+function _execute_test() {
+ if (typeof _TEST_CWD != "undefined") {
+ try {
+ changeTestShellDir(_TEST_CWD);
+ } catch (e) {
+ _testLogger.error(_exception_message(e));
+ }
+ }
+ if (runningInParent && _AppConstants.platform == "android") {
+ try {
+ // GeckoView initialization needs the profile
+ do_get_profile(true);
+ // Wake up GeckoViewStartup
+ let geckoViewStartup = Cc["@mozilla.org/geckoview/startup;1"].getService(
+ Ci.nsIObserver
+ );
+ geckoViewStartup.observe(null, "profile-after-change", null);
+ geckoViewStartup.observe(null, "app-startup", null);
+
+ // Glean needs to be initialized for metric recording & tests to work.
+ // Usually this happens through Glean Kotlin,
+ // but for xpcshell tests we initialize it from here.
+ _Services.fog.initializeFOG();
+ } catch (ex) {
+ do_throw(`Failed to initialize GeckoView: ${ex}`, ex.stack);
+ }
+ }
+
+ // _JSDEBUGGER_PORT is dynamically defined by <runxpcshelltests.py>.
+ if (_JSDEBUGGER_PORT) {
+ try {
+ _initDebugging(_JSDEBUGGER_PORT);
+ } catch (ex) {
+ // Fail the test run immediately if debugging is requested but fails, so
+ // that the failure state is more obvious.
+ do_throw(`Failed to initialize debugging: ${ex}`, ex.stack);
+ }
+ }
+
+ _register_protocol_handlers();
+
+ // Override idle service by default.
+ // Call do_get_idle() to restore the factory and get the service.
+ _fakeIdleService.activate();
+
+ _PromiseTestUtils.init();
+
+ let coverageCollector = null;
+ if (typeof _JSCOV_DIR === "string") {
+ let _CoverageCollector = ChromeUtils.importESModule(
+ "resource://testing-common/CoverageUtils.sys.mjs"
+ ).CoverageCollector;
+ coverageCollector = new _CoverageCollector(_JSCOV_DIR);
+ }
+
+ let startTime = Cu.now();
+
+ // _HEAD_FILES is dynamically defined by <runxpcshelltests.py>.
+ _load_files(_HEAD_FILES);
+ // _TEST_FILE is dynamically defined by <runxpcshelltests.py>.
+ _load_files(_TEST_FILE);
+
+ // Tack Assert.sys.mjs methods to the current scope.
+ this.Assert = Assert;
+ for (let func in Assert) {
+ this[func] = Assert[func].bind(Assert);
+ }
+
+ const { PerTestCoverageUtils } = ChromeUtils.import(
+ "resource://testing-common/PerTestCoverageUtils.jsm"
+ );
+
+ if (runningInParent) {
+ PerTestCoverageUtils.beforeTestSync();
+ }
+
+ try {
+ do_test_pending("MAIN run_test");
+ // Check if run_test() is defined. If defined, run it.
+ // Else, call run_next_test() directly to invoke tests
+ // added by add_test() and add_task().
+ if (typeof run_test === "function") {
+ run_test();
+ } else {
+ run_next_test();
+ }
+
+ do_test_finished("MAIN run_test");
+ _do_main();
+ _PromiseTestUtils.assertNoUncaughtRejections();
+
+ if (coverageCollector != null) {
+ coverageCollector.recordTestCoverage(_TEST_FILE[0]);
+ }
+
+ if (runningInParent) {
+ PerTestCoverageUtils.afterTestSync();
+ }
+ } catch (e) {
+ _passed = false;
+ // do_check failures are already logged and set _quit to true and throw
+ // NS_ERROR_ABORT. If both of those are true it is likely this exception
+ // has already been logged so there is no need to log it again. It's
+ // possible that this will mask an NS_ERROR_ABORT that happens after a
+ // do_check failure though.
+
+ if (!_quit || e.result != Cr.NS_ERROR_ABORT) {
+ let extra = {};
+ if (e.fileName) {
+ extra.source_file = e.fileName;
+ if (e.lineNumber) {
+ extra.line_number = e.lineNumber;
+ }
+ } else {
+ extra.source_file = "xpcshell/head.js";
+ }
+ let message = _exception_message(e);
+ if (e.stack) {
+ extra.stack = _format_stack(e.stack);
+ }
+ _testLogger.error(message, extra);
+ }
+ } finally {
+ if (coverageCollector != null) {
+ coverageCollector.finalize();
+ }
+ }
+
+ // Execute all of our cleanup functions.
+ let reportCleanupError = function(ex) {
+ let stack, filename;
+ if (ex && typeof ex == "object" && "stack" in ex) {
+ stack = ex.stack;
+ } else {
+ stack = Components.stack.caller;
+ }
+ if (stack instanceof Ci.nsIStackFrame) {
+ filename = stack.filename;
+ } else if (ex.fileName) {
+ filename = ex.fileName;
+ }
+ _testLogger.error(_exception_message(ex), {
+ stack: _format_stack(stack),
+ source_file: filename,
+ });
+ };
+
+ let complete = !_cleanupFunctions.length;
+ let cleanupStartTime = complete ? 0 : Cu.now();
+ (async () => {
+ for (let func of _cleanupFunctions.reverse()) {
+ try {
+ let result = await func();
+ if (_isGenerator(result)) {
+ Assert.ok(false, "Cleanup function returned a generator");
+ }
+ } catch (ex) {
+ reportCleanupError(ex);
+ }
+ }
+ _cleanupFunctions = [];
+ })()
+ .catch(reportCleanupError)
+ .then(() => (complete = true));
+ _Services.tm.spinEventLoopUntil(
+ "Test(xpcshell/head.js:_execute_test)",
+ () => complete
+ );
+ if (cleanupStartTime) {
+ ChromeUtils.addProfilerMarker(
+ "xpcshell-test",
+ { category: "Test", startTime: cleanupStartTime },
+ "Cleanup functions"
+ );
+ }
+
+ ChromeUtils.addProfilerMarker(
+ "xpcshell-test",
+ { category: "Test", startTime },
+ _TEST_NAME
+ );
+ _Services.obs.notifyObservers(null, "test-complete");
+
+ // Restore idle service to avoid leaks.
+ _fakeIdleService.deactivate();
+
+ if (
+ globalThis.hasOwnProperty("storage") &&
+ StorageManager.isInstance(globalThis.storage)
+ ) {
+ globalThis.storage.shutdown();
+ }
+
+ if (_profileInitialized) {
+ // Since we have a profile, we will notify profile shutdown topics at
+ // the end of the current test, to ensure correct cleanup on shutdown.
+ _Services.startup.advanceShutdownPhase(
+ _Services.startup.SHUTDOWN_PHASE_APPSHUTDOWNNETTEARDOWN
+ );
+ _Services.startup.advanceShutdownPhase(
+ _Services.startup.SHUTDOWN_PHASE_APPSHUTDOWNTEARDOWN
+ );
+ _Services.startup.advanceShutdownPhase(
+ _Services.startup.SHUTDOWN_PHASE_APPSHUTDOWN
+ );
+ _Services.startup.advanceShutdownPhase(
+ _Services.startup.SHUTDOWN_PHASE_APPSHUTDOWNQM
+ );
+
+ _profileInitialized = false;
+ }
+
+ try {
+ _PromiseTestUtils.ensureDOMPromiseRejectionsProcessed();
+ _PromiseTestUtils.assertNoUncaughtRejections();
+ _PromiseTestUtils.assertNoMoreExpectedRejections();
+ } finally {
+ // It's important to terminate the module to avoid crashes on shutdown.
+ _PromiseTestUtils.uninit();
+ }
+
+ // Skip the normal shutdown path for optimized builds that don't do leak checking.
+ if (
+ runningInParent &&
+ !_AppConstants.RELEASE_OR_BETA &&
+ !_AppConstants.DEBUG &&
+ !_AppConstants.MOZ_CODE_COVERAGE &&
+ !_AppConstants.ASAN &&
+ !_AppConstants.TSAN
+ ) {
+ if (_fastShutdownDisabled) {
+ _testLogger.info("fast shutdown disabled by the test.");
+ return;
+ }
+
+ // Setting this pref is required for Cu.isInAutomation to return true.
+ _Services.prefs.setBoolPref(
+ "security.turn_off_all_security_so_that_viruses_can_take_over_this_computer",
+ true
+ );
+ Cu.exitIfInAutomation();
+ }
+}
+
+/**
+ * Loads files.
+ *
+ * @param aFiles Array of files to load.
+ */
+function _load_files(aFiles) {
+ function load_file(element, index, array) {
+ try {
+ let startTime = Cu.now();
+ load(element);
+ ChromeUtils.addProfilerMarker(
+ "load_file",
+ { category: "Test", startTime },
+ element.replace(/.*\/_?tests\/xpcshell\//, "")
+ );
+ } catch (e) {
+ let extra = {
+ source_file: element,
+ };
+ if (e.stack) {
+ extra.stack = _format_stack(e.stack);
+ }
+ _testLogger.error(_exception_message(e), extra);
+ }
+ }
+
+ aFiles.forEach(load_file);
+}
+
+function _wrap_with_quotes_if_necessary(val) {
+ return typeof val == "string" ? '"' + val + '"' : val;
+}
+
+/* ************* Functions to be used from the tests ************* */
+
+/**
+ * Prints a message to the output log.
+ */
+function info(msg, data) {
+ ChromeUtils.addProfilerMarker("INFO", { category: "Test" }, msg);
+ msg = _wrap_with_quotes_if_necessary(msg);
+ data = data ? data : null;
+ _testLogger.info(msg, data);
+}
+
+/**
+ * Calls the given function at least the specified number of milliseconds later.
+ * The callback will not undershoot the given time, but it might overshoot --
+ * don't expect precision!
+ *
+ * @param delay : uint
+ * the number of milliseconds to delay
+ * @param func : function() : void
+ * the function to call
+ */
+function do_timeout(delay, func) {
+ new _Timer(func, Number(delay));
+}
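For illustration, here is a minimal sketch of how a test file might use do_timeout together with do_test_pending()/do_test_finished(); the test body is hypothetical and assumes the harness scope set up by this file:

function run_test() {
  do_test_pending();
  do_timeout(50, function timer_fired() {
    // Fires at least ~50 ms later; it may overshoot, but never undershoots.
    Assert.ok(true, "timer fired");
    do_test_finished();
  });
}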
+
+function executeSoon(callback, aName) {
+ let funcName = aName ? aName : callback.name;
+ do_test_pending(funcName);
+
+ _Services.tm.dispatchToMainThread({
+ run() {
+ try {
+ callback();
+ } catch (e) {
+ // do_check failures are already logged and set _quit to true and throw
+ // NS_ERROR_ABORT. If both of those are true it is likely this exception
+ // has already been logged so there is no need to log it again. It's
+ // possible that this will mask an NS_ERROR_ABORT that happens after a
+ // do_check failure though.
+ if (!_quit || e.result != Cr.NS_ERROR_ABORT) {
+ let stack = e.stack ? _format_stack(e.stack) : null;
+ _testLogger.testStatus(
+ _TEST_NAME,
+ funcName,
+ "FAIL",
+ "PASS",
+ _exception_message(e),
+ stack
+ );
+ _do_quit();
+ }
+ } finally {
+ do_test_finished(funcName);
+ }
+ },
+ });
+}
+
+/**
+ * Shows an error message and the current stack and aborts the test.
+ *
+ * @param error A message string or an Error object.
+ * @param stack null or nsIStackFrame object or a string containing
+ * \n separated stack lines (as in Error().stack).
+ */
+function do_throw(error, stack) {
+ let filename = "";
+ // If we didn't get passed a stack, maybe the error has one
+ // otherwise get it from our call context
+ stack = stack || error.stack || Components.stack.caller;
+
+ if (stack instanceof Ci.nsIStackFrame) {
+ filename = stack.filename;
+ } else if (error.fileName) {
+ filename = error.fileName;
+ }
+
+ _testLogger.error(_exception_message(error), {
+ source_file: filename,
+ stack: _format_stack(stack),
+ });
+ _abort_failed_test();
+}
+
+function _abort_failed_test() {
+ // Called to abort the test run after all failures are logged.
+ _passed = false;
+ _do_quit();
+ throw Components.Exception("", Cr.NS_ERROR_ABORT);
+}
+
+function _format_stack(stack) {
+ let normalized;
+ if (stack instanceof Ci.nsIStackFrame) {
+ let frames = [];
+ for (let frame = stack; frame; frame = frame.caller) {
+ frames.push(frame.filename + ":" + frame.name + ":" + frame.lineNumber);
+ }
+ normalized = frames.join("\n");
+ } else {
+ normalized = "" + stack;
+ }
+ return normalized;
+}
+
+// Make a nice display string from an object that behaves
+// like Error
+function _exception_message(ex) {
+ let message = "";
+ if (ex.name) {
+ message = ex.name + ": ";
+ }
+ if (ex.message) {
+ message += ex.message;
+ }
+ if (ex.fileName) {
+ message += " at " + ex.fileName;
+ if (ex.lineNumber) {
+ message += ":" + ex.lineNumber;
+ }
+ }
+ if (message !== "") {
+ return message;
+ }
+ // Force ex to be stringified
+ return "" + ex;
+}
+
+function do_report_unexpected_exception(ex, text) {
+ let filename = Components.stack.caller.filename;
+ text = text ? text + " - " : "";
+
+ _passed = false;
+ _testLogger.error(text + "Unexpected exception " + _exception_message(ex), {
+ source_file: filename,
+ stack: _format_stack(ex.stack),
+ });
+ _do_quit();
+ throw Components.Exception("", Cr.NS_ERROR_ABORT);
+}
+
+function do_note_exception(ex, text) {
+ let filename = Components.stack.caller.filename;
+ _testLogger.info(text + "Swallowed exception " + _exception_message(ex), {
+ source_file: filename,
+ stack: _format_stack(ex.stack),
+ });
+}
+
+function do_report_result(passed, text, stack, todo) {
+ // Match names like head.js, head_foo.js, and foo_head.js, but not
+ // test_headache.js
+ while (/(\/head(_.+)?|head)\.js$/.test(stack.filename) && stack.caller) {
+ stack = stack.caller;
+ }
+
+ let name = _gRunningTest ? _gRunningTest.name : stack.name;
+ let message;
+ if (name) {
+ message = "[" + name + " : " + stack.lineNumber + "] " + text;
+ } else {
+ message = text;
+ }
+
+ if (passed) {
+ if (todo) {
+ _testLogger.testStatus(
+ _TEST_NAME,
+ name,
+ "PASS",
+ "FAIL",
+ message,
+ _format_stack(stack)
+ );
+ _abort_failed_test();
+ } else {
+ _testLogger.testStatus(_TEST_NAME, name, "PASS", "PASS", message);
+ }
+ } else if (todo) {
+ _testLogger.testStatus(_TEST_NAME, name, "FAIL", "FAIL", message);
+ } else {
+ _testLogger.testStatus(
+ _TEST_NAME,
+ name,
+ "FAIL",
+ "PASS",
+ message,
+ _format_stack(stack)
+ );
+ _abort_failed_test();
+ }
+}
+
+function _do_check_eq(left, right, stack, todo) {
+ if (!stack) {
+ stack = Components.stack.caller;
+ }
+
+ var text =
+ _wrap_with_quotes_if_necessary(left) +
+ " == " +
+ _wrap_with_quotes_if_necessary(right);
+ do_report_result(left == right, text, stack, todo);
+}
+
+function todo_check_eq(left, right, stack) {
+ if (!stack) {
+ stack = Components.stack.caller;
+ }
+
+ _do_check_eq(left, right, stack, true);
+}
+
+function todo_check_true(condition, stack) {
+ if (!stack) {
+ stack = Components.stack.caller;
+ }
+
+ todo_check_eq(condition, true, stack);
+}
+
+function todo_check_false(condition, stack) {
+ if (!stack) {
+ stack = Components.stack.caller;
+ }
+
+ todo_check_eq(condition, false, stack);
+}
+
+function todo_check_null(condition, stack = Components.stack.caller) {
+ todo_check_eq(condition, null, stack);
+}
+
+// Check that |func| throws an nsIException that has
+// |Components.results[resultName]| as the value of its 'result' property.
+function do_check_throws_nsIException(
+ func,
+ resultName,
+ stack = Components.stack.caller,
+ todo = false
+) {
+ let expected = Cr[resultName];
+ if (typeof expected !== "number") {
+ do_throw(
+ "do_check_throws_nsIException requires a Components.results" +
+ " property name, not " +
+ uneval(resultName),
+ stack
+ );
+ }
+
+ let msg =
+ "do_check_throws_nsIException: func should throw" +
+ " an nsIException whose 'result' is Components.results." +
+ resultName;
+
+ try {
+ func();
+ } catch (ex) {
+ if (!(ex instanceof Ci.nsIException) || ex.result !== expected) {
+ do_report_result(
+ false,
+ msg + ", threw " + legible_exception(ex) + " instead",
+ stack,
+ todo
+ );
+ }
+
+ do_report_result(true, msg, stack, todo);
+ return;
+ }
+
+ // Call this here, not in the 'try' clause, so do_report_result's own
+ // throw doesn't get caught by our 'catch' clause.
+ do_report_result(false, msg + ", but returned normally", stack, todo);
+}
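As a hedged usage sketch (the thrown exception and task name are hypothetical), a test could assert that a call fails with a specific nsresult like this:

add_task(async function test_throws_failure() {
  do_check_throws_nsIException(function() {
    // Any code expected to throw an nsIException with this result code.
    throw Components.Exception("example failure", Cr.NS_ERROR_FAILURE);
  }, "NS_ERROR_FAILURE");
});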
+
+// Produce a human-readable form of |exception|. This looks up
+// Components.results values, tries toString methods, and so on.
+function legible_exception(exception) {
+ switch (typeof exception) {
+ case "object":
+ if (exception instanceof Ci.nsIException) {
+ return "nsIException instance: " + uneval(exception.toString());
+ }
+ return exception.toString();
+
+ case "number":
+ for (let name in Cr) {
+ if (exception === Cr[name]) {
+ return "Components.results." + name;
+ }
+ }
+
+ // Fall through.
+ default:
+ return uneval(exception);
+ }
+}
+
+function do_check_instanceof(
+ value,
+ constructor,
+ stack = Components.stack.caller,
+ todo = false
+) {
+ do_report_result(
+ value instanceof constructor,
+ "value should be an instance of " + constructor.name,
+ stack,
+ todo
+ );
+}
+
+function todo_check_instanceof(
+ value,
+ constructor,
+ stack = Components.stack.caller
+) {
+ do_check_instanceof(value, constructor, stack, true);
+}
+
+function do_test_pending(aName) {
+ ++_tests_pending;
+
+ _testLogger.info(
+ "(xpcshell/head.js) | test" +
+ (aName ? " " + aName : "") +
+ " pending (" +
+ _tests_pending +
+ ")"
+ );
+}
+
+function do_test_finished(aName) {
+ _testLogger.info(
+ "(xpcshell/head.js) | test" +
+ (aName ? " " + aName : "") +
+ " finished (" +
+ _tests_pending +
+ ")"
+ );
+ if (--_tests_pending == 0) {
+ _do_quit();
+ }
+}
+
+function do_get_file(path, allowNonexistent) {
+ try {
+ let lf = _Services.dirsvc.get("CurWorkD", Ci.nsIFile);
+
+ let bits = path.split("/");
+ for (let i = 0; i < bits.length; i++) {
+ if (bits[i]) {
+ if (bits[i] == "..") {
+ lf = lf.parent;
+ } else {
+ lf.append(bits[i]);
+ }
+ }
+ }
+
+ if (!allowNonexistent && !lf.exists()) {
+ // Not using do_throw(): caller will continue.
+ _passed = false;
+ var stack = Components.stack.caller;
+ _testLogger.error(
+ "[" +
+ stack.name +
+ " : " +
+ stack.lineNumber +
+ "] " +
+ lf.path +
+ " does not exist"
+ );
+ }
+
+ return lf;
+ } catch (ex) {
+ do_throw(ex.toString(), Components.stack.caller);
+ }
+
+ return null;
+}
+
+// do_get_cwd() isn't exactly self-explanatory, so provide a helper
+function do_get_cwd() {
+ return do_get_file("");
+}
+
+function do_load_manifest(path) {
+ var lf = do_get_file(path);
+ const nsIComponentRegistrar = Ci.nsIComponentRegistrar;
+ Assert.ok(Components.manager instanceof nsIComponentRegistrar);
+  // The Assert.ok() above is a sanity check, not a test assertion.
+ Components.manager.autoRegister(lf);
+}
+
+/**
+ * Parse a DOM document.
+ *
+ * @param aPath File path to the document.
+ * @param aType Content type to use in DOMParser.
+ *
+ * @return Document from the file.
+ */
+function do_parse_document(aPath, aType) {
+ switch (aType) {
+ case "application/xhtml+xml":
+ case "application/xml":
+ case "text/xml":
+ break;
+
+ default:
+ do_throw(
+ "type: expected application/xhtml+xml, application/xml or text/xml," +
+ " got '" +
+ aType +
+ "'",
+ Components.stack.caller
+ );
+ }
+
+ let file = do_get_file(aPath);
+ let url = _Services.io.newFileURI(file).spec;
+ file = null;
+ return new Promise((resolve, reject) => {
+ let xhr = new XMLHttpRequest();
+ xhr.open("GET", url);
+ xhr.responseType = "document";
+ xhr.onerror = reject;
+ xhr.onload = () => {
+ resolve(xhr.response);
+ };
+ xhr.send();
+ });
+}
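A usage sketch, assuming a hypothetical support file example.xml (listed under support-files in the manifest) whose document element is <root>:

add_task(async function test_parse_xml() {
  // Resolves with the Document parsed from the file.
  let doc = await do_parse_document("example.xml", "text/xml");
  Assert.equal(doc.documentElement.localName, "root");
});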
+
+/**
+ * Registers a function that will run when the test harness is done running all
+ * tests.
+ *
+ * @param aFunction
+ * The function to be called when the test harness has finished running.
+ */
+function registerCleanupFunction(aFunction) {
+ _cleanupFunctions.push(aFunction);
+}
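For example, a test might restore a pref it flipped; the pref name below is hypothetical, and note that cleanup functions run in reverse registration order once the harness is done:

add_setup(function setup_pref() {
  Services.prefs.setBoolPref("dummy.example.pref", true);
  registerCleanupFunction(() => {
    // Runs after all tasks in this file have finished.
    Services.prefs.clearUserPref("dummy.example.pref");
  });
});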
+
+/**
+ * Ensure the test finishes with a normal shutdown even when it could have
+ * otherwise used the fast Cu.exitIfInAutomation shutdown.
+ */
+function do_disable_fast_shutdown() {
+ _fastShutdownDisabled = true;
+}
+
+/**
+ * Returns the temporary directory created by the test harness.
+ * Every test gets its own temp dir.
+ *
+ * @return nsIFile of the temporary directory
+ */
+function do_get_tempdir() {
+ // the python harness sets this in the environment for us
+ let path = _Services.env.get("XPCSHELL_TEST_TEMP_DIR");
+ let file = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ file.initWithPath(path);
+ return file;
+}
+
+/**
+ * Returns the directory for crashreporter minidumps.
+ *
+ * @return nsIFile of the minidump directory
+ */
+function do_get_minidumpdir() {
+ // the python harness may set this in the environment for us
+ let path = _Services.env.get("XPCSHELL_MINIDUMP_DIR");
+ if (path) {
+ let file = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ file.initWithPath(path);
+ return file;
+ }
+ return do_get_tempdir();
+}
+
+/**
+ * Registers a directory with the profile service,
+ * and returns the directory as an nsIFile.
+ *
+ * @param notifyProfileAfterChange Whether to notify for "profile-after-change".
+ * @return nsIFile of the profile directory.
+ */
+function do_get_profile(notifyProfileAfterChange = false) {
+ if (!runningInParent) {
+ _testLogger.info("Ignoring profile creation from child process.");
+ return null;
+ }
+
+ // the python harness sets this in the environment for us
+ let profd = Services.env.get("XPCSHELL_TEST_PROFILE_DIR");
+ let file = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ file.initWithPath(profd);
+
+ let provider = {
+ getFile(prop, persistent) {
+ persistent.value = true;
+ if (
+ prop == "ProfD" ||
+ prop == "ProfLD" ||
+ prop == "ProfDS" ||
+ prop == "ProfLDS" ||
+ prop == "TmpD"
+ ) {
+ return file.clone();
+ }
+ return null;
+ },
+ QueryInterface: ChromeUtils.generateQI(["nsIDirectoryServiceProvider"]),
+ };
+ _Services.dirsvc
+ .QueryInterface(Ci.nsIDirectoryService)
+ .registerProvider(provider);
+
+ try {
+ _Services.dirsvc.undefine("TmpD");
+ } catch (e) {
+ // This throws if the key is not already registered, but that
+ // doesn't matter.
+ if (e.result != Cr.NS_ERROR_FAILURE) {
+ throw e;
+ }
+ }
+
+ // We need to update the crash events directory when the profile changes.
+ if (runningInParent && "@mozilla.org/toolkit/crash-reporter;1" in Cc) {
+    // Intentionally access the crash reporter service directly for this.
+ // eslint-disable-next-line mozilla/use-services
+ let crashReporter = Cc["@mozilla.org/toolkit/crash-reporter;1"].getService(
+ Ci.nsICrashReporter
+ );
+ crashReporter.UpdateCrashEventsDir();
+ }
+
+ if (!_profileInitialized) {
+ _Services.obs.notifyObservers(
+ null,
+ "profile-do-change",
+ "xpcshell-do-get-profile"
+ );
+ _profileInitialized = true;
+ if (notifyProfileAfterChange) {
+ _Services.obs.notifyObservers(
+ null,
+ "profile-after-change",
+ "xpcshell-do-get-profile"
+ );
+ }
+ }
+
+ // The methods of 'provider' will retain this scope so null out everything
+ // to avoid spurious leak reports.
+ profd = null;
+ provider = null;
+ return file.clone();
+}
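A minimal sketch of typical use from a test that needs a profile directory (the setup name is hypothetical):

add_setup(function setup_profile() {
  // Registers the directory provider and returns the profile dir as an nsIFile.
  let profileDir = do_get_profile();
  Assert.ok(profileDir.exists());
  Assert.ok(profileDir.isDirectory());
});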
+
+/**
+ * This function loads head.js (this file) in the child process, so that all
+ * functions defined in this file (do_throw, etc) are available to subsequent
+ * sendCommand calls. It also sets various constants used by these functions.
+ *
+ * (Note that you may use sendCommand without calling this function first; you
+ * simply won't have any of the functions in this file available.)
+ */
+function do_load_child_test_harness() {
+  // Make sure this isn't called from the child process.
+ if (!runningInParent) {
+ do_throw("run_test_in_child cannot be called from child!");
+ }
+
+  // Allow this to be called multiple times, but only run it once.
+ if (typeof do_load_child_test_harness.alreadyRun != "undefined") {
+ return;
+ }
+ do_load_child_test_harness.alreadyRun = 1;
+
+ _XPCSHELL_PROCESS = "parent";
+
+ let command =
+ "const _HEAD_JS_PATH=" +
+ uneval(_HEAD_JS_PATH) +
+ "; " +
+ "const _HEAD_FILES=" +
+ uneval(_HEAD_FILES) +
+ "; " +
+ "const _MOZINFO_JS_PATH=" +
+ uneval(_MOZINFO_JS_PATH) +
+ "; " +
+ "const _TEST_NAME=" +
+ uneval(_TEST_NAME) +
+ "; " +
+ // We'll need more magic to get the debugger working in the child
+ "const _JSDEBUGGER_PORT=0; " +
+ "_XPCSHELL_PROCESS='child';";
+
+ if (typeof _JSCOV_DIR === "string") {
+ command += " const _JSCOV_DIR=" + uneval(_JSCOV_DIR) + ";";
+ }
+
+ if (typeof _TEST_CWD != "undefined") {
+ command += " const _TEST_CWD=" + uneval(_TEST_CWD) + ";";
+ }
+
+ if (_TESTING_MODULES_DIR) {
+ command +=
+ " const _TESTING_MODULES_DIR=" + uneval(_TESTING_MODULES_DIR) + ";";
+ }
+
+ command += " load(_HEAD_JS_PATH);";
+ sendCommand(command);
+}
+
+/**
+ * Runs an entire xpcshell unit test in a child process (rather than in chrome,
+ * which is the default).
+ *
+ * This function returns immediately, before the test has completed.
+ *
+ * @param testFile
+ * The name of the script to run. Path format same as load().
+ * @param optionalCallback
+ * Optional function to be called (in parent) when test on child is
+ * complete. If provided, the function must call do_test_finished();
+ * @return Promise Resolved when the test in the child is complete.
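+ *
+ * Example (a minimal sketch; "test_child_part.js" is a hypothetical file):
+ *
+ *   add_task(async function run_child_part() {
+ *     await run_test_in_child("test_child_part.js");
+ *   });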
+ */
+function run_test_in_child(testFile, optionalCallback) {
+ return new Promise(resolve => {
+ var callback = () => {
+ resolve();
+ if (typeof optionalCallback == "undefined") {
+ do_test_finished();
+ } else {
+ optionalCallback();
+ }
+ };
+
+ do_load_child_test_harness();
+
+ var testPath = do_get_file(testFile).path.replace(/\\/g, "/");
+ do_test_pending("run in child");
+ sendCommand(
+ "_testLogger.info('CHILD-TEST-STARTED'); " +
+ "const _TEST_FILE=['" +
+ testPath +
+ "']; " +
+ "_execute_test(); " +
+ "_testLogger.info('CHILD-TEST-COMPLETED');",
+ callback
+ );
+ });
+}
+
+/**
+ * Execute a given function as soon as a particular cross-process message is received.
+ * Must be paired with do_send_remote_message or equivalent ProcessMessageManager calls.
+ *
+ * @param name
+ *        The name of the message to wait for.
+ * @param optionalCallback
+ *        Optional callback that is invoked when the message is received. If provided,
+ *        the function must call do_test_finished().
+ * @return Promise Promise that is resolved when the message is received.
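+ *
+ * Example (illustrative; "ping" is an arbitrary message name):
+ *
+ *   // Parent side (inside an add_task):
+ *   let data = await do_await_remote_message("ping");
+ *   // Child side:
+ *   do_send_remote_message("ping", { ok: true });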
+ */
+function do_await_remote_message(name, optionalCallback) {
+ return new Promise(resolve => {
+ var listener = {
+ receiveMessage(message) {
+ if (message.name == name) {
+ mm.removeMessageListener(name, listener);
+ resolve(message.data);
+ if (optionalCallback) {
+ optionalCallback(message.data);
+ } else {
+ do_test_finished();
+ }
+ }
+ },
+ };
+
+ var mm;
+ if (runningInParent) {
+ mm = Cc["@mozilla.org/parentprocessmessagemanager;1"].getService();
+ } else {
+ mm = Cc["@mozilla.org/childprocessmessagemanager;1"].getService();
+ }
+ do_test_pending();
+ mm.addMessageListener(name, listener);
+ });
+}
+
+/**
+ * Asynchronously send a message to all remote processes. Pairs with do_await_remote_message
+ * or equivalent ProcessMessageManager listeners.
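+ *
+ * Example (illustrative; the message name is arbitrary):
+ *
+ *   // In the parent, broadcast to all child processes:
+ *   do_send_remote_message("start-phase", { step: 1 });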
+ */
+function do_send_remote_message(name, data) {
+ var mm;
+ var sender;
+ if (runningInParent) {
+ mm = Cc["@mozilla.org/parentprocessmessagemanager;1"].getService();
+ sender = "broadcastAsyncMessage";
+ } else {
+ mm = Cc["@mozilla.org/childprocessmessagemanager;1"].getService();
+ sender = "sendAsyncMessage";
+ }
+ mm[sender](name, data);
+}
+
+/**
+ * Schedules and awaits a precise GC, and forces CC, `maxCount` number of times.
+ * @param maxCount
+ * How many times GC and CC should be scheduled.
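+ *
+ * Example: `await schedulePreciseGCAndForceCC(3);` runs three GC+CC cycles.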
+ */
+async function schedulePreciseGCAndForceCC(maxCount) {
+ for (let count = 0; count < maxCount; count++) {
+ await new Promise(resolve => Cu.schedulePreciseGC(resolve));
+ Cu.forceCC();
+ }
+}
+
+/**
+ * Add a test function to the list of tests that are to be run asynchronously.
+ *
+ * @param funcOrProperties
+ *        A function to be run, or an object that represents test properties.
+ * Supported properties:
+ *        skip_if : An arrow function returning an expression that is
+ *                  evaluated to decide whether the test is skipped.
+ * pref_set: An array of preferences to set for the test, reset at end of test.
+ * @param func
+ *        A function to be run only if funcOrProperties is not a function.
+ * @param isTask
+ * Optional flag that indicates whether `func` is a task. Defaults to `false`.
+ * @param isSetup
+ * Optional flag that indicates whether `func` is a setup task. Defaults to `false`.
+ * Implies isTask.
+ *
+ * Each test function must call run_next_test() when it's done. Test files
+ * should call run_next_test() in their run_test function to execute all
+ * async tests.
+ *
+ * @return the test function that was passed in.
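+ *
+ * Example usage (illustrative; the skip_if condition is arbitrary):
+ *
+ *   add_test(function test_simple() {
+ *     Assert.ok(true);
+ *     run_next_test();
+ *   });
+ *
+ *   add_test({ skip_if: () => mozinfo.os == "win" }, function test_not_on_win() {
+ *     Assert.ok(true);
+ *     run_next_test();
+ *   });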
+ */
+var _gSupportedProperties = ["skip_if", "pref_set"];
+var _gTests = [];
+var _gRunOnlyThisTest = null;
+function add_test(
+ properties,
+ func = properties,
+ isTask = false,
+ isSetup = false
+) {
+ if (isSetup) {
+ isTask = true;
+ }
+ if (typeof properties == "function") {
+ properties = { isTask, isSetup };
+ _gTests.push([properties, func]);
+ } else if (typeof properties == "object") {
+ // Ensure only documented properties are in the object.
+ for (let prop of Object.keys(properties)) {
+ if (!_gSupportedProperties.includes(prop)) {
+ do_throw(`Task property is not supported: ${prop}`);
+ }
+ }
+ properties.isTask = isTask;
+ properties.isSetup = isSetup;
+ _gTests.push([properties, func]);
+ } else {
+ do_throw("add_test() should take a function or an object and a function");
+ }
+ func.skip = () => (properties.skip_if = () => true);
+ func.only = () => (_gRunOnlyThisTest = func);
+ return func;
+}
+
+/**
+ * Add a test function which is a Task function.
+ *
+ * @param funcOrProperties
+ *        An async function to be run, or an object that represents test properties.
+ * Supported properties:
+ *        skip_if : An arrow function returning an expression that is
+ *                  evaluated to decide whether the test is skipped.
+ * pref_set: An array of preferences to set for the test, reset at end of test.
+ * @param func
+ *        An async function to be run only if funcOrProperties is not a function.
+ *
+ * Task functions are functions fed into Task.jsm's Task.spawn(). They are async
+ * functions that emit promises.
+ *
+ * If an exception is thrown, a do_check_* comparison fails, or if a rejected
+ * promise is yielded, the test function aborts immediately and the test is
+ * reported as a failure.
+ *
+ * Unlike add_test(), there is no need to call run_next_test(). The next test
+ * will run automatically as soon as the task function is exhausted. To trigger
+ * premature (but successful) termination of the task, simply return early.
+ *
+ * Example usage:
+ *
+ * add_task(async function test() {
+ * let result = await Promise.resolve(true);
+ *
+ * do_check_true(result);
+ *
+ * let secondary = await someFunctionThatReturnsAPromise(result);
+ * do_check_eq(secondary, "expected value");
+ * });
+ *
+ * add_task(async function test_early_return() {
+ * let result = await somethingThatReturnsAPromise();
+ *
+ * if (!result) {
+ * // Test is ended immediately, with success.
+ * return;
+ * }
+ *
+ * do_check_eq(result, "foo");
+ * });
+ *
+ * add_task({
+ * skip_if: () => !("@mozilla.org/telephony/volume-service;1" in Components.classes),
+ * pref_set: [["some.pref", "value"], ["another.pref", true]],
+ * }, async function test_volume_service() {
+ * let volumeService = Cc["@mozilla.org/telephony/volume-service;1"]
+ * .getService(Ci.nsIVolumeService);
+ * ...
+ * });
+ */
+function add_task(properties, func = properties) {
+ return add_test(properties, func, true);
+}
+
+/**
+ * add_setup is like add_task, but creates setup tasks.
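+ *
+ * Example (illustrative; the pref name is hypothetical):
+ *
+ *   add_setup(async function setup() {
+ *     do_get_profile();
+ *     Services.prefs.setBoolPref("some.test.pref", true);
+ *   });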
+ */
+function add_setup(properties, func = properties) {
+ return add_test(properties, func, true, true);
+}
+
+const _setTaskPrefs = prefs => {
+ for (let [pref, value] of prefs) {
+ if (value === undefined) {
+ // Clear any pref that didn't have a user value.
+ info(`Clearing pref "${pref}"`);
+ _Services.prefs.clearUserPref(pref);
+ continue;
+ }
+
+ info(`Setting pref "${pref}": ${value}`);
+ switch (typeof value) {
+ case "boolean":
+ _Services.prefs.setBoolPref(pref, value);
+ break;
+ case "number":
+ _Services.prefs.setIntPref(pref, value);
+ break;
+ case "string":
+ _Services.prefs.setStringPref(pref, value);
+ break;
+ default:
+ throw new Error("runWithPrefs doesn't support this pref type yet");
+ }
+ }
+};
+
+const _getTaskPrefs = prefs => {
+ return prefs.map(([pref, value]) => {
+ info(`Getting initial pref value for "${pref}"`);
+ if (!_Services.prefs.prefHasUserValue(pref)) {
+ // Check if the pref doesn't have a user value.
+ return [pref, undefined];
+ }
+ switch (typeof value) {
+ case "boolean":
+ return [pref, _Services.prefs.getBoolPref(pref)];
+ case "number":
+ return [pref, _Services.prefs.getIntPref(pref)];
+ case "string":
+ return [pref, _Services.prefs.getStringPref(pref)];
+ default:
+ throw new Error("runWithPrefs doesn't support this pref type yet");
+ }
+ });
+};
+
+/**
+ * Runs the next test function from the list of async tests.
+ */
+var _gRunningTest = null;
+var _gTestIndex = 0; // The index of the currently running test.
+var _gTaskRunning = false;
+var _gSetupRunning = false;
+function run_next_test() {
+ if (_gTaskRunning) {
+ throw new Error(
+ "run_next_test() called from an add_task() test function. " +
+ "run_next_test() should not be called from inside add_setup() or add_task() " +
+ "under any circumstances!"
+ );
+ }
+
+ if (_gSetupRunning) {
+ throw new Error(
+ "run_next_test() called from an add_setup() test function. " +
+ "run_next_test() should not be called from inside add_setup() or add_task() " +
+ "under any circumstances!"
+ );
+ }
+
+ function _run_next_test() {
+ if (_gTestIndex < _gTests.length) {
+ // Check for uncaught rejections as early and often as possible.
+ _PromiseTestUtils.assertNoUncaughtRejections();
+ let _properties;
+ [_properties, _gRunningTest] = _gTests[_gTestIndex++];
+
+ // Must set to pending before we check for skip, so that we keep the
+ // running counts correct.
+ _testLogger.info(
+ `${_TEST_NAME} | Starting ${_properties.isSetup ? "setup " : ""}${
+ _gRunningTest.name
+ }`
+ );
+ do_test_pending(_gRunningTest.name);
+
+ if (
+ (typeof _properties.skip_if == "function" && _properties.skip_if()) ||
+ (_gRunOnlyThisTest &&
+ _gRunningTest != _gRunOnlyThisTest &&
+ !_properties.isSetup)
+ ) {
+ let _condition = _gRunOnlyThisTest
+ ? "only one task may run."
+ : _properties.skip_if.toSource().replace(/\(\)\s*=>\s*/, "");
+ if (_condition == "true") {
+ _condition = "explicitly skipped.";
+ }
+ let _message =
+ _gRunningTest.name +
+ " skipped because the following conditions were" +
+ " met: (" +
+ _condition +
+ ")";
+ _testLogger.testStatus(
+ _TEST_NAME,
+ _gRunningTest.name,
+ "SKIP",
+ "SKIP",
+ _message
+ );
+ executeSoon(run_next_test);
+ return;
+ }
+
+ let initialPrefsValues = [];
+ if (_properties.pref_set) {
+ initialPrefsValues = _getTaskPrefs(_properties.pref_set);
+ _setTaskPrefs(_properties.pref_set);
+ }
+
+ if (_properties.isTask) {
+ if (_properties.isSetup) {
+ _gSetupRunning = true;
+ } else {
+ _gTaskRunning = true;
+ }
+ let startTime = Cu.now();
+ (async () => _gRunningTest())().then(
+ result => {
+ _gTaskRunning = _gSetupRunning = false;
+ ChromeUtils.addProfilerMarker(
+ "task",
+ { category: "Test", startTime },
+ _gRunningTest.name || undefined
+ );
+ if (_isGenerator(result)) {
+ Assert.ok(false, "Task returned a generator");
+ }
+ _setTaskPrefs(initialPrefsValues);
+ run_next_test();
+ },
+ ex => {
+ _gTaskRunning = _gSetupRunning = false;
+ ChromeUtils.addProfilerMarker(
+ "task",
+ { category: "Test", startTime },
+ _gRunningTest.name || undefined
+ );
+ _setTaskPrefs(initialPrefsValues);
+ try {
+ do_report_unexpected_exception(ex);
+ } catch (error) {
+ // The above throws NS_ERROR_ABORT and we don't want this to show up
+ // as an unhandled rejection later.
+ }
+ }
+ );
+ } else {
+ // Exceptions do not kill asynchronous tests, so they'll time out.
+ let startTime = Cu.now();
+ try {
+ _gRunningTest();
+ } catch (e) {
+ do_throw(e);
+ } finally {
+ ChromeUtils.addProfilerMarker(
+ "xpcshell-test",
+ { category: "Test", startTime },
+ _gRunningTest.name || undefined
+ );
+ _setTaskPrefs(initialPrefsValues);
+ }
+ }
+ }
+ }
+
+ function frontLoadSetups() {
+ _gTests.sort(([propsA, funcA], [propsB, funcB]) => {
+ if (propsB.isSetup === propsA.isSetup) {
+ return 0;
+ }
+ return propsB.isSetup ? 1 : -1;
+ });
+ }
+
+ if (!_gTestIndex) {
+ frontLoadSetups();
+ }
+
+ // For sane stacks during failures, we execute this code soon, but not now.
+ // We do this now, before we call do_test_finished(), to ensure the pending
+ // counter (_tests_pending) never reaches 0 while we still have tests to run
+ // (executeSoon bumps that counter).
+ executeSoon(_run_next_test, "run_next_test " + _gTestIndex);
+
+ if (_gRunningTest !== null) {
+ // Close the previous test do_test_pending call.
+ do_test_finished(_gRunningTest.name);
+ }
+}
+
+try {
+ // Set global preferences
+ if (runningInParent) {
+ let prefsFile = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ prefsFile.initWithPath(_PREFS_FILE);
+ _Services.prefs.readUserPrefsFromFile(prefsFile);
+ }
+} catch (e) {
+ do_throw(e);
+}
+
+/**
+ * Changing/Adding scalars or events to Telemetry is supported in build-faster/artifacts builds.
+ * These need to be loaded explicitly at start.
+ * It usually happens once all of Telemetry is initialized and set up.
+ * However in xpcshell tests Telemetry is not necessarily fully loaded,
+ * so we help out users by loading at least the dynamic-builtin probes.
+ */
+try {
+ // We only need to run this in the parent process.
+ // We only want to run this for local developer builds (which should have a "default" update channel).
+ if (runningInParent && _AppConstants.MOZ_UPDATE_CHANNEL == "default") {
+ let startTime = Cu.now();
+ let {
+ TelemetryController: _TelemetryController,
+ } = ChromeUtils.importESModule(
+ "resource://gre/modules/TelemetryController.sys.mjs"
+ );
+
+ let complete = false;
+ _TelemetryController.testRegisterJsProbes().finally(() => {
+ ChromeUtils.addProfilerMarker(
+ "xpcshell-test",
+ { category: "Test", startTime },
+ "TelemetryController.testRegisterJsProbes"
+ );
+ complete = true;
+ });
+ _Services.tm.spinEventLoopUntil(
+ "Test(xpcshell/head.js:run_next-Test)",
+ () => complete
+ );
+ }
+} catch (e) {
+ do_throw(e);
+}
+
+function _load_mozinfo() {
+ let mozinfoFile = Cc["@mozilla.org/file/local;1"].createInstance(Ci.nsIFile);
+ mozinfoFile.initWithPath(_MOZINFO_JS_PATH);
+ let stream = Cc["@mozilla.org/network/file-input-stream;1"].createInstance(
+ Ci.nsIFileInputStream
+ );
+ stream.init(mozinfoFile, -1, 0, 0);
+ let bytes = _NetUtil.readInputStream(stream, stream.available());
+ let decoded = JSON.parse(new TextDecoder().decode(bytes));
+ stream.close();
+ return decoded;
+}
+
+Object.defineProperty(this, "mozinfo", {
+ configurable: true,
+ get() {
+ let _mozinfo = _load_mozinfo();
+ Object.defineProperty(this, "mozinfo", {
+ configurable: false,
+ value: _mozinfo,
+ });
+ return _mozinfo;
+ },
+});
diff --git a/testing/xpcshell/mach_commands.py b/testing/xpcshell/mach_commands.py
new file mode 100644
index 0000000000..1e16e168de
--- /dev/null
+++ b/testing/xpcshell/mach_commands.py
@@ -0,0 +1,278 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Integrates the xpcshell test runner with mach.
+
+import errno
+import logging
+import os
+import sys
+from multiprocessing import cpu_count
+
+from mach.decorators import Command
+from mozbuild.base import BinaryNotFoundException
+from mozbuild.base import MachCommandConditions as conditions
+from mozbuild.base import MozbuildObject
+from mozlog import structured
+from xpcshellcommandline import parser_desktop, parser_remote
+
+here = os.path.abspath(os.path.dirname(__file__))
+
+
+# This should probably be consolidated with similar classes in other test
+# runners.
+class InvalidTestPathError(Exception):
+ """Exception raised when the test path is not valid."""
+
+
+class XPCShellRunner(MozbuildObject):
+ """Run xpcshell tests."""
+
+ def run_suite(self, **kwargs):
+ return self._run_xpcshell_harness(**kwargs)
+
+ def run_test(self, **kwargs):
+ """Runs an individual xpcshell test."""
+
+ # TODO Bug 794506 remove once mach integrates with virtualenv.
+ build_path = os.path.join(self.topobjdir, "build")
+ if build_path not in sys.path:
+ sys.path.append(build_path)
+
+ src_build_path = os.path.join(self.topsrcdir, "mozilla", "build")
+ if os.path.isdir(src_build_path):
+ sys.path.append(src_build_path)
+
+ return self.run_suite(**kwargs)
+
+ def _run_xpcshell_harness(self, **kwargs):
+ # Obtain a reference to the xpcshell test runner.
+ import runxpcshelltests
+
+ log = kwargs.pop("log")
+
+ xpcshell = runxpcshelltests.XPCShellTests(log=log)
+ self.log_manager.enable_unstructured()
+
+ tests_dir = os.path.join(self.topobjdir, "_tests", "xpcshell")
+ # We want output from the test to be written immediately if we are only
+ # running a single test.
+ single_test = (
+ len(kwargs["testPaths"]) == 1
+ and os.path.isfile(kwargs["testPaths"][0])
+ or kwargs["manifest"]
+ and (len(kwargs["manifest"].test_paths()) == 1)
+ )
+
+ if single_test:
+ kwargs["verbose"] = True
+
+ if kwargs["xpcshell"] is None:
+ try:
+ kwargs["xpcshell"] = self.get_binary_path("xpcshell")
+ except BinaryNotFoundException as e:
+ self.log(
+ logging.ERROR, "xpcshell-test", {"error": str(e)}, "ERROR: {error}"
+ )
+ self.log(logging.INFO, "xpcshell-test", {"help": e.help()}, "{help}")
+ return 1
+
+ if kwargs["mozInfo"] is None:
+ kwargs["mozInfo"] = os.path.join(self.topobjdir, "mozinfo.json")
+
+ if kwargs["symbolsPath"] is None:
+ kwargs["symbolsPath"] = os.path.join(self.distdir, "crashreporter-symbols")
+
+ if kwargs["logfiles"] is None:
+ kwargs["logfiles"] = False
+
+ if kwargs["profileName"] is None:
+ kwargs["profileName"] = "firefox"
+
+ if kwargs["testingModulesDir"] is None:
+ kwargs["testingModulesDir"] = os.path.join(self.topobjdir, "_tests/modules")
+
+ if kwargs["utility_path"] is None:
+ kwargs["utility_path"] = self.bindir
+
+ if kwargs["manifest"] is None:
+ kwargs["manifest"] = os.path.join(tests_dir, "xpcshell.ini")
+
+ if kwargs["failure_manifest"] is None:
+ kwargs["failure_manifest"] = os.path.join(
+ self.statedir, "xpcshell.failures.ini"
+ )
+
+ # Use the object directory for the temp directory to minimize the chance
+        # of file scanning. Tools such as search indexers and anti-virus
+        # scanners like Windows Defender can add tons of overhead to test execution.
+ # We encourage people to disable these things in the object directory.
+ temp_dir = os.path.join(self.topobjdir, "temp")
+ try:
+ os.mkdir(temp_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ kwargs["tempDir"] = temp_dir
+
+ result = xpcshell.runTests(kwargs)
+
+ self.log_manager.disable_unstructured()
+
+ if not result and not xpcshell.sequential:
+ print(
+ "Tests were run in parallel. Try running with --sequential "
+ "to make sure the failures were not caused by this."
+ )
+ return int(not result)
+
+
+class AndroidXPCShellRunner(MozbuildObject):
+ """Run Android xpcshell tests."""
+
+ def run_test(self, **kwargs):
+ # TODO Bug 794506 remove once mach integrates with virtualenv.
+ build_path = os.path.join(self.topobjdir, "build")
+ if build_path not in sys.path:
+ sys.path.append(build_path)
+
+ import remotexpcshelltests
+
+ log = kwargs.pop("log")
+ self.log_manager.enable_unstructured()
+
+ if kwargs["xpcshell"] is None:
+ kwargs["xpcshell"] = "xpcshell"
+
+ if not kwargs["objdir"]:
+ kwargs["objdir"] = self.topobjdir
+
+ if not kwargs["localBin"]:
+ kwargs["localBin"] = os.path.join(self.topobjdir, "dist/bin")
+
+ if not kwargs["testingModulesDir"]:
+ kwargs["testingModulesDir"] = os.path.join(self.topobjdir, "_tests/modules")
+
+ if not kwargs["mozInfo"]:
+ kwargs["mozInfo"] = os.path.join(self.topobjdir, "mozinfo.json")
+
+ if not kwargs["manifest"]:
+ kwargs["manifest"] = os.path.join(
+ self.topobjdir, "_tests/xpcshell/xpcshell.ini"
+ )
+
+ if not kwargs["symbolsPath"]:
+ kwargs["symbolsPath"] = os.path.join(self.distdir, "crashreporter-symbols")
+
+ if self.substs.get("MOZ_BUILD_APP") == "b2g":
+ kwargs["localAPK"] = None
+ elif not kwargs["localAPK"]:
+ for root, _, paths in os.walk(os.path.join(kwargs["objdir"], "gradle")):
+ for file_name in paths:
+ if file_name.endswith(".apk") and file_name.startswith(
+ "test_runner-withGeckoBinaries"
+ ):
+ kwargs["localAPK"] = os.path.join(root, file_name)
+ print("using APK: %s" % kwargs["localAPK"])
+ break
+ if kwargs["localAPK"]:
+ break
+ else:
+ raise Exception("APK not found in objdir. You must specify an APK.")
+
+ xpcshell = remotexpcshelltests.XPCShellRemote(kwargs, log)
+
+ result = xpcshell.runTests(
+ kwargs,
+ testClass=remotexpcshelltests.RemoteXPCShellTestThread,
+ mobileArgs=xpcshell.mobileArgs,
+ )
+
+ self.log_manager.disable_unstructured()
+
+ return int(not result)
+
+
+def get_parser():
+ build_obj = MozbuildObject.from_environment(cwd=here)
+ if (
+ conditions.is_android(build_obj)
+ or build_obj.substs.get("MOZ_BUILD_APP") == "b2g"
+ ):
+ return parser_remote()
+ else:
+ return parser_desktop()
+
+
+@Command(
+ "xpcshell-test",
+ category="testing",
+ description="Run XPCOM Shell tests (API direct unit testing)",
+ conditions=[lambda *args: True],
+ parser=get_parser,
+)
+def run_xpcshell_test(command_context, test_objects=None, **params):
+ from mozbuild.controller.building import BuildDriver
+
+ if test_objects is not None:
+ from manifestparser import TestManifest
+
+ m = TestManifest()
+ m.tests.extend(test_objects)
+ params["manifest"] = m
+
+ driver = command_context._spawn(BuildDriver)
+ driver.install_tests()
+
+ # We should probably have a utility function to ensure the tree is
+ # ready to run tests. Until then, we just create the state dir (in
+ # case the tree wasn't built with mach).
+ command_context._ensure_state_subdir_exists(".")
+
+ if not params.get("log"):
+ log_defaults = {
+ command_context._mach_context.settings["test"]["format"]: sys.stdout
+ }
+ fmt_defaults = {
+ "level": command_context._mach_context.settings["test"]["level"],
+ "verbose": True,
+ }
+ params["log"] = structured.commandline.setup_logging(
+ "XPCShellTests", params, log_defaults, fmt_defaults
+ )
+
+ if not params["threadCount"]:
+ # pylint --py3k W1619
+ params["threadCount"] = int((cpu_count() * 3) / 2)
+
+ if (
+ conditions.is_android(command_context)
+ or command_context.substs.get("MOZ_BUILD_APP") == "b2g"
+ ):
+ from mozrunner.devices.android_device import (
+ InstallIntent,
+ get_adb_path,
+ verify_android_device,
+ )
+
+ install = InstallIntent.YES if params["setup"] else InstallIntent.NO
+ device_serial = params.get("deviceSerial")
+ verify_android_device(
+ command_context,
+ network=True,
+ install=install,
+ device_serial=device_serial,
+ )
+ if not params["adbPath"]:
+ params["adbPath"] = get_adb_path(command_context)
+ xpcshell = command_context._spawn(AndroidXPCShellRunner)
+ else:
+ xpcshell = command_context._spawn(XPCShellRunner)
+ xpcshell.cwd = command_context._mach_context.cwd
+
+ try:
+ return xpcshell.run_test(**params)
+ except InvalidTestPathError as e:
+ print(str(e))
+ return 1
diff --git a/testing/xpcshell/mach_test_package_commands.py b/testing/xpcshell/mach_test_package_commands.py
new file mode 100644
index 0000000000..0e882435d8
--- /dev/null
+++ b/testing/xpcshell/mach_test_package_commands.py
@@ -0,0 +1,48 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+from argparse import Namespace
+from functools import partial
+
+import mozlog
+from mach.decorators import Command
+from xpcshellcommandline import parser_desktop
+
+
+def run_xpcshell(context, **kwargs):
+ args = Namespace(**kwargs)
+ args.appPath = args.appPath or os.path.dirname(context.firefox_bin)
+ args.utility_path = context.bin_dir
+ args.testingModulesDir = context.modules_dir
+
+ if not args.xpcshell:
+ args.xpcshell = os.path.join(args.appPath, "xpcshell")
+
+ log = mozlog.commandline.setup_logging(
+ "XPCShellTests", args, {"mach": sys.stdout}, {"verbose": True}
+ )
+
+ if args.testPaths:
+ test_root = os.path.join(context.package_root, "xpcshell", "tests")
+ normalize = partial(context.normalize_test_path, test_root)
+ # pylint --py3k: W1636
+ args.testPaths = list(map(normalize, args.testPaths))
+
+ import runxpcshelltests
+
+ xpcshell = runxpcshelltests.XPCShellTests(log=log)
+ return xpcshell.runTests(**vars(args))
+
+
+@Command(
+ "xpcshell-test",
+ category="testing",
+ description="Run the xpcshell harness.",
+ parser=parser_desktop,
+)
+def xpcshell(command_context, **kwargs):
+ command_context._mach_context.activate_mozharness_venv()
+ return run_xpcshell(command_context._mach_context, **kwargs)
diff --git a/testing/xpcshell/moz-http2/http2-cert.key b/testing/xpcshell/moz-http2/http2-cert.key
new file mode 100644
index 0000000000..09e044f5e0
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-cert.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6iFGoRI4W1kH9
+braIBjYQPTwT2erkNUq07PVoV2wke8HHJajg2B+9sZwGm24ahvJr4q9adWtqZHEI
+eqVap0WH9xzVJJwCfs1D/B5p0DggKZOrIMNJ5Nu5TMJrbA7tFYIP8X6taRqx0wI6
+iypB7qdw4A8Njf1mCyuwJJKkfbmIYXmQsVeQPdI7xeC4SB+oN9OIQ+8nFthVt2Za
+qn4CkC86exCABiTMHGyXrZZhW7filhLAdTGjDJHdtMr3/K0dJdMJ77kXDqdo4bN7
+LyJvaeO0ipVhHe4m1iWdq5EITjbLHCQELL8Wiy/l8Y+ZFzG4s/5JI/pyUcQx1QOs
+2hgKNe2NAgMBAAECggEBAJ7LzjhhpFTsseD+j4XdQ8kvWCXOLpl4hNDhqUnaosWs
+VZskBFDlrJ/gw+McDu+mUlpl8MIhlABO4atGPd6e6CKHzJPnRqkZKcXmrD2IdT9s
+JbpZeec+XY+yOREaPNq4pLDN9fnKsF8SM6ODNcZLVWBSXn47kq18dQTPHcfLAFeI
+r8vh6Pld90AqFRUw1YCDRoZOs3CqeZVqWHhiy1M3kTB/cNkcltItABppAJuSPGgz
+iMnzbLm16+ZDAgQceNkIIGuHAJy4yrrK09vbJ5L7kRss9NtmA1hb6a4Mo7jmQXqg
+SwbkcOoaO1gcoDpngckxW2KzDmAR8iRyWUbuxXxtlEECgYEA3W4dT//r9o2InE0R
+TNqqnKpjpZN0KGyKXCmnF7umA3VkTVyqZ0xLi8cyY1hkYiDkVQ12CKwn1Vttt0+N
+gSfvj6CQmLaRR94GVXNEfhg9Iv59iFrOtRPZWB3V4HwakPXOCHneExNx7O/JznLp
+xD3BJ9I4GQ3oEXc8pdGTAfSMdCsCgYEA16dz2evDgKdn0v7Ak0rU6LVmckB3Gs3r
+ta15b0eP7E1FmF77yVMpaCicjYkQL63yHzTi3UlA66jAnW0fFtzClyl3TEMnXpJR
+3b5JCeH9O/Hkvt9Go5uLODMo70rjuVuS8gcK8myefFybWH/t3gXo59hspXiG+xZY
+EKd7mEW8MScCgYEAlkcrQaYQwK3hryJmwWAONnE1W6QtS1oOtOnX6zWBQAul3RMs
+2xpekyjHu8C7sBVeoZKXLt+X0SdR2Pz2rlcqMLHqMJqHEt1OMyQdse5FX8CT9byb
+WS11bmYhR08ywHryL7J100B5KzK6JZC7smGu+5WiWO6lN2VTFb6cJNGRmS0CgYAo
+tFCnp1qFZBOyvab3pj49lk+57PUOOCPvbMjo+ibuQT+LnRIFVA8Su+egx2got7pl
+rYPMpND+KiIBFOGzXQPVqFv+Jwa9UPzmz83VcbRspiG47UfWBbvnZbCqSgZlrCU2
+TaIBVAMuEgS4VZ0+NPtbF3yaVv+TUQpaSmKHwVHeLQKBgCgGe5NVgB0u9S36ltit
+tYlnPPjuipxv9yruq+nva+WKT0q/BfeIlH3IUf2qNFQhR6caJGv7BU7naqNGq80m
+ks/J5ExR5vBpxzXgc7oBn2pyFJYckbJoccrqv48GRBigJpDjmo1f8wZ7fNt/ULH1
+NBinA5ZsT8d0v3QCr2xDJH9D
+-----END PRIVATE KEY-----
diff --git a/testing/xpcshell/moz-http2/http2-cert.key.keyspec b/testing/xpcshell/moz-http2/http2-cert.key.keyspec
new file mode 100644
index 0000000000..4ad96d5159
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-cert.key.keyspec
@@ -0,0 +1 @@
+default
diff --git a/testing/xpcshell/moz-http2/http2-cert.pem b/testing/xpcshell/moz-http2/http2-cert.pem
new file mode 100644
index 0000000000..1f89de1a45
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAfugAwIBAgIUCTTdK3eSofAM6mNwAi4Z4YUn8WEwDQYJKoZIhvcNAQEL
+BQAwGTEXMBUGA1UEAwwOIEhUVFAyIFRlc3QgQ0EwIhgPMjAxNzAxMDEwMDAwMDBa
+GA8yMDI3MDEwMTAwMDAwMFowGzEZMBcGA1UEAwwQIEhUVFAyIFRlc3QgQ2VydDCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALqIUahEjhbWQf1utogGNhA9
+PBPZ6uQ1SrTs9WhXbCR7wcclqODYH72xnAabbhqG8mvir1p1a2pkcQh6pVqnRYf3
+HNUknAJ+zUP8HmnQOCApk6sgw0nk27lMwmtsDu0Vgg/xfq1pGrHTAjqLKkHup3Dg
+Dw2N/WYLK7AkkqR9uYhheZCxV5A90jvF4LhIH6g304hD7ycW2FW3ZlqqfgKQLzp7
+EIAGJMwcbJetlmFbt+KWEsB1MaMMkd20yvf8rR0l0wnvuRcOp2jhs3svIm9p47SK
+lWEd7ibWJZ2rkQhONsscJAQsvxaLL+Xxj5kXMbiz/kkj+nJRxDHVA6zaGAo17Y0C
+AwEAAaNNMEswSQYDVR0RBEIwQIIJbG9jYWxob3N0gg9mb28uZXhhbXBsZS5jb22C
+EGFsdDEuZXhhbXBsZS5jb22CEGFsdDIuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQEL
+BQADggEBAE5aEiXOkvEYeWpMhkGheeeaKwgr44qiWJKC5N/8t+NprB3vNCbTMzE9
+09iWQh9EXbwMjMQ8H0uZwedek2sryxsTzxsdTC5qmEtxs/kbf0rTNUwQDjGHvzMk
+gO+ULESdLTcIFJ57olHaZaXtPGm2ELJAOiEpsYFTafmCEPXZ/b+UkGsSkuVLSOIA
+ClaIJgjff/ucvCvRwl79GzGDCoh3qpqhvxQpC/Fcdz1iQDYEVAmjgUrYJe1lTfj8
+ZozM1WIq8fQ3SCXTJK82CnX818tJio2PWq3uzb9vhpuxJJif7WoMP88Jpdh8zcEb
+YL15XPzhQMyor2p6XfwNI3J6347fd7U=
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/moz-http2/http2-cert.pem.certspec b/testing/xpcshell/moz-http2/http2-cert.pem.certspec
new file mode 100644
index 0000000000..69b3bc83e6
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-cert.pem.certspec
@@ -0,0 +1,4 @@
+issuer: HTTP2 Test CA
+subject: HTTP2 Test Cert
+validity:20170101-20270101
+extension:subjectAlternativeName:localhost,foo.example.com,alt1.example.com,alt2.example.com
diff --git a/testing/xpcshell/moz-http2/moz-http2-child.js b/testing/xpcshell/moz-http2/moz-http2-child.js
new file mode 100644
index 0000000000..c8f5d99669
--- /dev/null
+++ b/testing/xpcshell/moz-http2/moz-http2-child.js
@@ -0,0 +1,33 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* eslint-env node */
+
+function sendBackResponse(evalResult, e) {
+ const output = { result: evalResult, error: "", errorStack: "" };
+ if (e) {
+ output.error = e.toString();
+ output.errorStack = e.stack;
+ }
+ process.send(output);
+}
+
+process.on("message", msg => {
+ const code = msg.code;
+ let evalResult = null;
+ try {
+ // eslint-disable-next-line no-eval
+ evalResult = eval(code);
+ if (evalResult instanceof Promise) {
+ evalResult
+ .then(x => sendBackResponse(x))
+ .catch(e => sendBackResponse(undefined, e));
+ return;
+ }
+ } catch (e) {
+ sendBackResponse(undefined, e);
+ return;
+ }
+ sendBackResponse(evalResult);
+});
diff --git a/testing/xpcshell/moz-http2/moz-http2.js b/testing/xpcshell/moz-http2/moz-http2.js
new file mode 100644
index 0000000000..ebc18294ce
--- /dev/null
+++ b/testing/xpcshell/moz-http2/moz-http2.js
@@ -0,0 +1,2087 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This module is the stateful server side of test_http2.js and is meant
+// to have node restarted between invocations.
+
+/* eslint-env node */
+
+var node_http2_root = "../node-http2";
+if (process.env.NODE_HTTP2_ROOT) {
+ node_http2_root = process.env.NODE_HTTP2_ROOT;
+}
+var http2 = require(node_http2_root);
+var fs = require("fs");
+var url = require("url");
+var crypto = require("crypto");
+const dnsPacket = require(`${node_http2_root}/../dns-packet`);
+const ip = require(`${node_http2_root}/../node-ip`);
+const { fork } = require("child_process");
+const path = require("path");
+const zlib = require("zlib");
+const odoh = require(`${node_http2_root}/../odoh-wasm/pkg`);
+
+// Hook into the decompression code to log the decompressed name-value pairs
+var compression_module = node_http2_root + "/lib/protocol/compressor";
+var http2_compression = require(compression_module);
+var HeaderSetDecompressor = http2_compression.HeaderSetDecompressor;
+var originalRead = HeaderSetDecompressor.prototype.read;
+var lastDecompressor;
+var decompressedPairs;
+HeaderSetDecompressor.prototype.read = function() {
+ if (this != lastDecompressor) {
+ lastDecompressor = this;
+ decompressedPairs = [];
+ }
+ var pair = originalRead.apply(this, arguments);
+ if (pair) {
+ decompressedPairs.push(pair);
+ }
+ return pair;
+};
+
+var connection_module = node_http2_root + "/lib/protocol/connection";
+var http2_connection = require(connection_module);
+var Connection = http2_connection.Connection;
+var originalClose = Connection.prototype.close;
+Connection.prototype.close = function(error, lastId) {
+ if (lastId !== undefined) {
+ this._lastIncomingStream = lastId;
+ }
+
+ originalClose.apply(this, arguments);
+};
+
+var framer_module = node_http2_root + "/lib/protocol/framer";
+var http2_framer = require(framer_module);
+var Serializer = http2_framer.Serializer;
+var originalTransform = Serializer.prototype._transform;
+var newTransform = function(frame, encoding, done) {
+ if (frame.type == "DATA") {
+ // Insert our empty DATA frame
+ const emptyFrame = {};
+ emptyFrame.type = "DATA";
+ emptyFrame.data = Buffer.alloc(0);
+ emptyFrame.flags = [];
+ emptyFrame.stream = frame.stream;
+ var buffers = [];
+ Serializer.DATA(emptyFrame, buffers);
+ Serializer.commonHeader(emptyFrame, buffers);
+ for (var i = 0; i < buffers.length; i++) {
+ this.push(buffers[i]);
+ }
+
+ // Reset to the original version for later uses
+ Serializer.prototype._transform = originalTransform;
+ }
+ originalTransform.apply(this, arguments);
+};
+
+function getHttpContent(pathName) {
+ var content =
+ "<!doctype html>" +
+ "<html>" +
+ "<head><title>HOORAY!</title></head>" +
+ // 'You Win!' used in tests to check we reached this server
+ "<body>You Win! (by requesting" +
+ pathName +
+ ")</body>" +
+ "</html>";
+ return content;
+}
+
+function generateContent(size) {
+ var content = "";
+ for (var i = 0; i < size; i++) {
+ content += "0";
+ }
+ return content;
+}
+
+/* This takes care of responding to the multiplexed request for us */
+var m = {
+ mp1res: null,
+ mp2res: null,
+ buf: null,
+ mp1start: 0,
+ mp2start: 0,
+
+ checkReady() {
+ if (this.mp1res != null && this.mp2res != null) {
+ this.buf = generateContent(30 * 1024);
+ this.mp1start = 0;
+ this.mp2start = 0;
+ this.send(this.mp1res, 0);
+ setTimeout(this.send.bind(this, this.mp2res, 0), 5);
+ }
+ },
+
+ send(res, start) {
+ var end = Math.min(start + 1024, this.buf.length);
+ var content = this.buf.substring(start, end);
+ res.write(content);
+ if (end < this.buf.length) {
+ setTimeout(this.send.bind(this, res, end), 10);
+ } else {
+ // Clear these variables so we can run the test again with --verify
+ if (res == this.mp1res) {
+ this.mp1res = null;
+ } else {
+ this.mp2res = null;
+ }
+ res.end();
+ }
+ },
+};
+
+var runlater = function() {};
+runlater.prototype = {
+ req: null,
+ resp: null,
+ fin: true,
+
+ onTimeout: function onTimeout() {
+ this.resp.writeHead(200);
+ if (this.fin) {
+ this.resp.end("It's all good 750ms.");
+ }
+ },
+};
+
+var runConnectLater = function() {};
+runConnectLater.prototype = {
+ req: null,
+ resp: null,
+  connect: true,
+
+ onTimeout: function onTimeout() {
+ if (this.connect) {
+ this.resp.writeHead(200);
+      this.connect = false;
+ setTimeout(executeRunLaterCatchError, 50, this);
+ } else {
+ this.resp.end("HTTP/1.1 200\n\r\n\r");
+ }
+ },
+};
+
+var moreData = function() {};
+moreData.prototype = {
+ req: null,
+ resp: null,
+ iter: 3,
+
+ onTimeout: function onTimeout() {
+ // 1mb of data
+ const content = generateContent(1024 * 1024);
+ this.resp.write(content); // 1mb chunk
+ this.iter--;
+ if (!this.iter) {
+ this.resp.end();
+ } else {
+ setTimeout(executeRunLater, 1, this);
+ }
+ },
+};
+
+function executeRunLater(arg) {
+ arg.onTimeout();
+}
+
+function executeRunLaterCatchError(arg) {
+ arg.onTimeout();
+}
+
+var h11required_conn = null;
+var h11required_header = "yes";
+var didRst = false;
+var rstConnection = null;
+var illegalheader_conn = null;
+
+var gDoHPortsLog = [];
+var gDoHNewConnLog = {};
+var gDoHRequestCount = 0;
+
+// eslint-disable-next-line complexity
+function handleRequest(req, res) {
+ var u = "";
+ if (req.url != undefined) {
+ u = url.parse(req.url, true);
+ }
+ var content = getHttpContent(u.pathname);
+ var push, push1, push1a, push2, push3;
+
+ // PushService tests.
+ var pushPushServer1, pushPushServer2, pushPushServer3, pushPushServer4;
+
+ function createCNameContent(payload) {
+ let packet = dnsPacket.decode(payload);
+ if (
+ packet.questions[0].name == "cname.example.com" &&
+ packet.questions[0].type == "A"
+ ) {
+ return dnsPacket.encode({
+ id: 0,
+ type: "response",
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{ name: packet.questions[0].name, type: "A", class: "IN" }],
+ answers: [
+ {
+ name: packet.questions[0].name,
+ ttl: 55,
+ type: "CNAME",
+ flush: false,
+ data: "pointing-elsewhere.example.com",
+ },
+ ],
+ });
+ }
+ if (
+ packet.questions[0].name == "pointing-elsewhere.example.com" &&
+ packet.questions[0].type == "A"
+ ) {
+ return dnsPacket.encode({
+ id: 0,
+ type: "response",
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{ name: packet.questions[0].name, type: "A", class: "IN" }],
+ answers: [
+ {
+ name: packet.questions[0].name,
+ ttl: 55,
+ type: "A",
+ flush: false,
+ data: "99.88.77.66",
+ },
+ ],
+ });
+ }
+
+ return dnsPacket.encode({
+ id: 0,
+ type: "response",
+ flags: dnsPacket.RECURSION_DESIRED | dnsPacket.rcodes.toRcode("NXDOMAIN"),
+ questions: [
+ {
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ class: "IN",
+ },
+ ],
+ answers: [],
+ });
+ }
+
+ function createCNameARecord() {
+ // test23 asks for cname-a.example.com
+ // this responds with a CNAME to here.example.com *and* an A record
+ // for here.example.com
+ let rContent;
+
+ rContent = Buffer.from(
+ "0000" +
+ "0100" +
+ "0001" + // QDCOUNT
+ "0002" + // ANCOUNT
+ "00000000" + // NSCOUNT + ARCOUNT
+ "07636E616D652d61" + // cname-a
+ "076578616D706C6503636F6D00" + // .example.com
+ "00010001" + // question type (A) + question class (IN)
+ // answer record 1
+ "C00C" + // name pointer to cname-a.example.com
+ "0005" + // type (CNAME)
+ "0001" + // class
+ "00000037" + // TTL
+ "0012" + // RDLENGTH
+ "0468657265" + // here
+ "076578616D706C6503636F6D00" + // .example.com
+ // answer record 2, the A entry for the CNAME above
+ "0468657265" + // here
+ "076578616D706C6503636F6D00" + // .example.com
+ "0001" + // type (A)
+ "0001" + // class
+ "00000037" + // TTL
+ "0004" + // RDLENGTH
+ "09080706", // IPv4 address
+ "hex"
+ );
+
+ return rContent;
+ }
+
+ function responseType(packet, responseIP) {
+ if (
+ !!packet.questions.length &&
+ packet.questions[0].name == "confirm.example.com" &&
+ packet.questions[0].type == "NS"
+ ) {
+ return "NS";
+ }
+
+ return ip.isV4Format(responseIP) ? "A" : "AAAA";
+ }
+
+ function handleAuth() {
+    // There's a Set-Cookie: header in the response for "/dns", which this
+    // request would subsequently include if the http channel wasn't
+    // anonymous. Thus, if there's a cookie in this request, we know Firefox
+    // misbehaved. If there's not, we're fine.
+ if (req.headers.cookie) {
+ res.writeHead(403);
+ res.end("cookie for me, not for you");
+ return false;
+ }
+ if (req.headers.authorization != "user:password") {
+ res.writeHead(401);
+ res.end("bad boy!");
+ return false;
+ }
+
+ return true;
+ }
+
+ function createDNSAnswer(response, packet, responseIP, requestPayload) {
+ // This shuts down the connection so we can test if the client reconnects
+ if (packet.questions.length && packet.questions[0].name == "closeme.com") {
+ response.stream.connection.close("INTERNAL_ERROR", response.stream.id);
+ return null;
+ }
+
+ if (packet.questions.length && packet.questions[0].name.endsWith(".pd")) {
+ // Bug 1543811: test edns padding extension. Return whether padding was
+ // included via the first half of the ip address (1.1 vs 2.2) and the
+      // size of the request in the second half of the ip address, allowing the
+      // test to verify that the correct amount of padding was added.
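+      // For example, a padded request of 300 bytes is answered with
+      // responseIP 1.1.1.44 (300 = 1 * 256 + 44); without padding it
+      // would be 2.2.1.44.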
+ if (
+ !!packet.additionals.length &&
+ packet.additionals[0].type == "OPT" &&
+ packet.additionals[0].options.some(o => o.type === "PADDING")
+ ) {
+ responseIP =
+ "1.1." +
+ ((requestPayload.length >> 8) & 0xff) +
+ "." +
+ (requestPayload.length & 0xff);
+ } else {
+ responseIP =
+ "2.2." +
+ ((requestPayload.length >> 8) & 0xff) +
+ "." +
+ (requestPayload.length & 0xff);
+ }
+ }
+
+ if (u.query.corruptedAnswer) {
+      // The DNS response header is 12 bytes; we check for this minimum length
+      // at the start of decoding, so this is the simplest way to force
+ // a decode error.
+ return "\xFF\xFF\xFF\xFF";
+ }
+
+ // Because we send two TRR requests (A and AAAA), skip the first two
+ // requests when testing retry.
+ if (u.query.retryOnDecodeFailure && gDoHRequestCount < 2) {
+ gDoHRequestCount++;
+ return "\xFF\xFF\xFF\xFF";
+ }
+
+ function responseData() {
+ if (
+ !!packet.questions.length &&
+ packet.questions[0].name == "confirm.example.com" &&
+ packet.questions[0].type == "NS"
+ ) {
+ return "ns.example.com";
+ }
+
+ return responseIP;
+ }
+
+ let answers = [];
+ if (
+ responseIP != "none" &&
+ responseType(packet, responseIP) == packet.questions[0].type
+ ) {
+ answers.push({
+ name: u.query.hostname ? u.query.hostname : packet.questions[0].name,
+ ttl: 55,
+ type: responseType(packet, responseIP),
+ flush: false,
+ data: responseData(),
+ });
+ }
+
+ // for use with test_dns_by_type_resolve.js
+ if (packet.questions[0].type == "TXT") {
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: Buffer.from(
+ "62586B67646D39705932556761584D6762586B676347467A63336476636D513D",
+ "hex"
+ ),
+ });
+ }
+
+ if (u.query.cnameloop) {
+ answers.push({
+ name: "cname.example.com",
+ type: "CNAME",
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: "pointing-elsewhere.example.com",
+ });
+ }
+
+ if (req.headers["accept-language"] || req.headers["user-agent"]) {
+ // If we get this header, don't send back any response. This should
+      // cause the tests to fail. This is easier than actually sending the
+      // header value back to test_trr.js.
+ answers = [];
+ }
+
+ let buf = dnsPacket.encode({
+ type: "response",
+ id: packet.id,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: packet.questions,
+ answers,
+ });
+
+ return buf;
+ }
+
+ function getDelayFromPacket(packet, type) {
+ let delay = 0;
+ if (packet.questions[0].type == "A") {
+ delay = parseInt(u.query.delayIPv4);
+ } else if (packet.questions[0].type == "AAAA") {
+ delay = parseInt(u.query.delayIPv6);
+ }
+
+ if (u.query.slowConfirm && type == "NS") {
+ delay += 1000;
+ }
+
+ return delay;
+ }
+
+ function writeDNSResponse(response, buf, delay, contentType) {
+ function writeResponse(resp, buffer) {
+ resp.setHeader("Set-Cookie", "trackyou=yes; path=/; max-age=100000;");
+ resp.setHeader("Content-Type", contentType);
+ if (req.headers["accept-encoding"].includes("gzip")) {
+ zlib.gzip(buffer, function(err, result) {
+ resp.setHeader("Content-Encoding", "gzip");
+ resp.setHeader("Content-Length", result.length);
+ try {
+ resp.writeHead(200);
+ resp.end(result);
+ } catch (e) {
+ // connection was closed by the time we started writing.
+ }
+ });
+ } else {
+ const output = Buffer.from(buffer, "utf-8");
+ resp.setHeader("Content-Length", output.length);
+ try {
+ resp.writeHead(200);
+ resp.write(output);
+ resp.end("");
+ } catch (e) {
+ // connection was closed by the time we started writing.
+ }
+ }
+ }
+
+ if (delay) {
+ setTimeout(
+ arg => {
+ writeResponse(arg[0], arg[1]);
+ },
+ delay,
+ [response, buf]
+ );
+ return;
+ }
+
+ writeResponse(response, buf);
+ }
+
+ if (req.httpVersionMajor === 2) {
+ res.setHeader("X-Connection-Http2", "yes");
+ res.setHeader("X-Http2-StreamId", "" + req.stream.id);
+ } else {
+ res.setHeader("X-Connection-Http2", "no");
+ }
+
+ if (u.pathname === "/exit") {
+ res.setHeader("Content-Type", "text/plain");
+ res.setHeader("Connection", "close");
+ res.writeHead(200);
+ res.end("ok");
+ process.exit();
+ }
+
+ if (req.method == "CONNECT") {
+ if (req.headers.host == "illegalhpacksoft.example.com:80") {
+ illegalheader_conn = req.stream.connection;
+ res.setHeader("Content-Type", "text/html");
+ res.setHeader("x-softillegalhpack", "true");
+ res.writeHead(200);
+ res.end(content);
+ return;
+ } else if (req.headers.host == "illegalhpackhard.example.com:80") {
+ res.setHeader("Content-Type", "text/html");
+ res.setHeader("x-hardillegalhpack", "true");
+ res.writeHead(200);
+ res.end(content);
+ return;
+ } else if (req.headers.host == "750.example.com:80") {
+      // This response mocks a response through a proxy to an HTTP server.
+      // After 750ms, a 200 response for the proxy is sent, then after an
+      // additional 50ms, a 200 response for the HTTP GET request.
+ let rl = new runConnectLater();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLaterCatchError, 750, rl);
+ return;
+ } else if (req.headers.host == "h11required.com:80") {
+ if (req.httpVersionMajor === 2) {
+ res.stream.reset("HTTP_1_1_REQUIRED");
+ }
+ return;
+ }
+ } else if (u.pathname === "/750ms") {
+ let rl = new runlater();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 750, rl);
+ return;
+ } else if (u.pathname === "/750msNoData") {
+ let rl = new runlater();
+ rl.req = req;
+ rl.resp = res;
+ rl.fin = false;
+ setTimeout(executeRunLater, 750, rl);
+ return;
+ } else if (u.pathname === "/multiplex1" && req.httpVersionMajor === 2) {
+ res.setHeader("Content-Type", "text/plain");
+ res.writeHead(200);
+ m.mp1res = res;
+ m.checkReady();
+ return;
+ } else if (u.pathname === "/multiplex2" && req.httpVersionMajor === 2) {
+ res.setHeader("Content-Type", "text/plain");
+ res.writeHead(200);
+ m.mp2res = res;
+ m.checkReady();
+ return;
+ } else if (u.pathname === "/header") {
+ var val = req.headers["x-test-header"];
+ if (val) {
+ res.setHeader("X-Received-Test-Header", val);
+ }
+ } else if (u.pathname === "/doubleheader") {
+ res.setHeader("Content-Type", "text/html");
+ res.writeHead(200);
+ res.write(content);
+ res.writeHead(200);
+ res.end();
+ return;
+ } else if (u.pathname === "/cookie_crumbling") {
+ res.setHeader("X-Received-Header-Pairs", JSON.stringify(decompressedPairs));
+ } else if (u.pathname === "/push") {
+ push = res.push("/push.js");
+ push.writeHead(200, {
+ "content-type": "application/javascript",
+ pushed: "yes",
+ "content-length": 11,
+ "X-Connection-Http2": "yes",
+ });
+ push.end("// comments");
+ content = '<head> <script src="push.js"/></head>body text';
+ } else if (u.pathname === "/push.js") {
+ content = "// comments";
+ res.setHeader("pushed", "no");
+ } else if (u.pathname === "/push2") {
+ push = res.push("/push2.js");
+ push.writeHead(200, {
+ "content-type": "application/javascript",
+ pushed: "yes",
+ // no content-length
+ "X-Connection-Http2": "yes",
+ });
+ push.end("// comments");
+ content = '<head> <script src="push2.js"/></head>body text';
+ } else if (u.pathname === "/push5") {
+ push = res.push("/push5.js");
+ push.writeHead(200, {
+ "content-type": "application/javascript",
+ pushed: "yes",
+ // no content-length
+ "X-Connection-Http2": "yes",
+ });
+ content = generateContent(1024 * 150);
+ push.write(content);
+ push.end();
+ content = '<head> <script src="push5.js"/></head>body text';
+ } else if (u.pathname === "/pushapi1") {
+ push1 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushapi1/1",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "x-foo": "bar" },
+ });
+ push1.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ subresource: "1",
+ "X-Connection-Http2": "yes",
+ });
+ push1.end("1");
+
+ push1a = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushapi1/1",
+ method: "GET",
+ headers: { "x-foo": "bar", "x-pushed-request": "true" },
+ });
+ push1a.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ subresource: "1a",
+ "X-Connection-Http2": "yes",
+ });
+ push1a.end("1");
+
+ push2 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushapi1/2",
+ method: "GET",
+ headers: { "x-pushed-request": "true" },
+ });
+ push2.writeHead(200, {
+ pushed: "yes",
+ subresource: "2",
+ "content-length": 1,
+ "X-Connection-Http2": "yes",
+ });
+ push2.end("2");
+
+ push3 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushapi1/3",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "Accept-Encoding": "br" },
+ });
+ push3.writeHead(200, {
+ pushed: "yes",
+ "content-length": 6,
+ subresource: "3",
+ "content-encoding": "br",
+ "X-Connection-Http2": "yes",
+ });
+ push3.end(Buffer.from([0x8b, 0x00, 0x80, 0x33, 0x0a, 0x03])); // '3\n'
+
+ content = "0";
+ } else if (u.pathname === "/big") {
+ content = generateContent(128 * 1024);
+ var hash = crypto.createHash("md5");
+ hash.update(content);
+ let md5 = hash.digest("hex");
+ res.setHeader("X-Expected-MD5", md5);
+ } else if (u.pathname === "/huge") {
+ content = generateContent(1024);
+ res.setHeader("Content-Type", "text/plain");
+ res.writeHead(200);
+ // 1mb of data
+ for (let i = 0; i < 1024 * 1; i++) {
+ res.write(content); // 1kb chunk
+ }
+ res.end();
+ return;
+ } else if (u.pathname === "/post" || u.pathname === "/patch") {
+ if (req.method != "POST" && req.method != "PATCH") {
+ res.writeHead(405);
+ res.end("Unexpected method: " + req.method);
+ return;
+ }
+
+ var post_hash = crypto.createHash("md5");
+ var received_data = false;
+ req.on("data", function receivePostData(chunk) {
+ received_data = true;
+ post_hash.update(chunk.toString());
+ });
+ req.on("end", function finishPost() {
+ let md5 = received_data ? post_hash.digest("hex") : "0";
+ res.setHeader("X-Calculated-MD5", md5);
+ res.writeHead(200);
+ res.end(content);
+ });
+
+ return;
+ } else if (u.pathname === "/750msPost") {
+ if (req.method != "POST") {
+ res.writeHead(405);
+ res.end("Unexpected method: " + req.method);
+ return;
+ }
+
+ var accum = 0;
+ req.on("data", function receivePostData(chunk) {
+ accum += chunk.length;
+ });
+ req.on("end", function finishPost() {
+ res.setHeader("X-Recvd", accum);
+ let rl = new runlater();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 750, rl);
+ });
+
+ return;
+ } else if (u.pathname === "/h11required_stream") {
+ if (req.httpVersionMajor === 2) {
+ h11required_conn = req.stream.connection;
+ res.stream.reset("HTTP_1_1_REQUIRED");
+ return;
+ }
+ } else if (u.pathname === "/bigdownload") {
+ res.setHeader("Content-Type", "text/html");
+ res.writeHead(200);
+
+ let rl = new moreData();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 1, rl);
+ return;
+ } else if (u.pathname === "/h11required_session") {
+ if (req.httpVersionMajor === 2) {
+ if (h11required_conn !== req.stream.connection) {
+ h11required_header = "no";
+ }
+ res.stream.connection.close("HTTP_1_1_REQUIRED", res.stream.id - 2);
+ return;
+ }
+ res.setHeader("X-H11Required-Stream-Ok", h11required_header);
+ } else if (u.pathname === "/rstonce") {
+ if (!didRst && req.httpVersionMajor === 2) {
+ didRst = true;
+ rstConnection = req.stream.connection;
+ req.stream.reset("REFUSED_STREAM");
+ return;
+ }
+
+ if (rstConnection === null || rstConnection !== req.stream.connection) {
+ if (req.httpVersionMajor != 2) {
+ res.setHeader("Connection", "close");
+ }
+ res.writeHead(400);
+ res.end("WRONG CONNECTION, HOMIE!");
+ return;
+ }
+
+ // Clear these variables so we can run the test again with --verify
+ didRst = false;
+ rstConnection = null;
+
+ if (req.httpVersionMajor != 2) {
+ res.setHeader("Connection", "close");
+ }
+ res.writeHead(200);
+ res.end("It's all good.");
+ return;
+ } else if (u.pathname === "/continuedheaders") {
+ var pushRequestHeaders = { "x-pushed-request": "true" };
+ var pushResponseHeaders = {
+ "content-type": "text/plain",
+ "content-length": "2",
+ "X-Connection-Http2": "yes",
+ };
+ var pushHdrTxt =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ var pullHdrTxt = pushHdrTxt
+ .split("")
+ .reverse()
+ .join("");
+ for (let i = 0; i < 265; i++) {
+ pushRequestHeaders["X-Push-Test-Header-" + i] = pushHdrTxt;
+ res.setHeader("X-Pull-Test-Header-" + i, pullHdrTxt);
+ }
+ push = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/continuedheaders/push",
+ method: "GET",
+ headers: pushRequestHeaders,
+ });
+ push.writeHead(200, pushResponseHeaders);
+ push.end("ok");
+ } else if (u.pathname === "/altsvc1") {
+ if (
+ req.httpVersionMajor != 2 ||
+ req.scheme != "http" ||
+ req.headers["alt-used"] != "foo.example.com:" + serverPort
+ ) {
+ res.writeHead(400);
+ res.end("WHAT?");
+ return;
+ }
+ // test the alt svc frame for use with altsvc2
+ res.altsvc(
+ "foo.example.com",
+ serverPort,
+ "h2",
+ 3600,
+ req.headers["x-redirect-origin"]
+ );
+ } else if (u.pathname === "/altsvc2") {
+ if (
+ req.httpVersionMajor != 2 ||
+ req.scheme != "http" ||
+ req.headers["alt-used"] != "foo.example.com:" + serverPort
+ ) {
+ res.writeHead(400);
+ res.end("WHAT?");
+ return;
+ }
+ }
+
+ // for use with test_altsvc.js
+ else if (u.pathname === "/altsvc-test") {
+ res.setHeader("Cache-Control", "no-cache");
+ res.setHeader("Alt-Svc", "h2=" + req.headers["x-altsvc"]);
+ }
+ // for use with test_http3.js
+ else if (u.pathname === "/http3-test") {
+ res.setHeader("Cache-Control", "no-cache");
+ res.setHeader("Alt-Svc", "h3-29=" + req.headers["x-altsvc"]);
+ }
+ // for use with test_http3.js
+ else if (u.pathname === "/http3-test2") {
+ res.setHeader("Cache-Control", "no-cache");
+ res.setHeader(
+ "Alt-Svc",
+ "h2=foo2.example.com:8000,h3-29=" +
+ req.headers["x-altsvc"] +
+ ",h3-30=foo2.example.com:8443"
+ );
+ }
+ // for use with test_trr.js
+ else if (u.pathname === "/dns-cname") {
+ // asking for cname.example.com
+
+ function emitResponse(response, payload) {
+ let pcontent = createCNameContent(payload);
+ response.setHeader("Content-Type", "application/dns-message");
+ response.setHeader("Content-Length", pcontent.length);
+ response.writeHead(200);
+ response.write(pcontent);
+ response.end("");
+ }
+
+ let payload = Buffer.from("");
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ emitResponse(res, payload);
+ });
+ return;
+ } else if (u.pathname == "/get-doh-req-port-log") {
+ let rContent = JSON.stringify(gDoHPortsLog);
+ res.setHeader("Content-Type", "text/plain");
+ res.setHeader("Content-Length", rContent.length);
+ res.writeHead(400);
+ res.end(rContent);
+ return;
+ } else if (u.pathname == "/reset-doh-request-count") {
+ gDoHRequestCount = 0;
+ res.setHeader("Content-Type", "text/plain");
+ res.setHeader("Content-Length", "ok".length);
+ res.writeHead(200);
+ res.write("ok");
+ res.end("");
+ return;
+ } else if (u.pathname == "/doh") {
+ let responseIP = u.query.responseIP;
+ if (!responseIP) {
+ responseIP = "5.5.5.5";
+ }
+
+ let redirect = u.query.redirect;
+ if (redirect) {
+ responseIP = redirect;
+ if (u.query.dns) {
+ res.setHeader(
+ "Location",
+ "https://localhost:" +
+ serverPort +
+ "/doh?responseIP=" +
+ responseIP +
+ "&dns=" +
+ u.query.dns
+ );
+ } else {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/doh?responseIP=" + responseIP
+ );
+ }
+ res.writeHead(307);
+ res.end("");
+ return;
+ }
+
+ if (u.query.auth) {
+ if (!handleAuth()) {
+ return;
+ }
+ }
+
+ if (u.query.noResponse) {
+ return;
+ }
+
+ if (u.query.push) {
+ // push.example.org has AAAA entry 2018::2018
+ let pcontent = dnsPacket.encode({
+ id: 0,
+ type: "response",
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{ name: "push.example.org", type: "AAAA", class: "IN" }],
+ answers: [
+ {
+ name: "push.example.org",
+ type: "AAAA",
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: "2018::2018",
+ },
+ ],
+ });
+ push = res.push({
+ hostname: "foo.example.com:" + serverPort,
+ port: serverPort,
+ path:
+ "/dns-pushed-response?dns=AAAAAAABAAAAAAAABHB1c2gHZXhhbXBsZQNvcmcAABwAAQ",
+ method: "GET",
+ headers: {
+ accept: "application/dns-message",
+ },
+ });
+ push.writeHead(200, {
+ "content-type": "application/dns-message",
+ pushed: "yes",
+ "content-length": pcontent.length,
+ "X-Connection-Http2": "yes",
+ });
+ push.end(pcontent);
+ }
+
+ let payload = Buffer.from("");
+
+ function emitResponse(response, requestPayload, decodedPacket, delay) {
+ let packet = decodedPacket || dnsPacket.decode(requestPayload);
+ let answer = createDNSAnswer(
+ response,
+ packet,
+ responseIP,
+ requestPayload
+ );
+ if (!answer) {
+ return;
+ }
+ writeDNSResponse(
+ response,
+ answer,
+ delay || getDelayFromPacket(packet, responseType(packet, responseIP)),
+ "application/dns-message"
+ );
+ }
+
+ if (u.query.dns) {
+ payload = Buffer.from(u.query.dns, "base64");
+ emitResponse(res, payload);
+ return;
+ }
+
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+      // payload is empty when we send a redirect response.
+ if (payload.length) {
+ let packet = dnsPacket.decode(payload);
+ let delay;
+ if (u.query.conncycle) {
+ let name = packet.questions[0].name;
+ if (name.startsWith("newconn")) {
+ // If we haven't seen a req for this newconn name before,
+ // or if we've seen one for the same name on the same port,
+ // synthesize a timeout.
+ if (
+ !gDoHNewConnLog[name] ||
+ gDoHNewConnLog[name] == req.remotePort
+ ) {
+ delay = 1000;
+ }
+ if (!gDoHNewConnLog[name]) {
+ gDoHNewConnLog[name] = req.remotePort;
+ }
+ }
+ gDoHPortsLog.push([packet.questions[0].name, req.remotePort]);
+ } else {
+ gDoHPortsLog = [];
+ gDoHNewConnLog = {};
+ }
+ emitResponse(res, payload, packet, delay);
+ }
+ });
+ return;
+ } else if (u.pathname === "/httpssvc") {
+ let payload = Buffer.from("");
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ let packet = dnsPacket.decode(payload);
+ let answers = [];
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority: 1,
+ name: "h3pool",
+ values: [
+ { key: "alpn", value: ["h2", "h3"] },
+ { key: "no-default-alpn" },
+ { key: "port", value: 8888 },
+ { key: "ipv4hint", value: "1.2.3.4" },
+ { key: "echconfig", value: "123..." },
+ { key: "ipv6hint", value: "::1" },
+ { key: 30, value: "somelargestring" },
+ { key: "odoh", value: "456..." },
+ ],
+ },
+ });
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority: 2,
+ name: ".",
+ values: [
+ { key: "alpn", value: "h2" },
+ { key: "ipv4hint", value: ["1.2.3.4", "5.6.7.8"] },
+ { key: "echconfig", value: "abc..." },
+ { key: "ipv6hint", value: ["::1", "fe80::794f:6d2c:3d5e:7836"] },
+ { key: "odoh", value: "def..." },
+ ],
+ },
+ });
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority: 3,
+ name: "hello",
+ values: [],
+ },
+ });
+ let buf = dnsPacket.encode({
+ type: "response",
+ id: packet.id,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: packet.questions,
+ answers,
+ });
+
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", buf.length);
+ res.writeHead(200);
+ res.write(buf);
+ res.end("");
+ });
+ return;
+ } else if (u.pathname === "/odohconfig") {
+ let payload = Buffer.from("");
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ let answers = [];
+ let odohconfig;
+ if (u.query.invalid) {
+ if (u.query.invalid === "empty") {
+ odohconfig = Buffer.from("");
+ } else if (u.query.invalid === "version") {
+ odohconfig = Buffer.from(
+ "002cff030028002000010001002021c8c16355091b28d521cb196627297955c1b607a3dcf1f136534578460d077d",
+ "hex"
+ );
+ } else if (u.query.invalid === "configLength") {
+ odohconfig = Buffer.from(
+ "002cff040028002000010001002021c8c16355091b28d521cb196627297955c1b607a3dcf1f136534578460d07",
+ "hex"
+ );
+ } else if (u.query.invalid === "totalLength") {
+ odohconfig = Buffer.from(
+ "012cff030028002000010001002021c8c16355091b28d521cb196627297955c1b607a3dcf1f136534578460d077d",
+ "hex"
+ );
+ } else if (u.query.invalid === "kemId") {
+ odohconfig = Buffer.from(
+ "002cff040028002100010001002021c8c16355091b28d521cb196627297955c1b607a3dcf1f136534578460d077d",
+ "hex"
+ );
+ }
+ } else {
+ odohconfig = odoh.get_odoh_config();
+ }
+
+ if (u.query.downloadFrom === "http") {
+ res.writeHead(200);
+ res.write(odohconfig);
+ res.end("");
+ } else {
+ var b64encoded = Buffer.from(odohconfig).toString("base64");
+ let packet = dnsPacket.decode(payload);
+ if (
+ u.query.failConfirmation == "true" &&
+ packet.questions[0].type == "NS" &&
+ packet.questions[0].name == "example.com"
+ ) {
+ res.writeHead(200);
+ res.write("<12bytes");
+ res.end("");
+ return;
+ }
+ if (packet.questions[0].type == "HTTPS") {
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: u.query.ttl ? u.query.ttl : 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority: 1,
+ name: packet.questions[0].name,
+ values: [
+ {
+ key: "odoh",
+ value: b64encoded,
+ needBase64Decode: true,
+ },
+ ],
+ },
+ });
+ }
+
+ let buf = dnsPacket.encode({
+ type: "response",
+ id: packet.id,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: packet.questions,
+ answers,
+ });
+
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", buf.length);
+ res.writeHead(200);
+ res.write(buf);
+ res.end("");
+ }
+ });
+ return;
+ } else if (u.pathname === "/odoh") {
+ let responseIP = u.query.responseIP;
+ if (!responseIP) {
+ responseIP = "5.5.5.5";
+ }
+
+ if (u.query.auth) {
+ if (!handleAuth()) {
+ return;
+ }
+ }
+
+ if (u.query.noResponse) {
+ return;
+ }
+
+ let payload = Buffer.from("");
+
+ function emitResponse(response, requestPayload) {
+ let decryptedQuery = odoh.decrypt_query(requestPayload);
+ let packet = dnsPacket.decode(Buffer.from(decryptedQuery.buffer));
+ let answer = createDNSAnswer(
+ response,
+ packet,
+ responseIP,
+ requestPayload
+ );
+ if (!answer) {
+ return;
+ }
+
+ let encryptedResponse = odoh.create_response(answer);
+ writeDNSResponse(
+ response,
+ encryptedResponse,
+ getDelayFromPacket(packet, responseType(packet, responseIP)),
+ "application/oblivious-dns-message"
+ );
+ }
+
+ if (u.query.dns) {
+ payload = Buffer.from(u.query.dns, "base64");
+ emitResponse(res, payload);
+ return;
+ }
+
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ if (u.query.httpError) {
+ res.writeHead(404);
+ res.end("Not Found");
+ return;
+ }
+
+ if (u.query.cname) {
+ let decryptedQuery = odoh.decrypt_query(payload);
+ let rContent;
+ if (u.query.cname === "ARecord") {
+ rContent = createCNameARecord();
+ } else {
+ rContent = createCNameContent(Buffer.from(decryptedQuery.buffer));
+ }
+ let encryptedResponse = odoh.create_response(rContent);
+ res.setHeader("Content-Type", "application/oblivious-dns-message");
+ res.setHeader("Content-Length", encryptedResponse.length);
+ res.writeHead(200);
+ res.write(encryptedResponse);
+ res.end("");
+ return;
+ }
+      // payload is empty when we send a redirect response.
+ if (payload.length) {
+ emitResponse(res, payload);
+ }
+ });
+ return;
+ } else if (u.pathname === "/httpssvc_as_altsvc") {
+ let payload = Buffer.from("");
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ let packet = dnsPacket.decode(payload);
+ let answers = [];
+ if (packet.questions[0].type == "HTTPS") {
+ let priority = 1;
+ // Set an invalid priority to test the case when receiving a corrupted
+ // response.
+ if (packet.questions[0].name === "foo.notexisted.com") {
+ priority = 0;
+ }
+ answers.push({
+ name: packet.questions[0].name,
+ type: packet.questions[0].type,
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority,
+ name: "foo.example.com",
+ values: [
+ { key: "alpn", value: "h2" },
+ { key: "port", value: serverPort },
+ { key: 30, value: "somelargestring" },
+ ],
+ },
+ });
+ } else {
+ answers.push({
+ name: packet.questions[0].name,
+ type: "A",
+ ttl: 55,
+ flush: false,
+ data: "127.0.0.1",
+ });
+ }
+
+ let buf = dnsPacket.encode({
+ type: "response",
+ id: packet.id,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: packet.questions,
+ answers,
+ });
+
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", buf.length);
+ res.writeHead(200);
+ res.write(buf);
+ res.end("");
+ });
+ return;
+ } else if (u.pathname === "/httpssvc_use_iphint") {
+ let payload = Buffer.from("");
+ req.on("data", function receiveData(chunk) {
+ payload = Buffer.concat([payload, chunk]);
+ });
+ req.on("end", function finishedData() {
+ let packet = dnsPacket.decode(payload);
+ let answers = [];
+ answers.push({
+ name: packet.questions[0].name,
+ type: "HTTPS",
+ ttl: 55,
+ class: "IN",
+ flush: false,
+ data: {
+ priority: 1,
+ name: ".",
+ values: [
+ { key: "alpn", value: "h2" },
+ { key: "port", value: serverPort },
+ { key: "ipv4hint", value: "127.0.0.1" },
+ ],
+ },
+ });
+
+ let buf = dnsPacket.encode({
+ type: "response",
+ id: packet.id,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: packet.questions,
+ answers,
+ });
+
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", buf.length);
+ res.writeHead(200);
+ res.write(buf);
+ res.end("");
+ });
+ return;
+ } else if (u.pathname === "/dns-cname-a") {
+ let rContent = createCNameARecord();
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", rContent.length);
+ res.writeHead(200);
+ res.write(rContent);
+ res.end("");
+ return;
+ } else if (u.pathname === "/websocket") {
+ res.setHeader("Upgrade", "websocket");
+ res.setHeader("Connection", "Upgrade");
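+    // Compute the Sec-WebSocket-Accept value per RFC 6455: base64(SHA-1 of the
+    // client's Sec-WebSocket-Key concatenated with the fixed WebSocket GUID).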
+ var wshash = crypto.createHash("sha1");
+ wshash.update(req.headers["sec-websocket-key"]);
+ wshash.update("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
+ let key = wshash.digest("base64");
+ res.setHeader("Sec-WebSocket-Accept", key);
+ res.writeHead(101);
+ res.end("something....");
+ return;
+ }
+ // for use with test_dns_by_type_resolve.js
+ else if (u.pathname === "/txt-dns-push") {
+ // _esni_push.example.com has A entry 127.0.0.1
+ let rContent = Buffer.from(
+ "0000010000010001000000000A5F65736E695F70757368076578616D706C6503636F6D0000010001C00C000100010000003700047F000001",
+ "hex"
+ );
+
+ // _esni_push.example.com has TXT entry 2062586B67646D39705932556761584D6762586B676347467A63336476636D513D
+ var pcontent = Buffer.from(
+ "0000818000010001000000000A5F65736E695F70757368076578616D706C6503636F6D0000100001C00C001000010000003700212062586B67646D39705932556761584D6762586B676347467A63336476636D513D",
+ "hex"
+ );
+
+ push = res.push({
+ hostname: "foo.example.com:" + serverPort,
+ port: serverPort,
+ path:
+ "/dns-pushed-response?dns=AAABAAABAAAAAAABCl9lc25pX3B1c2gHZXhhbXBsZQNjb20AABAAAQAAKRAAAAAAAAAIAAgABAABAAA",
+ method: "GET",
+ headers: {
+ accept: "application/dns-message",
+ },
+ });
+ push.writeHead(200, {
+ "content-type": "application/dns-message",
+ pushed: "yes",
+ "content-length": pcontent.length,
+ "X-Connection-Http2": "yes",
+ });
+ push.end(pcontent);
+ res.setHeader("Content-Type", "application/dns-message");
+ res.setHeader("Content-Length", rContent.length);
+ res.writeHead(200);
+ res.write(rContent);
+ res.end("");
+ return;
+ } else if (u.pathname === "/.well-known/http-opportunistic") {
+ res.setHeader("Cache-Control", "no-cache");
+ res.setHeader("Content-Type", "application/json");
+ res.writeHead(200, "OK");
+ res.end('["http://' + req.headers.host + '"]');
+ return;
+ } else if (u.pathname === "/stale-while-revalidate-loop-test") {
+ res.setHeader(
+ "Cache-Control",
+ "s-maxage=86400, stale-while-revalidate=86400, immutable"
+ );
+ res.setHeader("Content-Type", "text/plain; charset=utf-8");
+ res.setHeader("X-Content-Type-Options", "nosniff");
+ res.setHeader("Content-Length", "1");
+ res.writeHead(200, "OK");
+ res.end("1");
+ return;
+ }
+
+ // for PushService tests.
+ else if (u.pathname === "/pushSubscriptionSuccess/subscribe") {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/pushSubscriptionSuccesss"
+ );
+ res.setHeader(
+ "Link",
+ '</pushEndpointSuccess>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointSuccess>; rel="urn:ietf:params:push:receipt"'
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushSubscriptionSuccesss") {
+ // do nothing.
+ return;
+ } else if (u.pathname === "/pushSubscriptionMissingLocation/subscribe") {
+ res.setHeader(
+ "Link",
+ '</pushEndpointMissingLocation>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointMissingLocation>; rel="urn:ietf:params:push:receipt"'
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushSubscriptionMissingLink/subscribe") {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/subscriptionMissingLink"
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushSubscriptionLocationBogus/subscribe") {
+ res.setHeader("Location", "1234");
+ res.setHeader(
+ "Link",
+ '</pushEndpointLocationBogus; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointLocationBogus>; rel="urn:ietf:params:push:receipt"'
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushSubscriptionMissingLink1/subscribe") {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/subscriptionMissingLink1"
+ );
+ res.setHeader(
+ "Link",
+ '</receiptPushEndpointMissingLink1>; rel="urn:ietf:params:push:receipt"'
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushSubscriptionMissingLink2/subscribe") {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/subscriptionMissingLink2"
+ );
+ res.setHeader(
+ "Link",
+ '</pushEndpointMissingLink2>; rel="urn:ietf:params:push"'
+ );
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/subscriptionMissingLink2") {
+ // do nothing.
+ return;
+ } else if (u.pathname === "/pushSubscriptionNot201Code/subscribe") {
+ res.setHeader(
+ "Location",
+ "https://localhost:" + serverPort + "/subscriptionNot2xxCode"
+ );
+ res.setHeader(
+ "Link",
+ '</pushEndpointNot201Code>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointNot201Code>; rel="urn:ietf:params:push:receipt"'
+ );
+ res.writeHead(200, "OK");
+ res.end("");
+ return;
+ } else if (u.pathname === "/pushNotifications/subscription1") {
+ pushPushServer1 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushNotificationsDeliver1",
+ method: "GET",
+ headers: {
+ "Encryption-Key":
+ 'keyid="notification1"; dh="BO_tgGm-yvYAGLeRe16AvhzaUcpYRiqgsGOlXpt0DRWDRGGdzVLGlEVJMygqAUECarLnxCiAOHTP_znkedrlWoU"',
+ Encryption: 'keyid="notification1";salt="uAZaiXpOSfOLJxtOCZ09dA"',
+ "Content-Encoding": "aesgcm128",
+ },
+ });
+ pushPushServer1.writeHead(200, {
+ subresource: "1",
+ });
+
+ pushPushServer1.end(
+ "370aeb3963f12c4f12bf946bd0a7a9ee7d3eaff8f7aec62b530fc25cfa",
+ "hex"
+ );
+ return;
+ } else if (u.pathname === "/pushNotifications/subscription2") {
+ pushPushServer2 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushNotificationsDeliver3",
+ method: "GET",
+ headers: {
+ "Encryption-Key":
+ 'keyid="notification2"; dh="BKVdQcgfncpNyNWsGrbecX0zq3eHIlHu5XbCGmVcxPnRSbhjrA6GyBIeGdqsUL69j5Z2CvbZd-9z1UBH0akUnGQ"',
+ Encryption: 'keyid="notification2";salt="vFn3t3M_k42zHBdpch3VRw"',
+ "Content-Encoding": "aesgcm128",
+ },
+ });
+ pushPushServer2.writeHead(200, {
+ subresource: "1",
+ });
+
+ pushPushServer2.end(
+ "66df5d11daa01e5c802ff97cdf7f39684b5bf7c6418a5cf9b609c6826c04b25e403823607ac514278a7da945",
+ "hex"
+ );
+ return;
+ } else if (u.pathname === "/pushNotifications/subscription3") {
+ pushPushServer3 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushNotificationsDeliver3",
+ method: "GET",
+ headers: {
+ "Encryption-Key":
+ 'keyid="notification3";dh="BD3xV_ACT8r6hdIYES3BJj1qhz9wyv7MBrG9vM2UCnjPzwE_YFVpkD-SGqE-BR2--0M-Yf31wctwNsO1qjBUeMg"',
+ Encryption:
+ 'keyid="notification3"; salt="DFq188piWU7osPBgqn4Nlg"; rs=24',
+ "Content-Encoding": "aesgcm128",
+ },
+ });
+ pushPushServer3.writeHead(200, {
+ subresource: "1",
+ });
+
+ pushPushServer3.end(
+ "2caaeedd9cf1059b80c58b6c6827da8ff7de864ac8bea6d5775892c27c005209cbf9c4de0c3fbcddb9711d74eaeebd33f7275374cb42dd48c07168bc2cc9df63e045ce2d2a2408c66088a40c",
+ "hex"
+ );
+ return;
+ } else if (u.pathname == "/pushNotifications/subscription4") {
+ pushPushServer4 = res.push({
+ hostname: "localhost:" + serverPort,
+ port: serverPort,
+ path: "/pushNotificationsDeliver4",
+ method: "GET",
+ headers: {
+ "Crypto-Key":
+ 'keyid="notification4";dh="BJScXUUTcs7D8jJWI1AOxSgAKkF7e56ay4Lek52TqDlWo1yGd5czaxFWfsuP4j7XNWgGYm60-LKpSUMlptxPFVQ"',
+ Encryption: 'keyid="notification4"; salt="sn9p2QqF3V6KBclda8vx7w"',
+ "Content-Encoding": "aesgcm",
+ },
+ });
+ pushPushServer4.writeHead(200, {
+ subresource: "1",
+ });
+
+ pushPushServer4.end(
+ "9eba7ba6192544a39bd9e9b58e702d0748f1776b27f6616cdc55d29ed5a015a6db8f2dd82cd5751a14315546194ff1c18458ab91eb36c9760ccb042670001fd9964557a079553c3591ee131ceb259389cfffab3ab873f873caa6a72e87d262b8684c3260e5940b992234deebf57a9ff3a8775742f3cbcb152d249725a28326717e19cce8506813a155eff5df9bdba9e3ae8801d3cc2b7e7f2f1b6896e63d1fdda6f85df704b1a34db7b2dd63eba11ede154300a318c6f83c41a3d32356a196e36bc905b99195fd91ae4ff3f545c42d17f1fdc1d5bd2bf7516d0765e3a859fffac84f46160b79cedda589f74c25357cf6988cd8ba83867ebd86e4579c9d3b00a712c77fcea3b663007076e21f9819423faa830c2176ff1001c1690f34be26229a191a938517",
+ "hex"
+ );
+ return;
+ } else if (
+ u.pathname === "/pushNotificationsDeliver1" ||
+ u.pathname === "/pushNotificationsDeliver2" ||
+ u.pathname === "/pushNotificationsDeliver3"
+ ) {
+ res.writeHead(410, "GONE");
+ res.end("");
+ return;
+ } else if (u.pathname === "/illegalhpacksoft") {
+ // This will cause the compressor to compress a header that is not legal,
+ // but only affects the stream, not the session.
+ illegalheader_conn = req.stream.connection;
+ res.setHeader("Content-Type", "text/html");
+ res.setHeader("x-softillegalhpack", "true");
+ res.writeHead(200);
+ res.end(content);
+ return;
+ } else if (u.pathname === "/illegalhpackhard") {
+ // This will cause the compressor to insert an HPACK instruction that will
+ // cause a session failure.
+ res.setHeader("Content-Type", "text/html");
+ res.setHeader("x-hardillegalhpack", "true");
+ res.writeHead(200);
+ res.end(content);
+ return;
+ } else if (u.pathname === "/illegalhpack_validate") {
+ if (req.stream.connection === illegalheader_conn) {
+ res.setHeader("X-Did-Goaway", "no");
+ } else {
+ res.setHeader("X-Did-Goaway", "yes");
+ }
+ // Fall through to the default response behavior
+ } else if (u.pathname === "/foldedheader") {
+ res.setHeader("X-Folded-Header", "this is\n folded");
+ // Fall through to the default response behavior
+ } else if (u.pathname === "/emptydata") {
+ // Overwrite the original transform with our version that will insert an
+ // empty DATA frame at the beginning of the stream response, then fall
+ // through to the default response behavior.
+ Serializer.prototype._transform = newTransform;
+ }
+
+ // for use with test_immutable.js
+ else if (u.pathname === "/immutable-test-without-attribute") {
+ res.setHeader("Cache-Control", "max-age=100000");
+ res.setHeader("Etag", "1");
+ if (req.headers["if-none-match"]) {
+ res.setHeader("x-conditional", "true");
+ }
+ // default response from here
+ } else if (u.pathname === "/immutable-test-with-attribute") {
+ res.setHeader("Cache-Control", "max-age=100000, immutable");
+ res.setHeader("Etag", "2");
+ if (req.headers["if-none-match"]) {
+ res.setHeader("x-conditional", "true");
+ }
+ // default response from here
+ } else if (u.pathname === "/origin-4") {
+ let originList = [];
+ req.stream.connection.originFrame(originList);
+ res.setHeader("x-client-port", req.remotePort);
+ } else if (u.pathname === "/origin-6") {
+ let originList = [
+ "https://alt1.example.com:" + serverPort,
+ "https://alt2.example.com:" + serverPort,
+ "https://bar.example.com:" + serverPort,
+ ];
+ req.stream.connection.originFrame(originList);
+ res.setHeader("x-client-port", req.remotePort);
+ } else if (u.pathname === "/origin-11-a") {
+ res.setHeader("x-client-port", req.remotePort);
+
+ const pushb = res.push({
+ hostname: "foo.example.com:" + serverPort,
+ port: serverPort,
+ path: "/origin-11-b",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "x-foo": "bar" },
+ });
+ pushb.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ });
+ pushb.end("1");
+
+ const pushc = res.push({
+ hostname: "bar.example.com:" + serverPort,
+ port: serverPort,
+ path: "/origin-11-c",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "x-foo": "bar" },
+ });
+ pushc.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ });
+ pushc.end("1");
+
+ const pushd = res.push({
+ hostname: "madeup.example.com:" + serverPort,
+ port: serverPort,
+ path: "/origin-11-d",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "x-foo": "bar" },
+ });
+ pushd.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ });
+ pushd.end("1");
+
+ const pushe = res.push({
+ hostname: "alt1.example.com:" + serverPort,
+ port: serverPort,
+ path: "/origin-11-e",
+ method: "GET",
+ headers: { "x-pushed-request": "true", "x-foo": "bar" },
+ });
+ pushe.writeHead(200, {
+ pushed: "yes",
+ "content-length": 1,
+ });
+ pushe.end("1");
+ } else if (u.pathname.substring(0, 8) === "/origin-") {
+ // test_origin.js coalescing
+ res.setHeader("x-client-port", req.remotePort);
+ } else if (u.pathname === "/statusphrase") {
+ // Fortunately, the node-http2 API is dumb enough to allow this right on
+ // through, so we can easily test rejecting this on gecko's end.
+ res.writeHead("200 OK");
+ res.end(content);
+ return;
+ } else if (u.pathname === "/doublepush") {
+ push1 = res.push("/doublypushed");
+ push1.writeHead(200, {
+ "content-type": "text/plain",
+ pushed: "yes",
+ "content-length": 6,
+ "X-Connection-Http2": "yes",
+ });
+ push1.end("pushed");
+
+ push2 = res.push("/doublypushed");
+ push2.writeHead(200, {
+ "content-type": "text/plain",
+ pushed: "yes",
+ "content-length": 6,
+ "X-Connection-Http2": "yes",
+ });
+ push2.end("pushed");
+ } else if (u.pathname === "/doublypushed") {
+ content = "not pushed";
+ } else if (u.pathname === "/diskcache") {
+ content = "this was pulled via h2";
+ } else if (u.pathname === "/pushindisk") {
+ var pushedContent = "this was pushed via h2";
+ push = res.push("/diskcache");
+ push.writeHead(200, {
+ "content-type": "text/html",
+ pushed: "yes",
+ "content-length": pushedContent.length,
+ "X-Connection-Http2": "yes",
+ });
+ push.end(pushedContent);
+ }
+
+ // For test_header_Server_Timing.js
+ else if (u.pathname === "/server-timing") {
+ res.setHeader("Content-Type", "text/plain");
+ res.setHeader("Content-Length", "12");
+ res.setHeader("Trailer", "Server-Timing");
+ res.setHeader(
+ "Server-Timing",
+ "metric; dur=123.4; desc=description, metric2; dur=456.78; desc=description1"
+ );
+ res.write("data reached");
+ res.addTrailers({
+ "Server-Timing":
+ "metric3; dur=789.11; desc=description2, metric4; dur=1112.13; desc=description3",
+ });
+ res.end();
+ return;
+ } else if (u.pathname === "/redirect_to_http") {
+ res.setHeader(
+ "Location",
+ `http://test.httpsrr.redirect.com:${u.query.port}/redirect_to_http`
+ );
+ res.writeHead(307);
+ res.end("");
+ return;
+ } else if (u.pathname === "/103_response") {
+ let link_val = req.headers["link-to-set"];
+ if (link_val) {
+ res.setHeader("link", link_val);
+ }
+ res.setHeader("something", "something");
+ res.writeHead(103);
+
+ res.setHeader("Content-Type", "text/plain");
+ res.setHeader("Content-Length", "12");
+ res.writeHead(200);
+ res.write("data reached");
+ res.end();
+ return;
+ }
+
+ // response headers with invalid characters in the name
+ else if (u.pathname === "/invalid_response_header") {
+ res.setHeader("With Spaces", "Hello");
+ res.setHeader("Without-Spaces", "World");
+ res.writeHead(200);
+ res.end("");
+ return;
+ } else if (u.pathname === "/origin_header") {
+ let originHeader = req.headers.origin;
+ res.setHeader("Content-Length", originHeader.length);
+ res.setHeader("Content-Type", "text/plain");
+ res.writeHead(200);
+ res.write(originHeader);
+ res.end();
+ return;
+ }
+
+ res.setHeader("Content-Type", "text/html");
+ if (req.httpVersionMajor != 2) {
+ res.setHeader("Connection", "close");
+ }
+ res.writeHead(200);
+ res.end(content);
+}
+
+// Set up the SSL certs for our server - this server has a cert for foo.example.com
+// signed by netwerk/tests/unit/http2-ca.pem
+var options = {
+ key: fs.readFileSync(__dirname + "/http2-cert.key"),
+ cert: fs.readFileSync(__dirname + "/http2-cert.pem"),
+};
+
+if (process.env.HTTP2_LOG !== undefined) {
+ var log_module = node_http2_root + "/test/util";
+ options.log = require(log_module).createLogger("server");
+}
+
+var server = http2.createServer(options, handleRequest);
+
+server.on("connection", function(socket) {
+ socket.on("error", function() {
+    // Ignore SSL socket errors, since they usually represent a connection that
+    // was torn down by the browser because of an untrusted certificate. This
+    // happens at least once, when the first test case is done.
+ });
+});
+
+server.on("connect", function(req, clientSocket, head) {
+ clientSocket.write(
+ "HTTP/1.1 404 Not Found\r\nProxy-agent: Node.js-Proxy\r\n\r\n"
+ );
+ clientSocket.destroy();
+});
+
+function makeid(length) {
+ var result = "";
+ var characters =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+ var charactersLength = characters.length;
+ for (var i = 0; i < length; i++) {
+ result += characters.charAt(Math.floor(Math.random() * charactersLength));
+ }
+ return result;
+}
+
+let globalObjects = {};
+var serverPort;
+
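+// Start `serv` listening on the port requested via the environment variable
+// (0, i.e. an ephemeral port, when it is unset) and resolve with the port
+// that was actually bound.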
+const listen = (serv, envport) => {
+ if (!serv) {
+ return Promise.resolve(0);
+ }
+
+ let portSelection = 0;
+ if (envport !== undefined) {
+ try {
+ portSelection = parseInt(envport, 10);
+ } catch (e) {
+ portSelection = -1;
+ }
+ }
+ return new Promise(resolve => {
+ serv.listen(portSelection, "0.0.0.0", 2000, () => {
+ resolve(serv.address().port);
+ });
+ });
+};
+
+const http = require("http");
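+// Plain HTTP control server used by the tests to manage node child processes:
+// POST /fork spawns moz-http2-child.js, POST /execute/<id> evaluates the
+// POSTed code in that child, and POST /kill/<id> shuts it down.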
+let httpServer = http.createServer((req, res) => {
+ if (req.method != "POST") {
+ let u = url.parse(req.url, true);
+ if (u.pathname == "/test") {
+ // This path is used to test that the server is working properly
+ res.writeHead(200);
+ res.end("OK");
+ return;
+ }
+ res.writeHead(405);
+ res.end("Unexpected method: " + req.method);
+ return;
+ }
+
+ let code = "";
+ req.on("data", function receivePostData(chunk) {
+ code += chunk;
+ });
+ req.on("end", function finishPost() {
+ let u = url.parse(req.url, true);
+ if (u.pathname == "/fork") {
+ let id = forkProcess();
+ computeAndSendBackResponse(id);
+ return;
+ }
+
+ if (u.pathname.startsWith("/kill/")) {
+ let id = u.pathname.slice(6);
+ let forked = globalObjects[id];
+ if (!forked) {
+ computeAndSendBackResponse(undefined, new Error("could not find id"));
+ return;
+ }
+
+ new Promise((resolve, reject) => {
+ forked.resolve = resolve;
+ forked.reject = reject;
+ forked.kill();
+ })
+ .then(x =>
+ computeAndSendBackResponse(
+ undefined,
+ new Error(`incorrectly resolved ${x}`)
+ )
+ )
+ .catch(e => {
+ // We indicate a proper shutdown by resolving with undefined.
+ if (e && e.toString().match(/child process exit closing code/)) {
+ e = undefined;
+ }
+ computeAndSendBackResponse(undefined, e);
+ });
+ return;
+ }
+
+ if (u.pathname.startsWith("/execute/")) {
+ let id = u.pathname.slice(9);
+ let forked = globalObjects[id];
+ if (!forked) {
+ computeAndSendBackResponse(undefined, new Error("could not find id"));
+ return;
+ }
+
+ new Promise((resolve, reject) => {
+ forked.resolve = resolve;
+ forked.reject = reject;
+ forked.send({ code });
+ })
+ .then(x => sendBackResponse(x))
+ .catch(e => computeAndSendBackResponse(undefined, e));
+ }
+
+ function computeAndSendBackResponse(evalResult, e) {
+ let output = { result: evalResult, error: "", errorStack: "" };
+ if (e) {
+ output.error = e.toString();
+ output.errorStack = e.stack;
+ }
+ sendBackResponse(output);
+ }
+
+ function sendBackResponse(output) {
+ output = JSON.stringify(output);
+
+ res.setHeader("Content-Length", output.length);
+ res.setHeader("Content-Type", "application/json");
+ res.writeHead(200);
+ res.write(output);
+ res.end("");
+ }
+ });
+});
+
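+// Fork moz-http2-child.js, wire up message/exit handling, and return a random
+// id that the /execute/<id> and /kill/<id> endpoints use to address the child.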
+function forkProcess() {
+ let scriptPath = path.resolve(__dirname, "moz-http2-child.js");
+ let id = makeid(6);
+ let forked = fork(scriptPath);
+ forked.errors = "";
+ globalObjects[id] = forked;
+ forked.on("message", msg => {
+ if (forked.resolve) {
+ forked.resolve(msg);
+ forked.resolve = null;
+ } else {
+ console.log(
+ `forked process without handler sent: ${JSON.stringify(msg)}`
+ );
+ forked.errors += `forked process without handler sent: ${JSON.stringify(
+ msg
+ )}\n`;
+ }
+ });
+
+ let exitFunction = (code, signal) => {
+ if (globalObjects[id]) {
+ delete globalObjects[id];
+ } else {
+ // already called
+ return;
+ }
+
+ if (!forked.reject) {
+ console.log(
+ `child process ${id} closing code: ${code} signal: ${signal}`
+ );
+ return;
+ }
+
+ if (forked.errors != "") {
+ forked.reject(forked.errors);
+ forked.errors = "";
+ forked.reject = null;
+ return;
+ }
+
+ forked.reject(`child process exit closing code: ${code} signal: ${signal}`);
+ forked.reject = null;
+ };
+
+ forked.on("error", exitFunction);
+ forked.on("close", exitFunction);
+ forked.on("exit", exitFunction);
+
+ return id;
+}
+
+Promise.all([
+ listen(server, process.env.MOZHTTP2_PORT).then(port => (serverPort = port)),
+ listen(httpServer, process.env.MOZNODE_EXEC_PORT),
+]).then(([sPort, nodeExecPort]) => {
+ console.log(`HTTP2 server listening on ports ${sPort},${nodeExecPort}`);
+});
diff --git a/testing/xpcshell/moz-http2/proxy-cert.key b/testing/xpcshell/moz-http2/proxy-cert.key
new file mode 100644
index 0000000000..09e044f5e0
--- /dev/null
+++ b/testing/xpcshell/moz-http2/proxy-cert.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6iFGoRI4W1kH9
+braIBjYQPTwT2erkNUq07PVoV2wke8HHJajg2B+9sZwGm24ahvJr4q9adWtqZHEI
+eqVap0WH9xzVJJwCfs1D/B5p0DggKZOrIMNJ5Nu5TMJrbA7tFYIP8X6taRqx0wI6
+iypB7qdw4A8Njf1mCyuwJJKkfbmIYXmQsVeQPdI7xeC4SB+oN9OIQ+8nFthVt2Za
+qn4CkC86exCABiTMHGyXrZZhW7filhLAdTGjDJHdtMr3/K0dJdMJ77kXDqdo4bN7
+LyJvaeO0ipVhHe4m1iWdq5EITjbLHCQELL8Wiy/l8Y+ZFzG4s/5JI/pyUcQx1QOs
+2hgKNe2NAgMBAAECggEBAJ7LzjhhpFTsseD+j4XdQ8kvWCXOLpl4hNDhqUnaosWs
+VZskBFDlrJ/gw+McDu+mUlpl8MIhlABO4atGPd6e6CKHzJPnRqkZKcXmrD2IdT9s
+JbpZeec+XY+yOREaPNq4pLDN9fnKsF8SM6ODNcZLVWBSXn47kq18dQTPHcfLAFeI
+r8vh6Pld90AqFRUw1YCDRoZOs3CqeZVqWHhiy1M3kTB/cNkcltItABppAJuSPGgz
+iMnzbLm16+ZDAgQceNkIIGuHAJy4yrrK09vbJ5L7kRss9NtmA1hb6a4Mo7jmQXqg
+SwbkcOoaO1gcoDpngckxW2KzDmAR8iRyWUbuxXxtlEECgYEA3W4dT//r9o2InE0R
+TNqqnKpjpZN0KGyKXCmnF7umA3VkTVyqZ0xLi8cyY1hkYiDkVQ12CKwn1Vttt0+N
+gSfvj6CQmLaRR94GVXNEfhg9Iv59iFrOtRPZWB3V4HwakPXOCHneExNx7O/JznLp
+xD3BJ9I4GQ3oEXc8pdGTAfSMdCsCgYEA16dz2evDgKdn0v7Ak0rU6LVmckB3Gs3r
+ta15b0eP7E1FmF77yVMpaCicjYkQL63yHzTi3UlA66jAnW0fFtzClyl3TEMnXpJR
+3b5JCeH9O/Hkvt9Go5uLODMo70rjuVuS8gcK8myefFybWH/t3gXo59hspXiG+xZY
+EKd7mEW8MScCgYEAlkcrQaYQwK3hryJmwWAONnE1W6QtS1oOtOnX6zWBQAul3RMs
+2xpekyjHu8C7sBVeoZKXLt+X0SdR2Pz2rlcqMLHqMJqHEt1OMyQdse5FX8CT9byb
+WS11bmYhR08ywHryL7J100B5KzK6JZC7smGu+5WiWO6lN2VTFb6cJNGRmS0CgYAo
+tFCnp1qFZBOyvab3pj49lk+57PUOOCPvbMjo+ibuQT+LnRIFVA8Su+egx2got7pl
+rYPMpND+KiIBFOGzXQPVqFv+Jwa9UPzmz83VcbRspiG47UfWBbvnZbCqSgZlrCU2
+TaIBVAMuEgS4VZ0+NPtbF3yaVv+TUQpaSmKHwVHeLQKBgCgGe5NVgB0u9S36ltit
+tYlnPPjuipxv9yruq+nva+WKT0q/BfeIlH3IUf2qNFQhR6caJGv7BU7naqNGq80m
+ks/J5ExR5vBpxzXgc7oBn2pyFJYckbJoccrqv48GRBigJpDjmo1f8wZ7fNt/ULH1
+NBinA5ZsT8d0v3QCr2xDJH9D
+-----END PRIVATE KEY-----
diff --git a/testing/xpcshell/moz-http2/proxy-cert.key.keyspec b/testing/xpcshell/moz-http2/proxy-cert.key.keyspec
new file mode 100644
index 0000000000..4ad96d5159
--- /dev/null
+++ b/testing/xpcshell/moz-http2/proxy-cert.key.keyspec
@@ -0,0 +1 @@
+default
diff --git a/testing/xpcshell/moz-http2/proxy-cert.pem b/testing/xpcshell/moz-http2/proxy-cert.pem
new file mode 100644
index 0000000000..2fdbed67ed
--- /dev/null
+++ b/testing/xpcshell/moz-http2/proxy-cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDEzCCAfugAwIBAgIUSQPP46/Ps3zlBla2VmTXZQF0LzkwDQYJKoZIhvcNAQEL
+BQAwGTEXMBUGA1UEAwwOIFByb3h5IFRlc3QgQ0EwIhgPMjAyMjAxMDEwMDAwMDBa
+GA8yMDMyMDEwMTAwMDAwMFowGzEZMBcGA1UEAwwQIFByb3h5IFRlc3QgQ2VydDCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALqIUahEjhbWQf1utogGNhA9
+PBPZ6uQ1SrTs9WhXbCR7wcclqODYH72xnAabbhqG8mvir1p1a2pkcQh6pVqnRYf3
+HNUknAJ+zUP8HmnQOCApk6sgw0nk27lMwmtsDu0Vgg/xfq1pGrHTAjqLKkHup3Dg
+Dw2N/WYLK7AkkqR9uYhheZCxV5A90jvF4LhIH6g304hD7ycW2FW3ZlqqfgKQLzp7
+EIAGJMwcbJetlmFbt+KWEsB1MaMMkd20yvf8rR0l0wnvuRcOp2jhs3svIm9p47SK
+lWEd7ibWJZ2rkQhONsscJAQsvxaLL+Xxj5kXMbiz/kkj+nJRxDHVA6zaGAo17Y0C
+AwEAAaNNMEswSQYDVR0RBEIwQIIJbG9jYWxob3N0gg9mb28uZXhhbXBsZS5jb22C
+EGFsdDEuZXhhbXBsZS5jb22CEGFsdDIuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQEL
+BQADggEBAD64pXpPNZGq0wo+RVjewxqjcEC+nj4de9qVo8nBssIaCCU5nRT5FXh+
+8KERCtP3Q9ZLX4hvSkG5w+Oz0OxBYoad/lp7Ax6CwOCG6/SPIp87MbNzsoJkkubG
++7SAZHjMiHpbh3IVw/m6/nCYTOcmf4E6PgKnGAOT84lBiXbIkbMHbMVskDOHkTO4
+WTz/Kx+OlvvAJD/aj6Yhn7T3bUsHDkzsRJFiPMLqZkPCW0Yl83IuCoNLy8puMAsA
+cYfjVNAiAdVIlwViEUYlHc1jtzH2ZfvPZRylrRHHAPTt8wXffD4WTa9iIqa3x1D4
+/rpcwWElQ/vmNZ7HzBdZZmBRYSd/NVs=
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/moz-http2/proxy-cert.pem.certspec b/testing/xpcshell/moz-http2/proxy-cert.pem.certspec
new file mode 100644
index 0000000000..c7a11d2bd4
--- /dev/null
+++ b/testing/xpcshell/moz-http2/proxy-cert.pem.certspec
@@ -0,0 +1,4 @@
+issuer: Proxy Test CA
+subject: Proxy Test Cert
+validity:20220101-20320101
+extension:subjectAlternativeName:localhost,foo.example.com,alt1.example.com,alt2.example.com
diff --git a/testing/xpcshell/moz.build b/testing/xpcshell/moz.build
new file mode 100644
index 0000000000..727f16eddb
--- /dev/null
+++ b/testing/xpcshell/moz.build
@@ -0,0 +1,15 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+TEST_DIRS += ["example"]
+
+TESTING_JS_MODULES += [
+ "dbg-actors.js",
+]
+
+with Files("**"):
+ BUG_COMPONENT = ("Testing", "XPCShell Harness")
+ SCHEDULES.exclusive = ["xpcshell", "xpcshell-coverage"]
diff --git a/testing/xpcshell/node-http2/.gitignore b/testing/xpcshell/node-http2/.gitignore
new file mode 100644
index 0000000000..bc483625e6
--- /dev/null
+++ b/testing/xpcshell/node-http2/.gitignore
@@ -0,0 +1,7 @@
+node_modules
+.idea
+coverage
+doc
+.vscode/.browse*
+npm-debug.log
+typings
\ No newline at end of file
diff --git a/testing/xpcshell/node-http2/.travis.yml b/testing/xpcshell/node-http2/.travis.yml
new file mode 100644
index 0000000000..5ca377d612
--- /dev/null
+++ b/testing/xpcshell/node-http2/.travis.yml
@@ -0,0 +1,5 @@
+ language: node_js
+ node_js:
+ - "iojs"
+ - "0.12"
+
diff --git a/testing/xpcshell/node-http2/HISTORY.md b/testing/xpcshell/node-http2/HISTORY.md
new file mode 100644
index 0000000000..758caa2901
--- /dev/null
+++ b/testing/xpcshell/node-http2/HISTORY.md
@@ -0,0 +1,264 @@
+Version history
+===============
+
+### 3.3.8 (2018-02-15) ###
+* Fix an issue with HTTP trailers and END_STREAM.
+
+### 3.3.7 (2017-09-21) ###
+* Mark as incompatible with node >= 9.0.0 (to encourage using the built-in http2 module available by default in node >= 9.0.0).
+
+### 3.3.6 (2016-09-16) ###
+* We were not appropriately sending HPACK context updates when receiving SETTINGS_HEADER_TABLE_SIZE. This release fixes that bug.
+
+### 3.3.5 (2016-09-06) ###
+* Fix issues with large DATA frames (https://github.com/molnarg/node-http2/issues/207)
+
+### 3.3.4 (2016-04-22) ###
+* More PR bugfixes (https://github.com/molnarg/node-http2/issues?q=milestone%3Av3.3.4)
+
+### 3.3.3 (2016-04-21) ###
+
+* Bugfixes from pull requests (https://github.com/molnarg/node-http2/search?q=milestone%3Av3.3.3&type=Issues&utf8=%E2%9C%93)
+
+### 3.3.2 (2016-01-11) ###
+
+* Fix an incompatibility with Firefox (issue 167)
+
+### 3.3.1 (2016-01-11) ###
+
+* Fix some DoS bugs (issues 145, 146, 147, and 148)
+
+### 3.3.0 (2016-01-10) ###
+
+* Bugfix updates from pull requests
+
+### 3.2.0 (2015-02-19) ###
+
+* Update ALPN token to final RFC version (h2).
+* Update altsvc implementation to draft 06: [draft-ietf-httpbis-alt-svc-06]
+
+[draft-ietf-httpbis-alt-svc-06]: http://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-06
+
+### 3.1.2 (2015-02-17) ###
+
+* Update the example server to have a safe push example.
+
+### 3.1.1 (2015-01-29) ###
+
+* Bugfix release.
+* Fixes an issue sending a push promise that is large enough to fill the frame (#93).
+
+### 3.1.0 (2014-12-11) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-16]
+  * This involves some state transition changes that are technically incompatible with draft-14. If you need guaranteed interop with draft-14, continue using 3.0.1
+
+[draft-ietf-httpbis-http2-16]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-16
+
+### 3.0.1 (2014-11-20) ###
+
+* Bugfix release.
+* Fixed #81 and #87
+* Fixed a bug in flow control (without GitHub issue)
+
+### 3.0.0 (2014-08-25) ###
+
+* Re-join node-http2 and node-http2-protocol into one repository
+* API Changes
+ * The default versions of createServer, request, and get now enforce TLS-only
+ * The raw versions of createServer, request, and get are now under http2.raw instead of http2
+ * What was previously in the http2-protocol repository/module is now available under http2.protocol from this repo/module
+ * http2-protocol.ImplementedVersion is now http2.protocol.VERSION (the ALPN token)
+
+### 2.7.1 (2014-08-01) ###
+
+* Require protocol 0.14.1 (bugfix release)
+
+### 2.7.0 (2014-07-31) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-14]
+
+[draft-ietf-httpbis-http2-14]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-14
+
+### 2.6.0 (2014-06-18) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-13]
+
+[draft-ietf-httpbis-http2-13]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-13
+
+### 2.5.3 (2014-06-15) ###
+
+* Exposing API to send ALTSVC frames
+
+### 2.5.2 (2014-05-25) ###
+
+* Fix a bug that occurs when the ALPN negotiation is unsuccessful
+
+### 2.5.1 (2014-05-25) ###
+
+* Support for node 0.11.x
+* New cipher suite priority list with conformant ciphers at the top (only available in node >=0.11.x)
+
+### 2.5.0 (2014-04-24) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-12]
+
+[draft-ietf-httpbis-http2-12]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-12
+
+### 2.4.0 (2014-04-16) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-11]
+
+[draft-ietf-httpbis-http2-11]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-11
+
+### 2.3.0 (2014-03-12) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-10]
+
+[draft-ietf-httpbis-http2-10]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-10
+
+### 2.2.0 (2013-12-25) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-09]
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.2.0.tar.gz)
+
+[draft-ietf-httpbis-http2-09]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-09
+
+### 2.1.1 (2013-12-21) ###
+
+* Minor bugfix
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.1.1.tar.gz)
+
+### 2.1.0 (2013-11-10) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-07][draft-07]
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.1.0.tar.gz)
+
+[draft-07]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-07
+
+### 2.0.0 (2013-11-09) ###
+
+* Splitting out everything that is not related to negotiating HTTP2 or the node-like HTTP API.
+  These live in a separate module from now on:
+ [http2-protocol](https://github.com/molnarg/node-http2-protocol).
+* The only backwards incompatible change: the `Endpoint` class is not exported anymore. Use the
+ http2-protocol module if you want to use this low level interface.
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.0.0.tar.gz)
+
+### 1.0.1 (2013-10-14) ###
+
+* Support for ALPN if node supports it (currently needs a custom build)
+* Fix for a few small issues
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-1.0.1.tar.gz)
+
+### 1.0.0 (2013-09-23) ###
+
+* Exporting Endpoint class
+* Support for 'filters' in Endpoint
+* The last time-based release
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-1.0.0.tar.gz)
+
+### 0.4.1 (2013-09-15) ###
+
+* Major performance improvements
+* Minor improvements to error handling
+* [Blog post](http://gabor.molnar.es/blog/2013/09/15/gsoc-week-number-13/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.4.1.tar.gz)
+
+### 0.4.0 (2013-09-09) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-06][draft-06]
+* Support for HTTP trailers
+* Support for TLS SNI (Server Name Indication)
+* Improved stream scheduling algorithm
+* [Blog post](http://gabor.molnar.es/blog/2013/09/09/gsoc-week-number-12/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.4.0.tar.gz)
+
+[draft-06]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-06
+
+### 0.3.1 (2013-09-03) ###
+
+* Lot of testing, bugfixes
+* [Blog post](http://gabor.molnar.es/blog/2013/09/03/gsoc-week-number-11/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.3.1.tar.gz)
+
+### 0.3.0 (2013-08-27) ###
+
+* Support for prioritization
+* Small API compatibility improvements (compatibility with the standard node.js HTTP API)
+* Minor push API change
+* Ability to pass an external bunyan logger when creating a Server or Agent
+* [Blog post](http://gabor.molnar.es/blog/2013/08/27/gsoc-week-number-10/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.3.0.tar.gz)
+
+### 0.2.1 (2013-08-20) ###
+
+* Fixing a flow control bug
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.2.1.tar.gz)
+
+### 0.2.0 (2013-08-19) ###
+
+* Exposing server push in the public API
+* Connection pooling when operating as client
+* Much better API compatibility with the standard node.js HTTPS module
+* Logging improvements
+* [Blog post](http://gabor.molnar.es/blog/2013/08/19/gsoc-week-number-9/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.2.0.tar.gz)
+
+### 0.1.1 (2013-08-12) ###
+
+* Lots of bugfixes
+* Proper flow control for outgoing frames
+* Basic flow control for incoming frames
+* [Blog post](http://gabor.molnar.es/blog/2013/08/12/gsoc-week-number-8/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.1.1.tar.gz)
+
+### 0.1.0 (2013-08-06) ###
+
+* First release with public API (similar to the standard node HTTPS module)
+* Support for NPN negotiation (no ALPN or Upgrade yet)
+* Stream number limitation is in place
+* Push streams works but not exposed yet in the public API
+* [Blog post](http://gabor.molnar.es/blog/2013/08/05/gsoc-week-number-6-and-number-7/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.1.0.tar.gz)
+
+### 0.0.6 (2013-07-19) ###
+
+* `Connection` and `Endpoint` classes are usable, but not yet ready
+* Addition of an example server and client
+* Using [istanbul](https://github.com/gotwarlost/istanbul) for measuring code coverage
+* [Blog post](http://gabor.molnar.es/blog/2013/07/19/gsoc-week-number-5/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.6.tar.gz)
+
+### 0.0.5 (2013-07-14) ###
+
+* `Stream` class is done
+* Public API stubs are in place
+* [Blog post](http://gabor.molnar.es/blog/2013/07/14/gsoc-week-number-4/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.5.tar.gz)
+
+### 0.0.4 (2013-07-08) ###
+
+* Added logging
+* Started `Stream` class implementation
+* [Blog post](http://gabor.molnar.es/blog/2013/07/08/gsoc-week-number-3/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.4.tar.gz)
+
+### 0.0.3 (2013-07-03) ###
+
+* Header compression is ready
+* [Blog post](http://gabor.molnar.es/blog/2013/07/03/the-http-slash-2-header-compression-implementation-of-node-http2/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.3.tar.gz)
+
+### 0.0.2 (2013-07-01) ###
+
+* Frame serialization and deserialization ready and updated to match the newest spec
+* Header compression implementation started
+* [Blog post](http://gabor.molnar.es/blog/2013/07/01/gsoc-week-number-2/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.2.tar.gz)
+
+### 0.0.1 (2013-06-23) ###
+
+* Frame serialization and deserialization largely done
+* [Blog post](http://gabor.molnar.es/blog/2013/06/23/gsoc-week-number-1/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.1.tar.gz)
diff --git a/testing/xpcshell/node-http2/LICENSE b/testing/xpcshell/node-http2/LICENSE
new file mode 100644
index 0000000000..9bb2e9ce57
--- /dev/null
+++ b/testing/xpcshell/node-http2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (C) 2013 Gábor Molnár <gabor@molnar.es>, Google Inc
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/xpcshell/node-http2/README.md b/testing/xpcshell/node-http2/README.md
new file mode 100644
index 0000000000..795e0fd05b
--- /dev/null
+++ b/testing/xpcshell/node-http2/README.md
@@ -0,0 +1,173 @@
+node-http2
+==========
+
+An HTTP/2 ([RFC 7540](http://tools.ietf.org/html/rfc7540))
+client and server implementation for node.js.
+
+![Travis CI status](https://travis-ci.org/molnarg/node-http2.svg?branch=master)
+
+**NOTE WELL** This package is officially deprecated. As of node 9.0.0, there is an 'http2' package built-in. You should use that one instead.
+
+Installation
+------------
+
+```
+npm install http2
+```
+
+API
+---
+
+The API is very similar to the [standard node.js HTTPS API](http://nodejs.org/api/https.html). The
+goal is perfect API compatibility, with additional HTTP/2-related extensions (like server push).
+
+Detailed API documentation is primarily maintained in the `lib/http.js` file and is [available in
+the wiki](https://github.com/molnarg/node-http2/wiki/Public-API) as well.
+
+Examples
+--------
+
+### Using as a server ###
+
+```javascript
+var options = {
+ key: fs.readFileSync('./example/localhost.key'),
+ cert: fs.readFileSync('./example/localhost.crt')
+};
+
+require('http2').createServer(options, function(request, response) {
+ response.end('Hello world!');
+}).listen(8080);
+```
+
+### Using as a client ###
+
+```javascript
+require('http2').get('https://localhost:8080/', function(response) {
+ response.pipe(process.stdout);
+});
+```
+
+### Simple static file server ###
+
+A simple static file server serving up content from its own directory is available in the `example`
+directory. Running the server:
+
+```bash
+$ node ./example/server.js
+```
+
+### Simple command line client ###
+
+An example client is also available. Downloading the server's own source code from the server:
+
+```bash
+$ node ./example/client.js 'https://localhost:8080/server.js' >/tmp/server.js
+```
+
+### Server push ###
+
+For a server push example, see the source code of the example
+[server](https://github.com/molnarg/node-http2/blob/master/example/server.js) and
+[client](https://github.com/molnarg/node-http2/blob/master/example/client.js).
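+
+The gist of the push API used in those examples (a minimal sketch; `options` is
+the same TLS options object as in the server example above, and the pushed path
+and payload are purely illustrative):
+
+```javascript
+require('http2').createServer(options, function(request, response) {
+  if (response.push) {
+    // Push '/style.css' alongside the main response.
+    var push = response.push('/style.css');
+    push.writeHead(200, { 'content-type': 'text/css' });
+    push.end('body { background: white; }');
+  }
+  response.end('Hello world!');
+}).listen(8080);
+```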
+
+Status
+------
+
+* ALPN is only supported in node.js >= 5.0
+* Upgrade mechanism to start HTTP/2 over unencrypted channel is not implemented yet
+ (issue [#4](https://github.com/molnarg/node-http2/issues/4))
+* Other minor features found in
+ [this list](https://github.com/molnarg/node-http2/issues?labels=feature) are not implemented yet
+
+Development
+-----------
+
+### Development dependencies ###
+
+There are a few libraries you will need to have installed to do anything described in the following
+sections. After installing/cloning node-http2, run `npm install` in its directory to install
+development dependencies.
+
+Used libraries:
+
+* [mocha](http://visionmedia.github.io/mocha/) for tests
+* [chai](http://chaijs.com/) for assertions
+* [istanbul](https://github.com/gotwarlost/istanbul) for code coverage analysis
+* [docco](http://jashkenas.github.io/docco/) for developer documentation
+* [bunyan](https://github.com/trentm/node-bunyan) for logging
+
+For pretty printing logs, you will also need a global install of bunyan (`npm install -g bunyan`).
+
+### Developer documentation ###
+
+The developer documentation is generated from the source code using docco and can be viewed online
+[here](http://molnarg.github.io/node-http2/doc/). If you'd like to have an offline copy, just run
+`npm run-script doc`.
+
+### Running the tests ###
+
+It's easy: just run `npm test`. The tests are written in BDD style, so they are a good starting
+point to understand the code.
+
+### Test coverage ###
+
+To generate a code coverage report, run `npm test --coverage` (which runs very slowly, so be patient).
+Code coverage summary as of version 3.0.1:
+```
+Statements : 92.09% ( 1759/1910 )
+Branches : 82.56% ( 696/843 )
+Functions : 91.38% ( 212/232 )
+Lines : 92.17% ( 1753/1902 )
+```
+
+There's a hosted version of the detailed (line-by-line) coverage report
+[here](http://molnarg.github.io/node-http2/coverage/lcov-report/lib/).
+
+### Logging ###
+
+Logging is turned off by default. You can turn it on by passing a bunyan logger as `log` option when
+creating a server or agent.
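+
+For example (a minimal sketch, assuming bunyan is installed; the logger name is
+arbitrary):
+
+```javascript
+var fs = require('fs');
+var bunyan = require('bunyan');
+var http2 = require('http2');
+
+var server = http2.createServer({
+  log: bunyan.createLogger({ name: 'server' }), // enables library logging
+  key: fs.readFileSync('./example/localhost.key'),
+  cert: fs.readFileSync('./example/localhost.crt')
+}, function(request, response) {
+  response.end('Hello world!');
+});
+server.listen(8080);
+```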
+
+When using the example server or client, it's very easy to turn logging on: set the `HTTP2_LOG`
+environment variable to `fatal`, `error`, `warn`, `info`, `debug` or `trace` (the logging level).
+To log every single incoming and outgoing data chunk, set `HTTP2_LOG_DATA=1` in addition to
+`HTTP2_LOG=trace`. Log output goes to the standard error output. If the standard error is redirected
+into a file, then the log output is in bunyan's JSON format for easier post-mortem analysis.
+
+Running the example server and client with `info` level logging output:
+
+```bash
+$ HTTP2_LOG=info node ./example/server.js
+```
+
+```bash
+$ HTTP2_LOG=info node ./example/client.js 'https://localhost:8080/server.js' >/dev/null
+```
+
+Contributors
+------------
+
+The co-maintainer of the project is [Nick Hurley](https://github.com/todesschaf).
+
+Code contributions are always welcome! People who contributed to node-http2 so far:
+
+* [Nick Hurley](https://github.com/todesschaf)
+* [Mike Belshe](https://github.com/mbelshe)
+* [Yoshihiro Iwanaga](https://github.com/iwanaga)
+* [Igor Novikov](https://github.com/vsemogutor)
+* [James Willcox](https://github.com/snorp)
+* [David Björklund](https://github.com/kesla)
+* [Patrick McManus](https://github.com/mcmanus)
+
+Special thanks to Google for financing the development of this module as part of their [Summer of
+Code program](https://developers.google.com/open-source/soc/) (project: [HTTP/2 prototype server
+implementation](https://google-melange.appspot.com/gsoc/project/details/google/gsoc2013/molnarg/5818821692620800)), and
+Nick Hurley of Mozilla, my GSoC mentor, who helped with regular code review and technical advice.
+
+License
+-------
+
+The MIT License
+
+Copyright (C) 2013 Gábor Molnár <gabor@molnar.es>
diff --git a/testing/xpcshell/node-http2/example/client.js b/testing/xpcshell/node-http2/example/client.js
new file mode 100644
index 0000000000..75a4bc011b
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/client.js
@@ -0,0 +1,48 @@
+var fs = require('fs');
+var path = require('path');
+var http2 = require('..');
+var urlParse = require('url').parse;
+
+// Setting the global logger (optional)
+http2.globalAgent = new http2.Agent({
+ rejectUnauthorized: true,
+ log: require('../test/util').createLogger('client')
+});
+
+// Sending the request
+var url = process.argv.pop();
+var options = urlParse(url);
+
+// Optionally verify self-signed certificates.
+if (options.hostname == 'localhost') {
+ options.key = fs.readFileSync(path.join(__dirname, '/localhost.key'));
+ options.ca = fs.readFileSync(path.join(__dirname, '/localhost.crt'));
+}
+
+var request = process.env.HTTP2_PLAIN ? http2.raw.get(options) : http2.get(options);
+
+// Receiving the response
+request.on('response', function(response) {
+ response.pipe(process.stdout);
+ response.on('end', finish);
+});
+
+// Receiving push streams
+request.on('push', function(pushRequest) {
+ var filename = path.join(__dirname, '/push-' + push_count);
+ push_count += 1;
+ console.error('Receiving pushed resource: ' + pushRequest.url + ' -> ' + filename);
+ pushRequest.on('response', function(pushResponse) {
+ pushResponse.pipe(fs.createWriteStream(filename)).on('finish', finish);
+ });
+});
+
+// Quitting after both the response and the associated pushed resources have arrived
+var push_count = 0;
+var finished = 0;
+function finish() {
+ finished += 1;
+ if (finished === (1 + push_count)) {
+ process.exit();
+ }
+}
diff --git a/testing/xpcshell/node-http2/example/localhost.crt b/testing/xpcshell/node-http2/example/localhost.crt
new file mode 100644
index 0000000000..c4e4d2e96d
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/localhost.crt
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICDTCCAXYCCQC7iiBVXeTv1DANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJI
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTETMBEGA1UEChMKbm9kZS1odHRwMjESMBAG
+A1UEAxMJbG9jYWxob3N0MB4XDTE0MTIwMjE4NDcwNFoXDTI0MTEyOTE4NDcwNFow
+SzELMAkGA1UEBhMCSFUxEzARBgNVBAgTClNvbWUtU3RhdGUxEzARBgNVBAoTCm5v
+ZGUtaHR0cDIxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA8As7rj7xdD+RuAmORju9NI+jtOScGgiAbfovaFyzTu0O0H9SCExi
+u6e2iXMRfzomTix/yjRvbdHEXfgONG1MnKUc0oC4GxHXshyMDEXq9LadgAmR/nDL
+UVT0eo7KqC21ufaca2nVS9qOdlSCE/p7IJdb2+BF1RmuC9pHpXvFW20CAwEAATAN
+BgkqhkiG9w0BAQUFAAOBgQDn8c/9ho9L08dOqEJ2WTBmv4dfRC3oTWR/0oIGsaXb
+RhQONy5CJv/ymPYE7nCFWTMaia+w8oFqMie/aNZ7VK6L+hafuUS93IjuTXVN++JP
+4948B0BBagvXGTwNtvm/1sZHLrXTkH1dbRUEF8M+KUSRUu2zJgm+e1bD8WTKQOIL
+NA==
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/node-http2/example/localhost.key b/testing/xpcshell/node-http2/example/localhost.key
new file mode 100644
index 0000000000..6e1de62642
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/localhost.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDwCzuuPvF0P5G4CY5GO700j6O05JwaCIBt+i9oXLNO7Q7Qf1II
+TGK7p7aJcxF/OiZOLH/KNG9t0cRd+A40bUycpRzSgLgbEdeyHIwMRer0tp2ACZH+
+cMtRVPR6jsqoLbW59pxradVL2o52VIIT+nsgl1vb4EXVGa4L2kele8VbbQIDAQAB
+AoGAKKB+FVup2hb4PsG/RrvNphu5hWA721wdAIAbjfpCjtUocLlb1PO4sjIMfu7u
+wy3AVfLKHhsJ0Phz18OoA8+L65NMoMRsHOGaLEnGIJzJcnDLT5+uTFN5di0a1+UK
+BzB828rlHBNoQisogVCoKTYlCPJAZuI3trEzupWAV28XjTECQQD5LUEwYq4xr62L
+dEq5Qj/+c5paK/jrEBY83VZUmWzYsFgUwmpdku2ITRILQlOM33j6rk8krZZb93sb
+38ydmfwjAkEA9p30zyjOI9kKqTl9WdYNYtIXpyNGYa+Pga33o9pawTewiyS2uCYs
+wnQQV26bQ0YwQqLQhtIbo4fzCO6Ex0w7LwJBANHNbd8cp4kEX35U+3nDM3i+w477
+CUp6sA6tWrw+tqw4xuEr1T1WshOauP+r6AdsPkPsMo0yb7CdzxVoObPVbLsCQQCc
+sx0cjEb/TCeUAy186Z+zzN6umqFb7Jt4wLt7Z4EHCIWqw/c95zPFks3XYDZTdsOv
+c5igMdzR+c4ZPMUthWiNAkByx7If12G1Z/R2Y0vIB0WJq4BJnZCZ0mRR0oAmPoA+
+sZbmwctZ3IU+68Rgr4EAhrU04ygjF67IiNyXX0qqu3VH
+-----END RSA PRIVATE KEY-----
diff --git a/testing/xpcshell/node-http2/example/server.js b/testing/xpcshell/node-http2/example/server.js
new file mode 100644
index 0000000000..66d8f895d1
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/server.js
@@ -0,0 +1,67 @@
+var fs = require('fs');
+var path = require('path');
+var http2 = require('..');
+
+// We cache one file to be able to do simple performance tests without waiting for the disk
+var cachedFile = fs.readFileSync(path.join(__dirname, './server.js'));
+var cachedUrl = '/server.js';
+
+// The callback to handle requests
+function onRequest(request, response) {
+ var filename = path.join(__dirname, request.url);
+
+ // Serving server.js from cache. Useful for microbenchmarks.
+ if (request.url === cachedUrl) {
+ if (response.push) {
+      // Also push down the client js, since if the requester wants one, it's
+      // likely they want both.
+ var push = response.push('/client.js');
+ push.writeHead(200);
+ fs.createReadStream(path.join(__dirname, '/client.js')).pipe(push);
+ }
+ response.end(cachedFile);
+ }
+
+ // Reading file from disk if it exists and is safe.
+ else if ((filename.indexOf(__dirname) === 0) && fs.existsSync(filename) && fs.statSync(filename).isFile()) {
+ response.writeHead(200);
+ var fileStream = fs.createReadStream(filename);
+ fileStream.pipe(response);
+    // pipe() ends the response once the whole file has been streamed.
+ }
+
+  // Example for testing large (boundary-sized) frames: the body built below is
+  // 2^14 + 1 = 16385 bytes, one byte over the default 16384-byte DATA frame size.
+ else if (request.url === "/largeframe") {
+ response.writeHead(200);
+ var body = 'a';
+ for (var i = 0; i < 14; i++) {
+ body += body;
+ }
+ body = body + 'a';
+ response.end(body);
+ }
+
+ // Otherwise responding with 404.
+ else {
+ response.writeHead(404);
+ response.end();
+ }
+}
+
+// Creating a bunyan logger (optional)
+var log = require('../test/util').createLogger('server');
+
+// Creating the server in plain or TLS mode (TLS mode is the default)
+var server;
+if (process.env.HTTP2_PLAIN) {
+ server = http2.raw.createServer({
+ log: log
+ }, onRequest);
+} else {
+ server = http2.createServer({
+ log: log,
+ key: fs.readFileSync(path.join(__dirname, '/localhost.key')),
+ cert: fs.readFileSync(path.join(__dirname, '/localhost.crt'))
+ }, onRequest);
+}
+server.listen(process.env.HTTP2_PORT || 8080);
diff --git a/testing/xpcshell/node-http2/lib/http.js b/testing/xpcshell/node-http2/lib/http.js
new file mode 100644
index 0000000000..5690bb9e79
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/http.js
@@ -0,0 +1,1276 @@
+// Public API
+// ==========
+
+// The main governing power behind the http2 API design is that it should look very similar to the
+// existing node.js [HTTPS API][1] (which is, in turn, almost identical to the [HTTP API][2]). The
+// additional features of HTTP/2 are exposed as extensions to this API. Furthermore, node-http2
+// should fall back to using HTTP/1.1 if needed. Compatibility with undocumented or deprecated
+// elements of the node.js HTTP/HTTPS API is a non-goal.
+//
+// Additional and modified API elements
+// ------------------------------------
+//
+// - **Class: http2.Endpoint**: an API for using the raw HTTP/2 framing layer. For documentation
+// see [protocol/endpoint.js](protocol/endpoint.html).
+//
+// - **Class: http2.Server**
+// - **Event: 'connection' (socket, [endpoint])**: there's a second argument if the negotiation of
+// HTTP/2 was successful: the reference to the [Endpoint](protocol/endpoint.html) object tied to the
+// socket.
+//
+// - **http2.createServer(options, [requestListener])**: additional option:
+// - **log**: an optional [bunyan](https://github.com/trentm/node-bunyan) logger object
+//
+// - **Class: http2.ServerResponse**
+// - **response.push(options)**: initiates a server push. `options` describes the 'imaginary'
+// request to which the push stream is a response; the possible options are identical to the
+// ones accepted by `http2.request`. Returns a ServerResponse object that can be used to send
+//     the response headers and content (see the usage sketch below).
+//
+// - **Class: http2.Agent**
+// - **new Agent(options)**: additional option:
+// - **log**: an optional [bunyan](https://github.com/trentm/node-bunyan) logger object
+//   - **agent.sockets**: only contains TCP sockets that correspond to HTTP/1 requests.
+// - **agent.endpoints**: contains [Endpoint](protocol/endpoint.html) objects for HTTP/2 connections.
+//
+// - **http2.request(options, [callback])**:
+// - similar to http.request
+//
+// - **http2.get(options, [callback])**:
+// - similar to http.get
+//
+// - **Class: http2.ClientRequest**
+// - **Event: 'socket' (socket)**: in case of an HTTP/2 incoming message, `socket` is a reference
+// to the associated [HTTP/2 Stream](protocol/stream.html) object (and not to the TCP socket).
+//   - **Event: 'push' (promise)**: signals the intention of a server push associated with this
+// request. `promise` is an IncomingPromise. If there's no listener for this event, the server
+// push is cancelled.
+// - **request.setPriority(priority)**: assign a priority to this request. `priority` is a number
+// between 0 (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// - **Class: http2.IncomingMessage**
+// - has two subclasses for easier interface description: **IncomingRequest** and
+// **IncomingResponse**
+// - **message.socket**: in case of an HTTP/2 incoming message, it's a reference to the associated
+// [HTTP/2 Stream](protocol/stream.html) object (and not to the TCP socket).
+//
+// - **Class: http2.IncomingRequest (IncomingMessage)**
+// - **message.url**: in case of an HTTP/2 incoming request, the `url` field always contains the
+//     path, and never a full URL (it contains the path in most cases in the HTTPS API as well).
+// - **message.scheme**: additional field. Mandatory HTTP/2 request metadata.
+// - **message.host**: additional field. Mandatory HTTP/2 request metadata. Note that this
+// replaces the old Host header field, but node-http2 will add Host to the `message.headers` for
+// backwards compatibility.
+//
+// - **Class: http2.IncomingPromise (IncomingRequest)**
+// - contains the metadata of the 'imaginary' request to which the server push is an answer.
+// - **Event: 'response' (response)**: signals the arrival of the actual push stream. `response`
+// is an IncomingResponse.
+//   - **Event: 'push' (promise)**: signals the intention of a server push associated with this
+// request. `promise` is an IncomingPromise. If there's no listener for this event, the server
+// push is cancelled.
+// - **promise.cancel()**: cancels the promised server push.
+// - **promise.setPriority(priority)**: assign a priority to this push stream. `priority` is a
+// number between 0 (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// API elements not yet implemented
+// --------------------------------
+//
+// - **Class: http2.Server**
+// - **server.maxHeadersCount**
+//
+// API elements that are not applicable to HTTP/2
+// ----------------------------------------------
+//
+// The reason may be deprecation of certain HTTP/1.1 features, or that some API elements simply
+// don't make sense when using HTTP/2. These will not be present when a request is made over HTTP/2,
+// but will function normally when falling back to HTTP/1.1.
+//
+// - **Class: http2.Server**
+// - **Event: 'checkContinue'**: not in the spec
+// - **Event: 'upgrade'**: upgrade is deprecated in HTTP/2
+//     - **Event: 'timeout'**: HTTP/2 sockets won't time out because of the application-level keepalive
+// (PING frames)
+// - **Event: 'connect'**: not yet supported
+// - **server.setTimeout(msecs, [callback])**
+// - **server.timeout**
+//
+// - **Class: http2.ServerResponse**
+// - **Event: 'close'**
+// - **Event: 'timeout'**
+// - **response.writeContinue()**
+// - **response.writeHead(statusCode, [reasonPhrase], [headers])**: reasonPhrase will always be
+// ignored since [it's not supported in HTTP/2][3]
+// - **response.setTimeout(timeout, [callback])**
+//
+// - **Class: http2.Agent**
+// - **agent.maxSockets**: only affects HTTP/1 connection pool. When using HTTP/2, there's always
+// one connection per host.
+//
+// - **Class: http2.ClientRequest**
+// - **Event: 'upgrade'**
+// - **Event: 'connect'**
+// - **Event: 'continue'**
+// - **request.setTimeout(timeout, [callback])**
+// - **request.setNoDelay([noDelay])**
+// - **request.setSocketKeepAlive([enable], [initialDelay])**
+//
+// - **Class: http2.IncomingMessage**
+// - **Event: 'close'**
+// - **message.setTimeout(timeout, [callback])**
+//
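+// Usage sketch (informative)
+// --------------------------
+//
+// A rough illustration of the server push and priority extensions described above; it is not part
+// of the library. The port, file paths and payloads are placeholders, and `http2` stands for
+// whatever name this module is required under:
+//
+//     var fs = require('fs');
+//     var http2 = require('http2');  // i.e. this module
+//
+//     // Server: push '/client.js' alongside every '/index.html' response.
+//     var server = http2.createServer({
+//       key: fs.readFileSync('localhost.key'),
+//       cert: fs.readFileSync('localhost.crt')
+//     }, function(request, response) {
+//       if ((request.url === '/index.html') && response.push) {
+//         var push = response.push('/client.js');
+//         push.writeHead(200);
+//         push.end('console.log("pushed");');
+//       }
+//       response.end('<html></html>');
+//     });
+//     server.listen(8080);
+//
+//     // Client: raise the request's priority and accept pushed streams.
+//     var request = http2.get('https://localhost:8080/index.html', function(response) {
+//       response.pipe(process.stdout);
+//     });
+//     request.setPriority(0);
+//     request.on('push', function(promise) {
+//       promise.on('response', function(pushedResponse) {
+//         pushedResponse.pipe(process.stdout);
+//       });
+//     });
+//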
+// [1]: https://nodejs.org/api/https.html
+// [2]: https://nodejs.org/api/http.html
+// [3]: https://tools.ietf.org/html/rfc7540#section-8.1.2.4
+
+// Common server and client side code
+// ==================================
+
+var net = require('net');
+var url = require('url');
+var util = require('util');
+var EventEmitter = require('events').EventEmitter;
+var PassThrough = require('stream').PassThrough;
+var Readable = require('stream').Readable;
+var Writable = require('stream').Writable;
+var protocol = require('./protocol');
+var Endpoint = protocol.Endpoint;
+var http = require('http');
+var https = require('https');
+
+exports.STATUS_CODES = http.STATUS_CODES;
+exports.IncomingMessage = IncomingMessage;
+exports.OutgoingMessage = OutgoingMessage;
+exports.protocol = protocol;
+
+var deprecatedHeaders = [
+ 'connection',
+ 'host',
+ 'keep-alive',
+ 'proxy-connection',
+ 'transfer-encoding',
+ 'upgrade'
+];
+
+// When doing NPN/ALPN negotiation, HTTP/1.1 is used as fallback
+var supportedProtocols = [protocol.VERSION, 'http/1.1', 'http/1.0'];
+
+// Ciphersuite list based on the recommendations of https://wiki.mozilla.org/Security/Server_Side_TLS
+// The only modification is that kEDH+AESGCM was placed after the DHE and ECDHE suites
+var cipherSuites = [
+ 'ECDHE-RSA-AES128-GCM-SHA256',
+ 'ECDHE-ECDSA-AES128-GCM-SHA256',
+ 'ECDHE-RSA-AES256-GCM-SHA384',
+ 'ECDHE-ECDSA-AES256-GCM-SHA384',
+ 'DHE-RSA-AES128-GCM-SHA256',
+ 'DHE-DSS-AES128-GCM-SHA256',
+ 'ECDHE-RSA-AES128-SHA256',
+ 'ECDHE-ECDSA-AES128-SHA256',
+ 'ECDHE-RSA-AES128-SHA',
+ 'ECDHE-ECDSA-AES128-SHA',
+ 'ECDHE-RSA-AES256-SHA384',
+ 'ECDHE-ECDSA-AES256-SHA384',
+ 'ECDHE-RSA-AES256-SHA',
+ 'ECDHE-ECDSA-AES256-SHA',
+ 'DHE-RSA-AES128-SHA256',
+ 'DHE-RSA-AES128-SHA',
+ 'DHE-DSS-AES128-SHA256',
+ 'DHE-RSA-AES256-SHA256',
+ 'DHE-DSS-AES256-SHA',
+ 'DHE-RSA-AES256-SHA',
+ 'kEDH+AESGCM',
+ 'AES128-GCM-SHA256',
+ 'AES256-GCM-SHA384',
+ 'ECDHE-RSA-RC4-SHA',
+ 'ECDHE-ECDSA-RC4-SHA',
+ 'AES128',
+ 'AES256',
+ 'RC4-SHA',
+ 'HIGH',
+ '!aNULL',
+ '!eNULL',
+ '!EXPORT',
+ '!DES',
+ '!3DES',
+ '!MD5',
+ '!PSK'
+].join(':');
+
+// Logging
+// -------
+
+// Logger shim, used when no logger is provided by the user.
+function noop() {}
+var defaultLogger = {
+ fatal: noop,
+ error: noop,
+ warn : noop,
+ info : noop,
+ debug: noop,
+ trace: noop,
+
+ child: function() { return this; }
+};
+
+// Bunyan serializers exported by submodules that are worth adding when creating a logger.
+exports.serializers = protocol.serializers;
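+
+// For example, a caller could create such a logger along these lines (a sketch only: it assumes
+// bunyan is installed, `http2` refers to this module, and `key`, `cert` and `onRequest` are
+// placeholders supplied by the caller):
+//
+//     var log = require('bunyan').createLogger({ name: 'http2', serializers: http2.serializers });
+//     var server = http2.createServer({ log: log, key: key, cert: cert }, onRequest);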
+
+// IncomingMessage class
+// ---------------------
+
+function IncomingMessage(stream) {
+ // * This is basically a read-only wrapper for the [Stream](protocol/stream.html) class.
+ PassThrough.call(this);
+ stream.pipe(this);
+ this.socket = this.stream = stream;
+
+ this._log = stream._log.child({ component: 'http' });
+
+ // * HTTP/2.0 does not define a way to carry the version identifier that is included in the
+ // HTTP/1.1 request/status line. Version is always 2.0.
+ this.httpVersion = '2.0';
+ this.httpVersionMajor = 2;
+ this.httpVersionMinor = 0;
+
+ // * `this.headers` will store the regular headers (and none of the special colon headers)
+ this.headers = {};
+ this.trailers = undefined;
+ this._lastHeadersSeen = undefined;
+
+ // * Other metadata is filled in when the headers arrive.
+ stream.once('headers', this._onHeaders.bind(this));
+ stream.once('end', this._onEnd.bind(this));
+}
+IncomingMessage.prototype = Object.create(PassThrough.prototype, { constructor: { value: IncomingMessage } });
+
+// [Request Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.3)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
+IncomingMessage.prototype._onHeaders = function _onHeaders(headers) {
+ // * Detects malformed headers
+ this._validateHeaders(headers);
+
+ // * Store the _regular_ headers in `this.headers`
+ for (var name in headers) {
+ if (name[0] !== ':') {
+ if (name === 'set-cookie' && !Array.isArray(headers[name])) {
+ this.headers[name] = [headers[name]];
+ } else {
+ this.headers[name] = headers[name];
+ }
+ }
+ }
+
+ // * The last header block, if it's not the first, will represent the trailers
+ var self = this;
+ this.stream.on('headers', function(headers) {
+ self._lastHeadersSeen = headers;
+ });
+};
+
+IncomingMessage.prototype._onEnd = function _onEnd() {
+ this.trailers = this._lastHeadersSeen;
+};
+
+IncomingMessage.prototype.setTimeout = noop;
+
+IncomingMessage.prototype._checkSpecialHeader = function _checkSpecialHeader(key, value) {
+ if ((typeof value !== 'string') || (value.length === 0)) {
+ this._log.error({ key: key, value: value }, 'Invalid or missing special header field');
+ this.stream.reset('PROTOCOL_ERROR');
+ }
+
+ return value;
+};
+
+IncomingMessage.prototype._validateHeaders = function _validateHeaders(headers) {
+ // * An HTTP/2.0 request or response MUST NOT include any of the following header fields:
+ // Connection, Host, Keep-Alive, Proxy-Connection, Transfer-Encoding, and Upgrade. A server
+ // MUST treat the presence of any of these header fields as a stream error of type
+ // PROTOCOL_ERROR.
+  // If the TE header is present, its only valid value is 'trailers'
+ for (var i = 0; i < deprecatedHeaders.length; i++) {
+ var key = deprecatedHeaders[i];
+ if (key in headers || (key === 'te' && headers[key] !== 'trailers')) {
+ this._log.error({ key: key, value: headers[key] }, 'Deprecated header found');
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ }
+
+ for (var headerName in headers) {
+ // * Empty header name field is malformed
+ if (headerName.length <= 1) {
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ // * A request or response containing uppercase header name field names MUST be
+ // treated as malformed (Section 8.1.3.5). Implementations that detect malformed
+ // requests or responses need to ensure that the stream ends.
+    if (/[A-Z]/.test(headerName)) {
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ }
+};
+
+// OutgoingMessage class
+// ---------------------
+
+function OutgoingMessage() {
+  // * This is basically a write-only wrapper for the [Stream](protocol/stream.html) class.
+ Writable.call(this);
+
+ this._headers = {};
+ this._trailers = undefined;
+ this.headersSent = false;
+ this.finished = false;
+
+ this.on('finish', this._finish);
+}
+OutgoingMessage.prototype = Object.create(Writable.prototype, { constructor: { value: OutgoingMessage } });
+
+OutgoingMessage.prototype._write = function _write(chunk, encoding, callback) {
+ if (this.stream) {
+ this.stream.write(chunk, encoding, callback);
+ } else {
+ this.once('socket', this._write.bind(this, chunk, encoding, callback));
+ }
+};
+
+OutgoingMessage.prototype._finish = function _finish() {
+ if (this.stream) {
+ if (this._trailers) {
+ if (this.request) {
+ this.request.addTrailers(this._trailers);
+ } else {
+ this.stream.trailers(this._trailers);
+ }
+ }
+ this.finished = true;
+ this.stream.end();
+ } else {
+ this.once('socket', this._finish.bind(this));
+ }
+};
+
+OutgoingMessage.prototype.setHeader = function setHeader(name, value) {
+ if (this.headersSent) {
+ return this.emit('error', new Error('Can\'t set headers after they are sent.'));
+ } else {
+ name = name.toLowerCase();
+ if (deprecatedHeaders.indexOf(name) !== -1) {
+ return this.emit('error', new Error('Cannot set deprecated header: ' + name));
+ }
+ this._headers[name] = value;
+ }
+};
+
+OutgoingMessage.prototype.removeHeader = function removeHeader(name) {
+ if (this.headersSent) {
+ return this.emit('error', new Error('Can\'t remove headers after they are sent.'));
+ } else {
+ delete this._headers[name.toLowerCase()];
+ }
+};
+
+OutgoingMessage.prototype.getHeader = function getHeader(name) {
+ return this._headers[name.toLowerCase()];
+};
+
+OutgoingMessage.prototype.addTrailers = function addTrailers(trailers) {
+ this._trailers = trailers;
+};
+
+OutgoingMessage.prototype.setTimeout = noop;
+
+OutgoingMessage.prototype._checkSpecialHeader = IncomingMessage.prototype._checkSpecialHeader;
+
+// Server side
+// ===========
+
+exports.Server = Server;
+exports.IncomingRequest = IncomingRequest;
+exports.OutgoingResponse = OutgoingResponse;
+exports.ServerResponse = OutgoingResponse; // for API compatibility
+
+// Forward events `event` on `source` to all listeners on `target`.
+//
+// Note: The calling context is `source`.
+function forwardEvent(event, source, target) {
+ function forward() {
+ var listeners = target.listeners(event);
+
+ var n = listeners.length;
+
+ // Special case for `error` event with no listeners.
+ if (n === 0 && event === 'error') {
+ var args = [event];
+ args.push.apply(args, arguments);
+
+ target.emit.apply(target, args);
+ return;
+ }
+
+ for (var i = 0; i < n; ++i) {
+ listeners[i].apply(source, arguments);
+ }
+ }
+
+ source.on(event, forward);
+
+ // A reference to the function is necessary to be able to stop
+ // forwarding.
+ return forward;
+}
+
+// Server class
+// ------------
+
+function Server(options) {
+ options = util._extend({}, options);
+
+ this._log = (options.log || defaultLogger).child({ component: 'http' });
+ this._settings = options.settings;
+
+ var start = this._start.bind(this);
+ var fallback = this._fallback.bind(this);
+
+ // HTTP2 over TLS (using NPN or ALPN)
+ if ((options.key && options.cert) || options.pfx) {
+ this._log.info('Creating HTTP/2 server over TLS');
+ this._mode = 'tls';
+ options.ALPNProtocols = supportedProtocols;
+ options.NPNProtocols = supportedProtocols;
+ options.ciphers = options.ciphers || cipherSuites;
+ options.honorCipherOrder = (options.honorCipherOrder != false);
+ this._server = https.createServer(options);
+ this._originalSocketListeners = this._server.listeners('secureConnection');
+ this._server.removeAllListeners('secureConnection');
+ this._server.on('secureConnection', function(socket) {
+ var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;
+      // The client MUST use SNI, but even if it doesn't, we must not fall back to HTTP/1:
+      // once the ALPN negotiation has succeeded, the client assumes we speak HTTP/2, and falling back would break the connection.
+ if (negotiatedProtocol === protocol.VERSION) {
+ start(socket);
+ } else {
+ fallback(socket);
+ }
+ });
+ this._server.on('request', this.emit.bind(this, 'request'));
+ this._server.on('connect', this.emit.bind(this, 'connect'));
+
+ forwardEvent('error', this._server, this);
+ forwardEvent('listening', this._server, this);
+ }
+
+ // HTTP2 over plain TCP
+ else if (options.plain) {
+ this._log.info('Creating HTTP/2 server over plain TCP');
+ this._mode = 'plain';
+ this._server = net.createServer(start);
+ }
+
+ // HTTP/2 with HTTP/1.1 upgrade
+ else {
+ this._log.error('Trying to create HTTP/2 server with Upgrade from HTTP/1.1');
+ throw new Error('HTTP1.1 -> HTTP2 upgrade is not yet supported. Please provide TLS keys.');
+ }
+
+ this._server.on('close', this.emit.bind(this, 'close'));
+}
+Server.prototype = Object.create(EventEmitter.prototype, { constructor: { value: Server } });
+
+// Starting HTTP/2
+Server.prototype._start = function _start(socket) {
+ var endpoint = new Endpoint(this._log, 'SERVER', this._settings);
+
+ this._log.info({ e: endpoint,
+ client: socket.remoteAddress + ':' + socket.remotePort,
+ SNI: socket.servername
+ }, 'New incoming HTTP/2 connection');
+
+ endpoint.pipe(socket).pipe(endpoint);
+
+ var self = this;
+ endpoint.on('stream', function _onStream(stream) {
+ var response = new OutgoingResponse(stream);
+ var request = new IncomingRequest(stream);
+
+    // A bit of conformance to the Node.js HTTPS API, so that clients can be distinguished:
+ request.remoteAddress = socket.remoteAddress;
+ request.remotePort = socket.remotePort;
+ request.connection = request.socket = response.socket = socket;
+
+ request.once('ready', self.emit.bind(self, 'request', request, response));
+ });
+
+ endpoint.on('error', this.emit.bind(this, 'clientError'));
+ socket.on('error', this.emit.bind(this, 'clientError'));
+
+ this.emit('connection', socket, endpoint);
+};
+
+Server.prototype._fallback = function _fallback(socket) {
+ var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;
+
+ this._log.info({ client: socket.remoteAddress + ':' + socket.remotePort,
+ protocol: negotiatedProtocol,
+ SNI: socket.servername
+ }, 'Falling back to simple HTTPS');
+
+ for (var i = 0; i < this._originalSocketListeners.length; i++) {
+ this._originalSocketListeners[i].call(this._server, socket);
+ }
+
+ this.emit('connection', socket);
+};
+
+// There are [3 possible signatures][1] of the `listen` function. Every argument is forwarded to
+// the backing TCP or HTTPS server.
+// [1]: https://nodejs.org/api/http.html#http_server_listen_port_hostname_backlog_callback
+Server.prototype.listen = function listen(port, hostname) {
+ this._log.info({ on: ((typeof hostname === 'string') ? (hostname + ':' + port) : port) },
+ 'Listening for incoming connections');
+ this._server.listen.apply(this._server, arguments);
+
+ return this._server;
+};
+
+Server.prototype.close = function close(callback) {
+ this._log.info('Closing server');
+ this._server.close(callback);
+};
+
+Server.prototype.setTimeout = function setTimeout(timeout, callback) {
+ if (this._mode === 'tls') {
+ this._server.setTimeout(timeout, callback);
+ }
+};
+
+Object.defineProperty(Server.prototype, 'timeout', {
+ get: function getTimeout() {
+ if (this._mode === 'tls') {
+ return this._server.timeout;
+ } else {
+ return undefined;
+ }
+ },
+ set: function setTimeout(timeout) {
+ if (this._mode === 'tls') {
+ this._server.timeout = timeout;
+ }
+ }
+});
+
+// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
+// `server`. There are events on the `http.Server` class where it makes a difference whether someone
+// is listening on the event or not. In these cases, we cannot simply forward the events from the
+// `server` to `this`, since that would itself count as a listener. Instead, we forward the subscriptions.
+Server.prototype.on = function on(event, listener) {
+ if ((event === 'upgrade') || (event === 'timeout')) {
+ return this._server.on(event, listener && listener.bind(this));
+ } else {
+ return EventEmitter.prototype.on.call(this, event, listener);
+ }
+};
+
+// `addContext` is used to add Server Name Indication contexts
+Server.prototype.addContext = function addContext(hostname, credentials) {
+ if (this._mode === 'tls') {
+ this._server.addContext(hostname, credentials);
+ }
+};
+
+Server.prototype.address = function address() {
+  return this._server.address();
+};
+
+function createServerRaw(options, requestListener) {
+ if (typeof options === 'function') {
+ requestListener = options;
+ options = {};
+ }
+
+ if (options.pfx || (options.key && options.cert)) {
+ throw new Error('options.pfx, options.key, and options.cert are nonsensical!');
+ }
+
+ options.plain = true;
+ var server = new Server(options);
+
+ if (requestListener) {
+ server.on('request', requestListener);
+ }
+
+ return server;
+}
+
+function createServerTLS(options, requestListener) {
+ if (typeof options === 'function') {
+ throw new Error('options are required!');
+ }
+ if (!options.pfx && !(options.key && options.cert)) {
+ throw new Error('options.pfx or options.key and options.cert are required!');
+ }
+ options.plain = false;
+
+ var server = new Server(options);
+
+ if (requestListener) {
+ server.on('request', requestListener);
+ }
+
+ return server;
+}
+
+// Exposed main interfaces for HTTPS connections (the default)
+exports.https = {};
+exports.createServer = exports.https.createServer = createServerTLS;
+exports.request = exports.https.request = requestTLS;
+exports.get = exports.https.get = getTLS;
+
+// Exposed main interfaces for raw TCP connections (not recommended)
+exports.raw = {};
+exports.raw.createServer = createServerRaw;
+exports.raw.request = requestRaw;
+exports.raw.get = getRaw;
+
+// Exposed main interfaces for HTTP plaintext upgrade connections (not implemented)
+function notImplemented() {
+ throw new Error('HTTP UPGRADE is not implemented!');
+}
+
+exports.http = {};
+exports.http.createServer = exports.http.request = exports.http.get = notImplemented;
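+
+// As an informal sketch (not part of the module), the raw interfaces are used the same way as the
+// TLS ones; the port is a placeholder and `http2` stands for this module:
+//
+//     var server = http2.raw.createServer({}, function(request, response) {
+//       response.end('ok');
+//     });
+//     server.listen(8080);
+//
+//     http2.raw.get('http://localhost:8080/', function(response) {
+//       response.pipe(process.stdout);
+//     });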
+
+// IncomingRequest class
+// ---------------------
+
+function IncomingRequest(stream) {
+ IncomingMessage.call(this, stream);
+}
+IncomingRequest.prototype = Object.create(IncomingMessage.prototype, { constructor: { value: IncomingRequest } });
+
+// [Request Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.3)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
+IncomingRequest.prototype._onHeaders = function _onHeaders(headers) {
+ // * The ":method" header field includes the HTTP method
+ // * The ":scheme" header field includes the scheme portion of the target URI
+ // * The ":authority" header field includes the authority portion of the target URI
+ // * The ":path" header field includes the path and query parts of the target URI.
+ // This field MUST NOT be empty; URIs that do not contain a path component MUST include a value
+ // of '/', unless the request is an OPTIONS request for '*', in which case the ":path" header
+ // field MUST include '*'.
+ // * All HTTP/2.0 requests MUST include exactly one valid value for all of these header fields. A
+ // server MUST treat the absence of any of these header fields, presence of multiple values, or
+ // an invalid value as a stream error of type PROTOCOL_ERROR.
+ this.method = this._checkSpecialHeader(':method' , headers[':method']);
+ this.host = this._checkSpecialHeader(':authority', headers[':authority'] );
+ if (this.method == "CONNECT") {
+ this.scheme = headers[':scheme'];
+ this.url = headers[':path'];
+ if (!this.method || !this.host) {
+ // This is invalid, and we've sent a RST_STREAM, so don't continue processing
+ return;
+ }
+ } else {
+ this.scheme = this._checkSpecialHeader(':scheme' , headers[':scheme']);
+ this.url = this._checkSpecialHeader(':path' , headers[':path'] );
+ if (!this.method || !this.scheme || !this.host || !this.url) {
+ // This is invalid, and we've sent a RST_STREAM, so don't continue processing
+ return;
+ }
+ }
+
+ // * Host header is included in the headers object for backwards compatibility.
+ this.headers.host = this.host;
+
+ // * Handling regular headers.
+ IncomingMessage.prototype._onHeaders.call(this, headers);
+
+ // * Signaling that the headers arrived.
+ this._log.info({ method: this.method, scheme: this.scheme, host: this.host,
+ path: this.url, headers: this.headers }, 'Incoming request');
+ this.emit('ready');
+};
+
+// OutgoingResponse class
+// ----------------------
+
+function OutgoingResponse(stream) {
+ OutgoingMessage.call(this);
+
+ this._log = stream._log.child({ component: 'http' });
+
+ this.stream = stream;
+ this.statusCode = 200;
+ this.sendDate = true;
+
+ this.stream.once('headers', this._onRequestHeaders.bind(this));
+}
+OutgoingResponse.prototype = Object.create(OutgoingMessage.prototype, { constructor: { value: OutgoingResponse } });
+
+OutgoingResponse.prototype.writeHead = function writeHead(statusCode, reasonPhrase, headers) {
+ if (this.headersSent) {
+ return;
+ }
+
+ if (typeof reasonPhrase === 'string') {
+ this._log.warn('Reason phrase argument was present but ignored by the writeHead method');
+ } else {
+ headers = reasonPhrase;
+ }
+
+ for (var name in headers) {
+ this.setHeader(name, headers[name]);
+ }
+ headers = this._headers;
+
+ if (this.sendDate && !('date' in this._headers)) {
+ headers.date = (new Date()).toUTCString();
+ }
+
+ this._log.info({ status: statusCode, headers: this._headers }, 'Sending server response');
+
+ headers[':status'] = this.statusCode = statusCode;
+
+ this.stream.headers(headers);
+ if (statusCode >= 200) {
+ this.headersSent = true;
+ } else {
+ this._headers = {};
+ }
+};
+
+OutgoingResponse.prototype._implicitHeaders = function _implicitHeaders() {
+ if (!this.headersSent) {
+ this.writeHead(this.statusCode);
+ }
+};
+
+OutgoingResponse.prototype._implicitHeader = function() {
+ this._implicitHeaders();
+};
+
+OutgoingResponse.prototype.write = function write() {
+ this._implicitHeaders();
+ return OutgoingMessage.prototype.write.apply(this, arguments);
+};
+
+OutgoingResponse.prototype.end = function end() {
+  this.finished = true;
+ this._implicitHeaders();
+ return OutgoingMessage.prototype.end.apply(this, arguments);
+};
+
+OutgoingResponse.prototype._onRequestHeaders = function _onRequestHeaders(headers) {
+ this._requestHeaders = headers;
+};
+
+OutgoingResponse.prototype.push = function push(options) {
+ if (typeof options === 'string') {
+ options = url.parse(options);
+ }
+
+ if (!options.path) {
+ throw new Error('`path` option is mandatory.');
+ }
+
+ var promise = util._extend({
+ ':method': (options.method || 'GET').toUpperCase(),
+ ':scheme': (options.protocol && options.protocol.slice(0, -1)) || this._requestHeaders[':scheme'],
+ ':authority': options.hostname || options.host || this._requestHeaders[':authority'],
+ ':path': options.path
+ }, options.headers);
+
+ this._log.info({ method: promise[':method'], scheme: promise[':scheme'],
+ authority: promise[':authority'], path: promise[':path'],
+ headers: options.headers }, 'Promising push stream');
+
+ var pushStream = this.stream.promise(promise);
+
+ return new OutgoingResponse(pushStream);
+};
+
+OutgoingResponse.prototype.altsvc = function altsvc(host, port, protocolID, maxAge, origin) {
+ if (origin === undefined) {
+ origin = "";
+ }
+ this.stream.altsvc(host, port, protocolID, maxAge, origin);
+};
+
+// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
+// `request`. See `Server.prototype.on` for explanation.
+OutgoingResponse.prototype.on = function on(event, listener) {
+ if (this.request && (event === 'timeout')) {
+ this.request.on(event, listener && listener.bind(this));
+ } else {
+ OutgoingMessage.prototype.on.call(this, event, listener);
+ }
+};
+
+// Client side
+// ===========
+
+exports.ClientRequest = OutgoingRequest; // for API compatibility
+exports.OutgoingRequest = OutgoingRequest;
+exports.IncomingResponse = IncomingResponse;
+exports.Agent = Agent;
+exports.globalAgent = undefined;
+
+function requestRaw(options, callback) {
+ if (typeof options === "string") {
+ options = url.parse(options);
+ }
+ options.plain = true;
+ if (options.protocol && options.protocol !== "http:") {
+ throw new Error('This interface only supports http-schemed URLs');
+ }
+ if (options.agent && typeof(options.agent.request) === 'function') {
+ var agentOptions = util._extend({}, options);
+ delete agentOptions.agent;
+ return options.agent.request(agentOptions, callback);
+ }
+ return exports.globalAgent.request(options, callback);
+}
+
+function requestTLS(options, callback) {
+ if (typeof options === "string") {
+ options = url.parse(options);
+ }
+ options.plain = false;
+ if (options.protocol && options.protocol !== "https:") {
+ throw new Error('This interface only supports https-schemed URLs');
+ }
+ if (options.agent && typeof(options.agent.request) === 'function') {
+ var agentOptions = util._extend({}, options);
+ delete agentOptions.agent;
+ return options.agent.request(agentOptions, callback);
+ }
+ return exports.globalAgent.request(options, callback);
+}
+
+function getRaw(options, callback) {
+ if (typeof options === "string") {
+ options = url.parse(options);
+ }
+ options.plain = true;
+ if (options.protocol && options.protocol !== "http:") {
+ throw new Error('This interface only supports http-schemed URLs');
+ }
+ if (options.agent && typeof(options.agent.get) === 'function') {
+ var agentOptions = util._extend({}, options);
+ delete agentOptions.agent;
+ return options.agent.get(agentOptions, callback);
+ }
+ return exports.globalAgent.get(options, callback);
+}
+
+function getTLS(options, callback) {
+ if (typeof options === "string") {
+ options = url.parse(options);
+ }
+ options.plain = false;
+ if (options.protocol && options.protocol !== "https:") {
+ throw new Error('This interface only supports https-schemed URLs');
+ }
+ if (options.agent && typeof(options.agent.get) === 'function') {
+ var agentOptions = util._extend({}, options);
+ delete agentOptions.agent;
+ return options.agent.get(agentOptions, callback);
+ }
+ return exports.globalAgent.get(options, callback);
+}
+
+// Agent class
+// -----------
+
+function Agent(options) {
+ EventEmitter.call(this);
+ this.setMaxListeners(0);
+
+ options = util._extend({}, options);
+
+ this._settings = options.settings;
+ this._log = (options.log || defaultLogger).child({ component: 'http' });
+ this.endpoints = {};
+
+  // * Using our own HTTPS agent, because the global agent does not look at `NPN/ALPNProtocols` when
+ // generating the key identifying the connection, so we may get useless non-negotiated TLS
+ // channels even if we ask for a negotiated one. This agent will contain only negotiated
+ // channels.
+ options.ALPNProtocols = supportedProtocols;
+ options.NPNProtocols = supportedProtocols;
+ this._httpsAgent = new https.Agent(options);
+
+ this.sockets = this._httpsAgent.sockets;
+ this.requests = this._httpsAgent.requests;
+}
+Agent.prototype = Object.create(EventEmitter.prototype, { constructor: { value: Agent } });
+
+Agent.prototype.request = function request(options, callback) {
+ if (typeof options === 'string') {
+ options = url.parse(options);
+ } else {
+ options = util._extend({}, options);
+ }
+
+ options.method = (options.method || 'GET').toUpperCase();
+ options.protocol = options.protocol || 'https:';
+ options.host = options.hostname || options.host || 'localhost';
+ options.port = options.port || 443;
+ options.path = options.path || '/';
+
+ if (!options.plain && options.protocol === 'http:') {
+ this._log.error('Trying to negotiate client request with Upgrade from HTTP/1.1');
+ this.emit('error', new Error('HTTP1.1 -> HTTP2 upgrade is not yet supported.'));
+ }
+
+ var request = new OutgoingRequest(this._log);
+
+ if (callback) {
+ request.on('response', callback);
+ }
+
+ var key = [
+ !!options.plain,
+ options.host,
+ options.port
+ ].join(':');
+ var self = this;
+
+ // * There's an existing HTTP/2 connection to this host
+ if (key in this.endpoints) {
+ var endpoint = this.endpoints[key];
+ request._start(endpoint.createStream(), options);
+ }
+
+ // * HTTP/2 over plain TCP
+ else if (options.plain) {
+ endpoint = new Endpoint(this._log, 'CLIENT', this._settings);
+ endpoint.socket = net.connect({
+ host: options.host,
+ port: options.port,
+ localAddress: options.localAddress
+ });
+
+ endpoint.socket.on('error', function (error) {
+ self._log.error('Socket error: ' + error.toString());
+ request.emit('error', error);
+ });
+
+ endpoint.on('error', function(error){
+ self._log.error('Connection error: ' + error.toString());
+ request.emit('error', error);
+ });
+
+ this.endpoints[key] = endpoint;
+ endpoint.pipe(endpoint.socket).pipe(endpoint);
+ request._start(endpoint.createStream(), options);
+ }
+
+ // * HTTP/2 over TLS negotiated using NPN or ALPN, or fallback to HTTPS1
+ else {
+ var started = false;
+ var createAgent = hasAgentOptions(options);
+ options.ALPNProtocols = supportedProtocols;
+ options.NPNProtocols = supportedProtocols;
+ options.servername = options.host; // Server Name Indication
+ options.ciphers = options.ciphers || cipherSuites;
+ if (createAgent) {
+ options.agent = new https.Agent(options);
+ } else if (options.agent == null) {
+ options.agent = this._httpsAgent;
+ }
+ var httpsRequest = https.request(options);
+
+ httpsRequest.on('error', function (error) {
+ self._log.error('Socket error: ' + error.toString());
+ self.removeAllListeners(key);
+ request.emit('error', error);
+ });
+
+ httpsRequest.on('socket', function(socket) {
+ var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;
+ if (negotiatedProtocol != null) { // null in >=0.11.0, undefined in <0.11.0
+ negotiated();
+ } else {
+ socket.on('secureConnect', negotiated);
+ }
+ });
+
+ function negotiated() {
+ var endpoint;
+ var negotiatedProtocol = httpsRequest.socket.alpnProtocol || httpsRequest.socket.npnProtocol;
+ if (negotiatedProtocol === protocol.VERSION) {
+ httpsRequest.socket.emit('agentRemove');
+ unbundleSocket(httpsRequest.socket);
+ endpoint = new Endpoint(self._log, 'CLIENT', self._settings);
+ endpoint.socket = httpsRequest.socket;
+ endpoint.pipe(endpoint.socket).pipe(endpoint);
+ }
+ if (started) {
+      // ** In the meantime, another connection was made to the same host...
+ if (endpoint) {
+ // *** and it turned out to be HTTP2 and the request was multiplexed on that one, so we should close this one
+ endpoint.close();
+ }
+ // *** otherwise, the fallback to HTTPS1 is already done.
+ } else {
+ if (endpoint) {
+ self._log.info({ e: endpoint, server: options.host + ':' + options.port },
+ 'New outgoing HTTP/2 connection');
+ self.endpoints[key] = endpoint;
+ self.emit(key, endpoint);
+ } else {
+ self.emit(key, undefined);
+ }
+ }
+ }
+
+ this.once(key, function(endpoint) {
+ started = true;
+ if (endpoint) {
+ request._start(endpoint.createStream(), options);
+ } else {
+ request._fallback(httpsRequest);
+ }
+ });
+ }
+
+ return request;
+};
+
+Agent.prototype.get = function get(options, callback) {
+ var request = this.request(options, callback);
+ request.end();
+ return request;
+};
+
+Agent.prototype.destroy = function(error) {
+ if (this._httpsAgent) {
+ this._httpsAgent.destroy();
+ }
+ for (var key in this.endpoints) {
+ this.endpoints[key].close(error);
+ }
+};
+
+function unbundleSocket(socket) {
+ socket.removeAllListeners('data');
+ socket.removeAllListeners('end');
+ socket.removeAllListeners('readable');
+ socket.removeAllListeners('close');
+ socket.removeAllListeners('error');
+ socket.unpipe();
+ delete socket.ondata;
+ delete socket.onend;
+}
+
+function hasAgentOptions(options) {
+ return options.pfx != null ||
+ options.key != null ||
+ options.passphrase != null ||
+ options.cert != null ||
+ options.ca != null ||
+ options.ciphers != null ||
+ options.rejectUnauthorized != null ||
+ options.secureProtocol != null;
+}
+
+Object.defineProperty(Agent.prototype, 'maxSockets', {
+ get: function getMaxSockets() {
+ return this._httpsAgent.maxSockets;
+ },
+ set: function setMaxSockets(value) {
+ this._httpsAgent.maxSockets = value;
+ }
+});
+
+exports.globalAgent = new Agent();
+
+// OutgoingRequest class
+// ---------------------
+
+function OutgoingRequest() {
+ OutgoingMessage.call(this);
+
+ this._log = undefined;
+
+ this.stream = undefined;
+}
+OutgoingRequest.prototype = Object.create(OutgoingMessage.prototype, { constructor: { value: OutgoingRequest } });
+
+OutgoingRequest.prototype._start = function _start(stream, options) {
+ this.stream = stream;
+ this.options = options;
+
+ this._log = stream._log.child({ component: 'http' });
+
+ for (var key in options.headers) {
+ this.setHeader(key, options.headers[key]);
+ }
+ var headers = this._headers;
+ delete headers.host;
+
+ if (options.auth) {
+ headers.authorization = 'Basic ' + Buffer.from(options.auth).toString('base64');
+ }
+
+ headers[':scheme'] = options.protocol.slice(0, -1);
+ headers[':method'] = options.method;
+ headers[':authority'] = options.host;
+ headers[':path'] = options.path;
+
+ this._log.info({ scheme: headers[':scheme'], method: headers[':method'],
+ authority: headers[':authority'], path: headers[':path'],
+ headers: (options.headers || {}) }, 'Sending request');
+ this.stream.headers(headers);
+ this.headersSent = true;
+
+ this.emit('socket', this.stream);
+ var response = new IncomingResponse(this.stream);
+ response.req = this;
+ response.once('ready', this.emit.bind(this, 'response', response));
+
+ this.stream.on('promise', this._onPromise.bind(this));
+};
+
+OutgoingRequest.prototype._fallback = function _fallback(request) {
+ request.on('response', this.emit.bind(this, 'response'));
+ this.stream = this.request = request;
+ this.emit('socket', this.socket);
+};
+
+OutgoingRequest.prototype.setPriority = function setPriority(priority) {
+ if (this.stream) {
+ this.stream.priority(priority);
+ } else {
+ this.once('socket', this.setPriority.bind(this, priority));
+ }
+};
+
+// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
+// `request`. See `Server.prototype.on` for explanation.
+OutgoingRequest.prototype.on = function on(event, listener) {
+ if (this.request && (event === 'upgrade')) {
+ this.request.on(event, listener && listener.bind(this));
+ } else {
+ OutgoingMessage.prototype.on.call(this, event, listener);
+ }
+};
+
+// Methods only in fallback mode
+OutgoingRequest.prototype.setNoDelay = function setNoDelay(noDelay) {
+ if (this.request) {
+ this.request.setNoDelay(noDelay);
+ } else if (!this.stream) {
+ this.on('socket', this.setNoDelay.bind(this, noDelay));
+ }
+};
+
+OutgoingRequest.prototype.setSocketKeepAlive = function setSocketKeepAlive(enable, initialDelay) {
+ if (this.request) {
+ this.request.setSocketKeepAlive(enable, initialDelay);
+ } else if (!this.stream) {
+ this.on('socket', this.setSocketKeepAlive.bind(this, enable, initialDelay));
+ }
+};
+
+OutgoingRequest.prototype.setTimeout = function setTimeout(timeout, callback) {
+ if (this.request) {
+ this.request.setTimeout(timeout, callback);
+ } else if (!this.stream) {
+ this.on('socket', this.setTimeout.bind(this, timeout, callback));
+ }
+};
+
+// Aborting the request
+OutgoingRequest.prototype.abort = function abort() {
+ if (this.request) {
+ this.request.abort();
+ } else if (this.stream) {
+ this.stream.reset('CANCEL');
+ } else {
+ this.on('socket', this.abort.bind(this));
+ }
+};
+
+// Receiving push promises
+OutgoingRequest.prototype._onPromise = function _onPromise(stream, headers) {
+ this._log.info({ push_stream: stream.id }, 'Receiving push promise');
+
+ var promise = new IncomingPromise(stream, headers);
+
+ if (this.listeners('push').length > 0) {
+ this.emit('push', promise);
+ } else {
+ promise.cancel();
+ }
+};
+
+// IncomingResponse class
+// ----------------------
+
+function IncomingResponse(stream) {
+ IncomingMessage.call(this, stream);
+}
+IncomingResponse.prototype = Object.create(IncomingMessage.prototype, { constructor: { value: IncomingResponse } });
+
+// [Response Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.4)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
+IncomingResponse.prototype._onHeaders = function _onHeaders(headers) {
+ // * A single ":status" header field is defined that carries the HTTP status code field. This
+ // header field MUST be included in all responses.
+ // * A client MUST treat the absence of the ":status" header field, the presence of multiple
+ // values, or an invalid value as a stream error of type PROTOCOL_ERROR.
+ // Note: currently, we do not enforce it strictly: we accept any format, and parse it as int
+ // * HTTP/2.0 does not define a way to carry the reason phrase that is included in an HTTP/1.1
+ // status line.
+  this.statusCode = parseInt(this._checkSpecialHeader(':status', headers[':status']), 10);
+
+ // * Handling regular headers.
+ IncomingMessage.prototype._onHeaders.call(this, headers);
+
+ // * Signaling that the headers arrived.
+ this._log.info({ status: this.statusCode, headers: this.headers}, 'Incoming response');
+ this.emit('ready');
+};
+
+// IncomingPromise class
+// -------------------------
+
+function IncomingPromise(responseStream, promiseHeaders) {
+ var stream = new Readable();
+ stream._read = noop;
+ stream.push(null);
+ stream._log = responseStream._log;
+
+ IncomingRequest.call(this, stream);
+
+ this._onHeaders(promiseHeaders);
+
+ this._responseStream = responseStream;
+
+ var response = new IncomingResponse(this._responseStream);
+ response.once('ready', this.emit.bind(this, 'response', response));
+
+ this.stream.on('promise', this._onPromise.bind(this));
+}
+IncomingPromise.prototype = Object.create(IncomingRequest.prototype, { constructor: { value: IncomingPromise } });
+
+IncomingPromise.prototype.cancel = function cancel() {
+ this._responseStream.reset('CANCEL');
+};
+
+IncomingPromise.prototype.setPriority = function setPriority(priority) {
+ this._responseStream.priority(priority);
+};
+
+IncomingPromise.prototype._onPromise = OutgoingRequest.prototype._onPromise;
diff --git a/testing/xpcshell/node-http2/lib/index.js b/testing/xpcshell/node-http2/lib/index.js
new file mode 100644
index 0000000000..c67883defe
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/index.js
@@ -0,0 +1,52 @@
+// [node-http2][homepage] is an [HTTP/2][http2] implementation for [node.js][node].
+//
+// The core of the protocol is implemented in the protocol sub-directory. This directory provides
+// two important features on top of the protocol:
+//
+// * Implementation of the different negotiation schemes that can be used to start an HTTP/2
+//   connection. These include TLS with ALPN, Upgrade and plain TCP.
+//
+// * Providing an API very similar to the standard node.js [HTTPS module API][node-https]
+// (which is in turn very similar to the [HTTP module API][node-http]).
+//
+// [homepage]: https://github.com/molnarg/node-http2
+// [http2]: https://tools.ietf.org/html/rfc7540
+// [node]: https://nodejs.org/
+// [node-https]: https://nodejs.org/api/https.html
+// [node-http]: https://nodejs.org/api/http.html
+
+module.exports = require('./http');
+
+/*
+ HTTP API
+
+ | ^
+ | |
+ +-------------|------------|------------------------------------------------------+
+ | | | Server/Agent |
+ | v | |
+ | +----------+ +----------+ |
+ | | Outgoing | | Incoming | |
+ | | req/res. | | req/res. | |
+ | +----------+ +----------+ |
+ | | ^ |
+ | | | |
+ | +---------|------------|-------------------------------------+ +----- |
+ | | | | Endpoint | | |
+ | | | | | | |
+ | | v | | | |
+ | | +-----------------------+ +-------------------- | | |
+ | | | Stream | | Stream ... | | |
+ | | +-----------------------+ +-------------------- | | |
+ | | | | |
+ | +------------------------------------------------------------+ +----- |
+ | | | |
+ | | | |
+ | v | |
+ | +------------------------------------------------------------+ +----- |
+ | | TCP stream | | ... |
+ | +------------------------------------------------------------+ +----- |
+ | |
+ +---------------------------------------------------------------------------------+
+
+*/
diff --git a/testing/xpcshell/node-http2/lib/protocol/compressor.js b/testing/xpcshell/node-http2/lib/protocol/compressor.js
new file mode 100644
index 0000000000..6f91f86ec9
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/compressor.js
@@ -0,0 +1,1428 @@
+// The implementation of the [HTTP/2 Header Compression][http2-compression] spec is separated from
+// the 'integration' part which handles HEADERS and PUSH_PROMISE frames. The compression itself is
+// implemented in the first part of the file, and consists of three classes: `HeaderTable`,
+// `HeaderSetDecompressor` and `HeaderSetCompressor`. The latter two classes are
+// [Transform Stream][node-transform] subclasses that operate in [object mode][node-objectmode].
+// These transform chunks of binary data into `[name, value]` pairs and vice versa, and store their
+// state in `HeaderTable` instances.
+//
+// The 'integration' part is also implemented by two [Transform Stream][node-transform] subclasses
+// that operate in [object mode][node-objectmode]: the `Compressor` and the `Decompressor`. These
+// provide a layer between the [framer](framer.html) and the
+// [connection handling component](connection.html).
+//
+// [node-transform]: https://nodejs.org/api/stream.html#stream_class_stream_transform
+// [node-objectmode]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options
+// [http2-compression]: https://tools.ietf.org/html/rfc7541
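+//
+// As a rough usage sketch (not part of the library): the require path assumes code sitting next
+// to this file, the object literal stands in for a bunyan-style logger, and the header pairs are
+// arbitrary examples.
+//
+//     var compressor = require('./compressor');
+//     function noop() {}
+//     var log = { child: function() { return this; },
+//                 trace: noop, debug: noop, info: noop, error: noop };
+//
+//     // The encoder and the decoder keep separate header tables, just like two endpoints would.
+//     var encoder = new compressor.HeaderSetCompressor(log, new compressor.HeaderTable(log));
+//     var decoder = new compressor.HeaderSetDecompressor(log, new compressor.HeaderTable(log));
+//
+//     encoder.pipe(decoder);
+//     decoder.on('data', function(pair) {
+//       console.log(pair[0] + ': ' + pair[1]);  // the round-tripped [name, value] pairs
+//     });
+//
+//     encoder.write([':method', 'GET']);
+//     encoder.write(['user-agent', 'example']);
+//     encoder.end();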
+
+exports.HeaderTable = HeaderTable;
+exports.HuffmanTable = HuffmanTable;
+exports.HeaderSetCompressor = HeaderSetCompressor;
+exports.HeaderSetDecompressor = HeaderSetDecompressor;
+exports.Compressor = Compressor;
+exports.Decompressor = Decompressor;
+
+var TransformStream = require('stream').Transform;
+var assert = require('assert');
+var util = require('util');
+
+// Header compression
+// ==================
+
+// The HeaderTable class
+// ---------------------
+
+// The [Header Table] is a component used to associate headers with index values. It is basically an
+// ordered list of `[name, value]` pairs, so it's implemented as a subclass of `Array`.
+// In this implementation, the Header Table and the [Static Table] are handled as a single table.
+// [Header Table]: https://tools.ietf.org/html/rfc7541#section-2.3.2
+// [Static Table]: https://tools.ietf.org/html/rfc7541#section-2.3.1
+function HeaderTable(log, limit) {
+ var self = HeaderTable.staticTable.map(entryFromPair);
+ self._log = log;
+ self._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
+ self._staticLength = self.length;
+ self._size = 0;
+ self._enforceLimit = HeaderTable.prototype._enforceLimit;
+ self.add = HeaderTable.prototype.add;
+ self.setSizeLimit = HeaderTable.prototype.setSizeLimit;
+ return self;
+}
+
+function entryFromPair(pair) {
+ var entry = pair.slice();
+ entry._size = size(entry);
+ return entry;
+}
+
+// The encoder decides how to update the header table and as such can control how much memory is
+// used by the header table. To limit the memory requirements on the decoder side, the header table
+// size is bounded.
+//
+// * The default header table size limit is 4096 bytes.
+// * The size of an entry is defined as follows: the size of an entry is the sum of its name's
+// length in bytes, of its value's length in bytes and of 32 bytes.
+// * The size of a header table is the sum of the size of its entries.
+var DEFAULT_HEADER_TABLE_LIMIT = 4096;
+
+function size(entry) {
+ return (Buffer.from(entry[0] + entry[1], 'utf8')).length + 32;
+}
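+
+// For example, under this definition the entry `['content-type', 'text/html']` occupies
+// 12 + 9 + 32 = 53 bytes of table space.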
+
+// The `add(entry)` method can be used to [manage the header table][tablemgmt]:
+// [tablemgmt]: https://tools.ietf.org/html/rfc7541#section-4
+//
+// * it pushes the new `entry` at the beginning of the table
+// * before doing such a modification, it has to be ensured that the header table size will stay
+// lower than or equal to the header table size limit. To achieve this, entries are evicted from
+// the end of the header table until the size of the header table is less than or equal to
+// `(this._limit - entry.size)`, or until the table is empty.
+//
+// <---------- Index Address Space ---------->
+// <-- Static Table --> <-- Header Table -->
+// +---+-----------+---+ +---+-----------+---+
+// | 0 | ... | k | |k+1| ... | n |
+// +---+-----------+---+ +---+-----------+---+
+// ^ |
+// | V
+// Insertion Point Drop Point
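+//
+// For example, with a 100-byte `_limit` and a table already holding a single 60-byte dynamic
+// entry, adding another 60-byte entry first evicts the existing one (entries are dropped until
+// at most 100 - 60 = 40 bytes remain in use), and only then is the new entry inserted.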
+
+HeaderTable.prototype._enforceLimit = function _enforceLimit(limit) {
+ var droppedEntries = [];
+ while ((this._size > 0) && (this._size > limit)) {
+ var dropped = this.pop();
+ this._size -= dropped._size;
+ droppedEntries.unshift(dropped);
+ }
+ return droppedEntries;
+};
+
+HeaderTable.prototype.add = function(entry) {
+ var limit = this._limit - entry._size;
+ var droppedEntries = this._enforceLimit(limit);
+
+ if (this._size <= limit) {
+ this.splice(this._staticLength, 0, entry);
+ this._size += entry._size;
+ }
+
+ return droppedEntries;
+};
+
+// The table size limit can be changed externally. In this case, the same eviction algorithm is used
+HeaderTable.prototype.setSizeLimit = function setSizeLimit(limit) {
+ this._limit = limit;
+ this._enforceLimit(this._limit);
+};
+
+// [The Static Table](https://tools.ietf.org/html/rfc7541#section-2.3.1)
+// ------------------
+
+// The table is generated by feeding the table from the spec to the following sed command:
+//
+// sed -re "s/\s*\| [0-9]+\s*\| ([^ ]*)/ [ '\1'/g" -e "s/\|\s([^ ]*)/, '\1'/g" -e 's/ \|/],/g'
+
+HeaderTable.staticTable = [
+ [ ':authority' , '' ],
+ [ ':method' , 'GET' ],
+ [ ':method' , 'POST' ],
+ [ ':path' , '/' ],
+ [ ':path' , '/index.html' ],
+ [ ':scheme' , 'http' ],
+ [ ':scheme' , 'https' ],
+ [ ':status' , '200' ],
+ [ ':status' , '204' ],
+ [ ':status' , '206' ],
+ [ ':status' , '304' ],
+ [ ':status' , '400' ],
+ [ ':status' , '404' ],
+ [ ':status' , '500' ],
+ [ 'accept-charset' , '' ],
+ [ 'accept-encoding' , 'gzip, deflate'],
+ [ 'accept-language' , '' ],
+ [ 'accept-ranges' , '' ],
+ [ 'accept' , '' ],
+ [ 'access-control-allow-origin' , '' ],
+ [ 'age' , '' ],
+ [ 'allow' , '' ],
+ [ 'authorization' , '' ],
+ [ 'cache-control' , '' ],
+ [ 'content-disposition' , '' ],
+ [ 'content-encoding' , '' ],
+ [ 'content-language' , '' ],
+ [ 'content-length' , '' ],
+ [ 'content-location' , '' ],
+ [ 'content-range' , '' ],
+ [ 'content-type' , '' ],
+ [ 'cookie' , '' ],
+ [ 'date' , '' ],
+ [ 'etag' , '' ],
+ [ 'expect' , '' ],
+ [ 'expires' , '' ],
+ [ 'from' , '' ],
+ [ 'host' , '' ],
+ [ 'if-match' , '' ],
+ [ 'if-modified-since' , '' ],
+ [ 'if-none-match' , '' ],
+ [ 'if-range' , '' ],
+ [ 'if-unmodified-since' , '' ],
+ [ 'last-modified' , '' ],
+ [ 'link' , '' ],
+ [ 'location' , '' ],
+ [ 'max-forwards' , '' ],
+ [ 'proxy-authenticate' , '' ],
+ [ 'proxy-authorization' , '' ],
+ [ 'range' , '' ],
+ [ 'referer' , '' ],
+ [ 'refresh' , '' ],
+ [ 'retry-after' , '' ],
+ [ 'server' , '' ],
+ [ 'set-cookie' , '' ],
+ [ 'strict-transport-security' , '' ],
+ [ 'transfer-encoding' , '' ],
+ [ 'user-agent' , '' ],
+ [ 'vary' , '' ],
+ [ 'via' , '' ],
+ [ 'www-authenticate' , '' ]
+];
+
+// The HeaderSetDecompressor class
+// -------------------------------
+
+// A `HeaderSetDecompressor` instance is a transform stream that can be used to *decompress a
+// single header set*. Its input is a stream of binary data chunks and its output is a stream of
+// `[name, value]` pairs.
+//
+// Currently, it is not a proper streaming decompressor implementation, since it buffers its input
+// until the end of the stream, and then processes the whole header block at once.
+
+util.inherits(HeaderSetDecompressor, TransformStream);
+function HeaderSetDecompressor(log, table) {
+ TransformStream.call(this, { objectMode: true });
+
+ this._log = log.child({ component: 'compressor' });
+ this._table = table;
+ this._chunks = [];
+}
+
+// `_transform` is the implementation of the [corresponding virtual function][_transform] of the
+// TransformStream class. It collects the data chunks for later processing.
+// [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
+HeaderSetDecompressor.prototype._transform = function _transform(chunk, encoding, callback) {
+ this._chunks.push(chunk);
+ callback();
+};
+
+// `execute(rep)` executes the given [header representation][representation].
+// [representation]: https://tools.ietf.org/html/rfc7541#section-6
+
+// The *JavaScript object representation* of a header representation:
+//
+// {
+// name: String || Integer, // string literal or index
+// value: String || Integer, // string literal or index
+// index: Boolean // with or without indexing
+// }
+//
+// *Important:* to ease the indexing of the header table, indexes start at 0 instead of 1.
+//
+// Examples:
+//
+// Indexed:
+// { name: 2 , value: 2 , index: false }
+// Literal:
+// { name: 2 , value: 'X', index: false } // without indexing
+// { name: 2 , value: 'Y', index: true } // with indexing
+// { name: 'A', value: 'Z', index: true } // with indexing, literal name
+HeaderSetDecompressor.prototype._execute = function _execute(rep) {
+ this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
+ 'Executing header representation');
+
+ var entry, pair;
+
+ if (rep.contextUpdate) {
+ this._table.setSizeLimit(rep.newMaxSize);
+ }
+
+ // * An _indexed representation_ entails the following actions:
+ // * The header field corresponding to the referenced entry is emitted
+ else if (typeof rep.value === 'number') {
+ var index = rep.value;
+ entry = this._table[index];
+
+ pair = entry.slice();
+ this.push(pair);
+ }
+
+ // * A _literal representation_ that is _not added_ to the header table entails the following
+ // action:
+ // * The header is emitted.
+ // * A _literal representation_ that is _added_ to the header table entails the following further
+ // actions:
+ // * The header is added to the header table.
+ // * The header is emitted.
+ else {
+ if (typeof rep.name === 'number') {
+ pair = [this._table[rep.name][0], rep.value];
+ } else {
+ pair = [rep.name, rep.value];
+ }
+
+ if (rep.index) {
+ entry = entryFromPair(pair);
+ this._table.add(entry);
+ }
+
+ this.push(pair);
+ }
+};
+
+// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
+// TransformStream class. The whole decompressing process is done in `_flush`. It gets called when
+// the input stream is over.
+// [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
+HeaderSetDecompressor.prototype._flush = function _flush(callback) {
+ var buffer = concat(this._chunks);
+
+ // * processes the header representations
+ buffer.cursor = 0;
+ while (buffer.cursor < buffer.length) {
+ this._execute(HeaderSetDecompressor.header(buffer));
+ }
+
+ callback();
+};
+
+// The HeaderSetCompressor class
+// -----------------------------
+
+// A `HeaderSetCompressor` instance is a transform stream that can be used to *compress a single
+// header set*. Its input is a stream of `[name, value]` pairs and its output is a stream of
+// binary data chunks.
+//
+// It is a real streaming compressor, since it does not wait until the header set is complete.
+//
+// The compression algorithm is (intentionally) not specified by the spec. Therefore, the current
+// compression algorithm can probably be improved in the future.
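+//
+// A minimal usage sketch (illustration only, not part of the library), assuming a bunyan-style
+// `log` object and a `HeaderTable` instance named `table` as created elsewhere in this file:
+//
+//     var compressor = new HeaderSetCompressor(log, table);
+//     compressor.on('data', function(chunk) { /* chunk is a Buffer of compressed data */ });
+//     compressor.write([':method', 'GET']);
+//     compressor.write(['accept', 'text/html']);
+//     compressor.end();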
+
+util.inherits(HeaderSetCompressor, TransformStream);
+function HeaderSetCompressor(log, table) {
+ TransformStream.call(this, { objectMode: true });
+
+ this._log = log.child({ component: 'compressor' });
+ this._table = table;
+ this.push = TransformStream.prototype.push.bind(this);
+}
+
+HeaderSetCompressor.prototype.send = function send(rep) {
+ this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
+ 'Emitting header representation');
+
+ if (!rep.chunks) {
+ rep.chunks = HeaderSetCompressor.header(rep);
+ }
+ rep.chunks.forEach(this.push);
+};
+
+// `_transform` is the implementation of the [corresponding virtual function][_transform] of the
+// TransformStream class. It processes the input headers one by one:
+// [_transform]: https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
+HeaderSetCompressor.prototype._transform = function _transform(pair, encoding, callback) {
+ var name = pair[0].toLowerCase();
+ var value = pair[1];
+ var entry, rep;
+
+ // * tries to find full (name, value) or name match in the header table
+ var nameMatch = -1, fullMatch = -1;
+ for (var droppedIndex = 0; droppedIndex < this._table.length; droppedIndex++) {
+ entry = this._table[droppedIndex];
+ if (entry[0] === name) {
+ if (entry[1] === value) {
+ fullMatch = droppedIndex;
+ break;
+ } else if (nameMatch === -1) {
+ nameMatch = droppedIndex;
+ }
+ }
+ }
+
+ var mustNeverIndex = ((name === 'cookie' && value.length < 20) ||
+ (name === 'set-cookie' && value.length < 20) ||
+ name === 'authorization');
+
+ if (fullMatch !== -1 && !mustNeverIndex) {
+ this.send({ name: fullMatch, value: fullMatch, index: false });
+ }
+
+ // * otherwise, it will be a literal representation (with a name index if there's a name match)
+ else {
+ entry = entryFromPair(pair);
+
+ var indexing = (entry._size < this._table._limit / 2) && !mustNeverIndex;
+
+ if (indexing) {
+ this._table.add(entry);
+ }
+
+ this.send({ name: (nameMatch !== -1) ? nameMatch : name, value: value, index: indexing, mustNeverIndex: mustNeverIndex, contextUpdate: false });
+ }
+
+ callback();
+};
+
+// `_flush` is the implementation of the [corresponding virtual function][_flush] of the
+// TransformStream class. It gets called when there's no more header to compress. The final step:
+// [_flush]: https://nodejs.org/api/stream.html#stream_transform_flush_callback
+HeaderSetCompressor.prototype._flush = function _flush(callback) {
+ callback();
+};
+
+// [Detailed Format](https://tools.ietf.org/html/rfc7541#section-5)
+// -----------------
+
+// ### Integer representation ###
+//
+// The algorithm to represent an integer I is as follows:
+//
+// 1. If I < 2^N - 1, encode I on N bits
+// 2. Else, encode 2^N - 1 on N bits and do the following steps:
+// 1. Set I to (I - (2^N - 1)) and Q to 1
+// 2. While Q > 0
+// 1. Compute Q and R, quotient and remainder of I divided by 2^7
+// 2. If Q is strictly greater than 0, write one 1 bit; otherwise, write one 0 bit
+// 3. Encode R on the next 7 bits
+// 4. I = Q
+
+HeaderSetCompressor.integer = function writeInteger(I, N) {
+ var limit = Math.pow(2,N) - 1;
+ if (I < limit) {
+ return [Buffer.from([I])];
+ }
+
+ var bytes = [];
+ if (N !== 0) {
+ bytes.push(limit);
+ }
+ I -= limit;
+
+ var Q = 1, R;
+ while (Q > 0) {
+ Q = Math.floor(I / 128);
+ R = I % 128;
+
+ if (Q > 0) {
+ R += 128;
+ }
+ bytes.push(R);
+
+ I = Q;
+ }
+
+ return [Buffer.from(bytes)];
+};
+
+// The inverse algorithm:
+//
+// 1. Set I to the number coded on the lower N bits of the first byte
+// 2. If I is smaller than 2^N - 1 then return I
+// 3. Else the number is encoded on more than one byte, so do the following steps:
+//    1. Set M to 0
+//    2. Repeat the following until returning with I:
+// 1. Let B be the next byte (the first byte if N is 0)
+// 2. Read out the lower 7 bits of B and multiply it with 2^M
+// 3. Increase I with this number
+// 4. Increase M by 7
+// 5. Return I if the most significant bit of B is 0
+
+HeaderSetDecompressor.integer = function readInteger(buffer, N) {
+ var limit = Math.pow(2,N) - 1;
+
+ var I = buffer[buffer.cursor] & limit;
+ if (N !== 0) {
+ buffer.cursor += 1;
+ }
+
+ if (I === limit) {
+ var M = 0;
+ do {
+ I += (buffer[buffer.cursor] & 127) << M;
+ M += 7;
+ buffer.cursor += 1;
+ } while (buffer[buffer.cursor - 1] & 128);
+ }
+
+ return I;
+};
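+
+// As an illustration (not part of the library), encoding and then decoding 1337 with a 5-bit
+// prefix reproduces the example from RFC 7541, Appendix C.1.2:
+//
+//     var chunks = HeaderSetCompressor.integer(1337, 5); // [Buffer.from([31, 154, 10])]
+//     var buffer = chunks[0];
+//     buffer.cursor = 0;
+//     HeaderSetDecompressor.integer(buffer, 5);          // 1337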
+
+// ### Huffman Encoding ###
+
+function HuffmanTable(table) {
+ function createTree(codes, position) {
+ if (codes.length === 1) {
+ return [table.indexOf(codes[0])];
+ }
+
+ else {
+ position = position || 0;
+ var zero = [];
+ var one = [];
+ for (var i = 0; i < codes.length; i++) {
+ var string = codes[i];
+ if (string[position] === '0') {
+ zero.push(string);
+ } else {
+ one.push(string);
+ }
+ }
+ return [createTree(zero, position + 1), createTree(one, position + 1)];
+ }
+ }
+
+ this.tree = createTree(table);
+
+ this.codes = table.map(function(bits) {
+ return parseInt(bits, 2);
+ });
+ this.lengths = table.map(function(bits) {
+ return bits.length;
+ });
+}
+
+HuffmanTable.prototype.encode = function encode(buffer) {
+ var result = [];
+ var space = 8;
+
+ function add(data) {
+ if (space === 8) {
+ result.push(data);
+ } else {
+ result[result.length - 1] |= data;
+ }
+ }
+
+ for (var i = 0; i < buffer.length; i++) {
+ var byte = buffer[i];
+ var code = this.codes[byte];
+ var length = this.lengths[byte];
+
+ while (length !== 0) {
+ if (space >= length) {
+ add(code << (space - length));
+ code = 0;
+ space -= length;
+ length = 0;
+ } else {
+ var shift = length - space;
+ var msb = code >> shift;
+ add(msb);
+ code -= msb << shift;
+ length -= space;
+ space = 0;
+ }
+
+ if (space === 0) {
+ space = 8;
+ }
+ }
+ }
+
+ if (space !== 8) {
+ add(this.codes[256] >> (this.lengths[256] - space));
+ }
+
+ return Buffer.from(result);
+};
+
+HuffmanTable.prototype.decode = function decode(buffer) {
+ var result = [];
+ var subtree = this.tree;
+
+ for (var i = 0; i < buffer.length; i++) {
+ var byte = buffer[i];
+
+ for (var j = 0; j < 8; j++) {
+ var bit = (byte & 128) ? 1 : 0;
+ byte = byte << 1;
+
+ subtree = subtree[bit];
+ if (subtree.length === 1) {
+ result.push(subtree[0]);
+ subtree = this.tree;
+ }
+ }
+ }
+
+ return Buffer.from(result);
+};
+
+// The initializer arrays for the Huffman tables are generated by feeding the tables from the
+// spec to this sed command:
+//
+// sed -e "s/^.* [|]//g" -e "s/|//g" -e "s/ .*//g" -e "s/^/ '/g" -e "s/$/',/g"
+
+HuffmanTable.huffmanTable = new HuffmanTable([
+ '1111111111000',
+ '11111111111111111011000',
+ '1111111111111111111111100010',
+ '1111111111111111111111100011',
+ '1111111111111111111111100100',
+ '1111111111111111111111100101',
+ '1111111111111111111111100110',
+ '1111111111111111111111100111',
+ '1111111111111111111111101000',
+ '111111111111111111101010',
+ '111111111111111111111111111100',
+ '1111111111111111111111101001',
+ '1111111111111111111111101010',
+ '111111111111111111111111111101',
+ '1111111111111111111111101011',
+ '1111111111111111111111101100',
+ '1111111111111111111111101101',
+ '1111111111111111111111101110',
+ '1111111111111111111111101111',
+ '1111111111111111111111110000',
+ '1111111111111111111111110001',
+ '1111111111111111111111110010',
+ '111111111111111111111111111110',
+ '1111111111111111111111110011',
+ '1111111111111111111111110100',
+ '1111111111111111111111110101',
+ '1111111111111111111111110110',
+ '1111111111111111111111110111',
+ '1111111111111111111111111000',
+ '1111111111111111111111111001',
+ '1111111111111111111111111010',
+ '1111111111111111111111111011',
+ '010100',
+ '1111111000',
+ '1111111001',
+ '111111111010',
+ '1111111111001',
+ '010101',
+ '11111000',
+ '11111111010',
+ '1111111010',
+ '1111111011',
+ '11111001',
+ '11111111011',
+ '11111010',
+ '010110',
+ '010111',
+ '011000',
+ '00000',
+ '00001',
+ '00010',
+ '011001',
+ '011010',
+ '011011',
+ '011100',
+ '011101',
+ '011110',
+ '011111',
+ '1011100',
+ '11111011',
+ '111111111111100',
+ '100000',
+ '111111111011',
+ '1111111100',
+ '1111111111010',
+ '100001',
+ '1011101',
+ '1011110',
+ '1011111',
+ '1100000',
+ '1100001',
+ '1100010',
+ '1100011',
+ '1100100',
+ '1100101',
+ '1100110',
+ '1100111',
+ '1101000',
+ '1101001',
+ '1101010',
+ '1101011',
+ '1101100',
+ '1101101',
+ '1101110',
+ '1101111',
+ '1110000',
+ '1110001',
+ '1110010',
+ '11111100',
+ '1110011',
+ '11111101',
+ '1111111111011',
+ '1111111111111110000',
+ '1111111111100',
+ '11111111111100',
+ '100010',
+ '111111111111101',
+ '00011',
+ '100011',
+ '00100',
+ '100100',
+ '00101',
+ '100101',
+ '100110',
+ '100111',
+ '00110',
+ '1110100',
+ '1110101',
+ '101000',
+ '101001',
+ '101010',
+ '00111',
+ '101011',
+ '1110110',
+ '101100',
+ '01000',
+ '01001',
+ '101101',
+ '1110111',
+ '1111000',
+ '1111001',
+ '1111010',
+ '1111011',
+ '111111111111110',
+ '11111111100',
+ '11111111111101',
+ '1111111111101',
+ '1111111111111111111111111100',
+ '11111111111111100110',
+ '1111111111111111010010',
+ '11111111111111100111',
+ '11111111111111101000',
+ '1111111111111111010011',
+ '1111111111111111010100',
+ '1111111111111111010101',
+ '11111111111111111011001',
+ '1111111111111111010110',
+ '11111111111111111011010',
+ '11111111111111111011011',
+ '11111111111111111011100',
+ '11111111111111111011101',
+ '11111111111111111011110',
+ '111111111111111111101011',
+ '11111111111111111011111',
+ '111111111111111111101100',
+ '111111111111111111101101',
+ '1111111111111111010111',
+ '11111111111111111100000',
+ '111111111111111111101110',
+ '11111111111111111100001',
+ '11111111111111111100010',
+ '11111111111111111100011',
+ '11111111111111111100100',
+ '111111111111111011100',
+ '1111111111111111011000',
+ '11111111111111111100101',
+ '1111111111111111011001',
+ '11111111111111111100110',
+ '11111111111111111100111',
+ '111111111111111111101111',
+ '1111111111111111011010',
+ '111111111111111011101',
+ '11111111111111101001',
+ '1111111111111111011011',
+ '1111111111111111011100',
+ '11111111111111111101000',
+ '11111111111111111101001',
+ '111111111111111011110',
+ '11111111111111111101010',
+ '1111111111111111011101',
+ '1111111111111111011110',
+ '111111111111111111110000',
+ '111111111111111011111',
+ '1111111111111111011111',
+ '11111111111111111101011',
+ '11111111111111111101100',
+ '111111111111111100000',
+ '111111111111111100001',
+ '1111111111111111100000',
+ '111111111111111100010',
+ '11111111111111111101101',
+ '1111111111111111100001',
+ '11111111111111111101110',
+ '11111111111111111101111',
+ '11111111111111101010',
+ '1111111111111111100010',
+ '1111111111111111100011',
+ '1111111111111111100100',
+ '11111111111111111110000',
+ '1111111111111111100101',
+ '1111111111111111100110',
+ '11111111111111111110001',
+ '11111111111111111111100000',
+ '11111111111111111111100001',
+ '11111111111111101011',
+ '1111111111111110001',
+ '1111111111111111100111',
+ '11111111111111111110010',
+ '1111111111111111101000',
+ '1111111111111111111101100',
+ '11111111111111111111100010',
+ '11111111111111111111100011',
+ '11111111111111111111100100',
+ '111111111111111111111011110',
+ '111111111111111111111011111',
+ '11111111111111111111100101',
+ '111111111111111111110001',
+ '1111111111111111111101101',
+ '1111111111111110010',
+ '111111111111111100011',
+ '11111111111111111111100110',
+ '111111111111111111111100000',
+ '111111111111111111111100001',
+ '11111111111111111111100111',
+ '111111111111111111111100010',
+ '111111111111111111110010',
+ '111111111111111100100',
+ '111111111111111100101',
+ '11111111111111111111101000',
+ '11111111111111111111101001',
+ '1111111111111111111111111101',
+ '111111111111111111111100011',
+ '111111111111111111111100100',
+ '111111111111111111111100101',
+ '11111111111111101100',
+ '111111111111111111110011',
+ '11111111111111101101',
+ '111111111111111100110',
+ '1111111111111111101001',
+ '111111111111111100111',
+ '111111111111111101000',
+ '11111111111111111110011',
+ '1111111111111111101010',
+ '1111111111111111101011',
+ '1111111111111111111101110',
+ '1111111111111111111101111',
+ '111111111111111111110100',
+ '111111111111111111110101',
+ '11111111111111111111101010',
+ '11111111111111111110100',
+ '11111111111111111111101011',
+ '111111111111111111111100110',
+ '11111111111111111111101100',
+ '11111111111111111111101101',
+ '111111111111111111111100111',
+ '111111111111111111111101000',
+ '111111111111111111111101001',
+ '111111111111111111111101010',
+ '111111111111111111111101011',
+ '1111111111111111111111111110',
+ '111111111111111111111101100',
+ '111111111111111111111101101',
+ '111111111111111111111101110',
+ '111111111111111111111101111',
+ '111111111111111111111110000',
+ '11111111111111111111101110',
+ '111111111111111111111111111111'
+]);
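+
+// As an illustration (not part of the library), the table round-trips arbitrary byte strings; for
+// example, the 15-byte string 'www.example.com' is Huffman-encoded into 12 bytes (cf. RFC 7541,
+// Appendix C.4.1):
+//
+//     var table = HuffmanTable.huffmanTable;
+//     var encoded = table.encode(Buffer.from('www.example.com', 'utf8')); // 12-byte Buffer
+//     table.decode(encoded).toString('utf8');                             // 'www.example.com'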
+
+// ### String literal representation ###
+//
+// Literal **strings** can represent header names or header values. There are two variants of the
+// string encoding:
+//
+// String literal with Huffman encoding:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 1 | Value Length Prefix (7) |
+// +---+---+---+---+---+---+---+---+
+// | Value Length (0-N bytes) |
+// +---+---+---+---+---+---+---+---+
+// ...
+// +---+---+---+---+---+---+---+---+
+// | Huffman Encoded Data |Padding|
+// +---+---+---+---+---+---+---+---+
+//
+// String literal without Huffman encoding:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | Value Length Prefix (7) |
+// +---+---+---+---+---+---+---+---+
+// | Value Length (0-N bytes) |
+// +---+---+---+---+---+---+---+---+
+// ...
+// +---+---+---+---+---+---+---+---+
+// | Field Bytes Without Encoding |
+// +---+---+---+---+---+---+---+---+
+
+HeaderSetCompressor.string = function writeString(str) {
+ str = Buffer.from(str, 'utf8');
+
+ var huffman = HuffmanTable.huffmanTable.encode(str);
+ if (huffman.length < str.length) {
+ var length = HeaderSetCompressor.integer(huffman.length, 7);
+ length[0][0] |= 128;
+ return length.concat(huffman);
+ }
+
+ else {
+ length = HeaderSetCompressor.integer(str.length, 7);
+ return length.concat(str);
+ }
+};
+
+HeaderSetDecompressor.string = function readString(buffer) {
+ var huffman = buffer[buffer.cursor] & 128;
+ var length = HeaderSetDecompressor.integer(buffer, 7);
+ var encoded = buffer.slice(buffer.cursor, buffer.cursor + length);
+ buffer.cursor += length;
+ return (huffman ? HuffmanTable.huffmanTable.decode(encoded) : encoded).toString('utf8');
+};
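+
+// As an illustration (not part of the library; `concat` is the helper defined at the end of this
+// file), a string literal round-trips through these two functions:
+//
+//     var chunks = HeaderSetCompressor.string('custom-key'); // [length buffer, encoded bytes]
+//     var buffer = concat(chunks);
+//     buffer.cursor = 0;
+//     HeaderSetDecompressor.string(buffer);                  // 'custom-key'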
+
+// ### Header representations ###
+
+// The JavaScript object representation is described near the
+// `HeaderSetDecompressor.prototype._execute()` method definition.
+//
+// **All binary header representations** start with a prefix signaling the representation type and
+// an index represented using prefix coded integers:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 1 | Index (7+) | Indexed Representation
+// +---+---------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 1 | Index (6+) |
+// +---+---+---+-------------------+ Literal w/ Indexing
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 1 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal w/ Indexing
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 0 | Index (4+) |
+// +---+---+---+-------------------+ Literal w/o Incremental Indexing
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 0 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal w/o Incremental Indexing
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 1 | Index (4+) |
+// +---+---+---+-------------------+ Literal never indexed
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 1 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal never indexed
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// The **Indexed Representation** consists of the 1-bit prefix and the Index that is represented as
+// a 7-bit prefix coded integer and nothing else.
+//
+// After the first bits, **all literal representations** specify the header name, either as a
+// pointer to the Header Table (Index) or a string literal. When the string literal representation
+// is used, the Index is set to 0 and the string literal starts at the second byte.
+//
+// For **all literal representations**, the specification of the header value comes next. It is
+// always represented as a string.
+
+var representations = {
+ indexed : { prefix: 7, pattern: 0x80 },
+ literalIncremental : { prefix: 6, pattern: 0x40 },
+ contextUpdate : { prefix: 0, pattern: 0x20 },
+ literalNeverIndexed : { prefix: 4, pattern: 0x10 },
+ literal : { prefix: 4, pattern: 0x00 }
+};
+
+HeaderSetCompressor.header = function writeHeader(header) {
+ var representation, buffers = [];
+
+ if (header.contextUpdate) {
+ representation = representations.contextUpdate;
+ } else if (typeof header.value === 'number') {
+ representation = representations.indexed;
+ } else if (header.index) {
+ representation = representations.literalIncremental;
+ } else if (header.mustNeverIndex) {
+ representation = representations.literalNeverIndexed;
+ } else {
+ representation = representations.literal;
+ }
+
+ if (representation === representations.contextUpdate) {
+ buffers.push(HeaderSetCompressor.integer(header.newMaxSize, 5));
+ }
+
+ else if (representation === representations.indexed) {
+ buffers.push(HeaderSetCompressor.integer(header.value + 1, representation.prefix));
+ }
+
+ else {
+ if (typeof header.name === 'number') {
+ buffers.push(HeaderSetCompressor.integer(header.name + 1, representation.prefix));
+ } else {
+ buffers.push(HeaderSetCompressor.integer(0, representation.prefix));
+ buffers.push(HeaderSetCompressor.string(header.name));
+ }
+ buffers.push(HeaderSetCompressor.string(header.value));
+ }
+
+ buffers[0][0][0] |= representation.pattern;
+
+ return Array.prototype.concat.apply([], buffers); // array of arrays of buffers -> array of buffers
+};
+
+HeaderSetDecompressor.header = function readHeader(buffer) {
+ var representation, header = {};
+
+ var firstByte = buffer[buffer.cursor];
+ if (firstByte & 0x80) {
+ representation = representations.indexed;
+ } else if (firstByte & 0x40) {
+ representation = representations.literalIncremental;
+ } else if (firstByte & 0x20) {
+ representation = representations.contextUpdate;
+ } else if (firstByte & 0x10) {
+ representation = representations.literalNeverIndexed;
+ } else {
+ representation = representations.literal;
+ }
+
+ header.value = header.name = -1;
+ header.index = false;
+ header.contextUpdate = false;
+ header.newMaxSize = 0;
+ header.mustNeverIndex = false;
+
+ if (representation === representations.contextUpdate) {
+ header.contextUpdate = true;
+ header.newMaxSize = HeaderSetDecompressor.integer(buffer, 5);
+ }
+
+ else if (representation === representations.indexed) {
+ header.value = header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
+ }
+
+ else {
+ header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
+ if (header.name === -1) {
+ header.name = HeaderSetDecompressor.string(buffer);
+ }
+ header.value = HeaderSetDecompressor.string(buffer);
+ header.index = (representation === representations.literalIncremental);
+ header.mustNeverIndex = (representation === representations.literalNeverIndexed);
+ }
+
+ return header;
+};
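+
+// As an illustration (not part of the library), a literal representation with a new name
+// round-trips through `writeHeader` and `readHeader`:
+//
+//     var chunks = HeaderSetCompressor.header({ name: 'x-example', value: 'hello', index: false,
+//                                               mustNeverIndex: false, contextUpdate: false });
+//     var block = concat(chunks);
+//     block.cursor = 0;
+//     HeaderSetDecompressor.header(block);
+//     // -> { name: 'x-example', value: 'hello', index: false, mustNeverIndex: false, ... }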
+
+// Integration with HTTP/2
+// =======================
+
+// This section describes the interaction between the compressor/decompressor and the rest of the
+// HTTP/2 implementation. The `Compressor` and the `Decompressor` make up a layer between the
+// [framer](framer.html) and the [connection handling component](connection.html). They let most
+// frames pass through, except HEADERS and PUSH_PROMISE frames. They convert the frames between
+// these two representations:
+//
+// { {
+// type: 'HEADERS', type: 'HEADERS',
+// flags: {}, flags: {},
+// stream: 1, <===> stream: 1,
+// headers: { data: Buffer
+// N1: 'V1', }
+// N2: ['V1', 'V2', ...],
+// // ...
+// }
+// }
+//
+// A single non-binary frame may correspond to several binary frames.
+
+var MAX_HTTP_PAYLOAD_SIZE = 16384;
+
+// The Compressor class
+// --------------------
+
+// The Compressor transform stream is basically stateless.
+util.inherits(Compressor, TransformStream);
+function Compressor(log, type) {
+ TransformStream.call(this, { objectMode: true });
+
+ this._log = log.child({ component: 'compressor' });
+
+ assert((type === 'REQUEST') || (type === 'RESPONSE'));
+ this._table = new HeaderTable(this._log);
+
+ this.tableSizeChangePending = false;
+ this.lowestTableSizePending = 0;
+ this.tableSizeSetting = DEFAULT_HEADER_TABLE_LIMIT;
+}
+
+// Changing the header table size
+Compressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
+ this._table.setSizeLimit(size);
+ if (!this.tableSizeChangePending || size < this.lowestTableSizePending) {
+ this.lowestTableSizePending = size;
+ }
+ this.tableSizeSetting = size;
+ this.tableSizeChangePending = true;
+};
+
+// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor` stream
+// instance. This means that from now on, the advantages of streaming header encoding are lost,
+// but the API becomes simpler.
+Compressor.prototype.compress = function compress(headers) {
+ var compressor = new HeaderSetCompressor(this._log, this._table);
+
+ if (this.tableSizeChangePending) {
+ if (this.lowestTableSizePending < this.tableSizeSetting) {
+ compressor.send({contextUpdate: true, newMaxSize: this.lowestTableSizePending,
+ name: "", value: "", index: 0});
+ }
+ compressor.send({contextUpdate: true, newMaxSize: this.tableSizeSetting,
+ name: "", value: "", index: 0});
+ this.tableSizeChangePending = false;
+ }
+ var colonHeaders = [];
+ var nonColonHeaders = [];
+
+ // To ensure we send colon headers first
+ for (var name in headers) {
+ if (name.trim()[0] === ':') {
+ colonHeaders.push(name);
+ } else {
+ nonColonHeaders.push(name);
+ }
+ }
+
+ function compressHeader(name) {
+ var value = headers[name];
+ name = String(name).toLowerCase();
+
+ // * To allow for better compression efficiency, the Cookie header field MAY be split into
+ // separate header fields, each with one or more cookie-pairs.
+ if (name == 'cookie') {
+ if (!(value instanceof Array)) {
+ value = [value];
+ }
+ value = Array.prototype.concat.apply([], value.map(function(cookie) {
+ return String(cookie).split(';').map(trim);
+ }));
+ }
+
+ if (value instanceof Array) {
+ for (var i = 0; i < value.length; i++) {
+ compressor.write([name, String(value[i])]);
+ }
+ } else {
+ compressor.write([name, String(value)]);
+ }
+ }
+
+ colonHeaders.forEach(compressHeader);
+ nonColonHeaders.forEach(compressHeader);
+
+ compressor.end();
+
+ var chunk, chunks = [];
+ while (chunk = compressor.read()) {
+ chunks.push(chunk);
+ }
+
+ function insertSoftIllegalHpack(originalCompressed) {
+ var illegalLiteral = Buffer.from([
+ 0x00, // Literal, no index
+ 0x08, // Name: not huffman encoded, 8 bytes long
+ 0x3a,
+ 0x69,
+ 0x6c,
+ 0x6c,
+ 0x65,
+ 0x67,
+ 0x61,
+ 0x6c, // :illegal
+ 0x10, // Value: not huffman encoded, 16 bytes long
+ // REALLY NOT LEGAL
+ 0x52,
+ 0x45,
+ 0x41,
+ 0x4c,
+ 0x4c,
+ 0x59,
+ 0x20,
+ 0x4e,
+ 0x4f,
+ 0x54,
+ 0x20,
+ 0x4c,
+ 0x45,
+ 0x47,
+ 0x41,
+ 0x4c,
+ ]);
+ var newBufferLength = originalCompressed.length + illegalLiteral.length;
+ var concatenated = Buffer.alloc(newBufferLength);
+ originalCompressed.copy(concatenated, 0);
+ illegalLiteral.copy(concatenated, originalCompressed.length);
+ return concatenated;
+ }
+
+ function insertHardIllegalHpack(originalCompressed) {
+ // Now we have to add an invalid header
+ var illegalIndexed = HeaderSetCompressor.integer(5000, 7);
+ // The above returns an array of buffers, but there's only one buffer, so
+ // get rid of the array.
+ illegalIndexed = illegalIndexed[0];
+ // Set the first bit to 1 to signal this is an indexed representation
+ illegalIndexed[0] |= 0x80;
+ var newBufferLength = originalCompressed.length + illegalIndexed.length;
+ var concatenated = Buffer.alloc(newBufferLength);
+ originalCompressed.copy(concatenated, 0);
+ illegalIndexed.copy(concatenated, originalCompressed.length);
+ return concatenated;
+ }
+
+ if ("x-softillegalhpack" in headers) {
+ return insertSoftIllegalHpack(concat(chunks));
+ }
+
+ if ("x-hardillegalhpack" in headers) {
+ return insertHardIllegalHpack(concat(chunks));
+ }
+
+ return concat(chunks);
+};
+
+// When a `frame` arrives
+Compressor.prototype._transform = function _transform(frame, encoding, done) {
+ // * and it is a HEADERS or PUSH_PROMISE frame
+ // * it generates a header block using the compress method
+ // * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE`
+ // * for each chunk, it pushes out a chunk frame that is identical to the original, except for
+ //   the `data` property (which holds the given chunk), the frame type (which is CONTINUATION
+ //   for every frame but the first), and the END_HEADERS/END_PUSH_PROMISE flag (which is set
+ //   only on the last frame); see the example after this method
+ if (frame.type === 'HEADERS' || frame.type === 'PUSH_PROMISE') {
+ var buffer = this.compress(frame.headers);
+
+ // This will result in CONTINUATIONs from a PUSH_PROMISE being 4 bytes shorter than they could
+ // be, but that's not the end of the world, and it prevents us from going over MAX_HTTP_PAYLOAD_SIZE
+ // on the initial PUSH_PROMISE frame.
+ var adjustment = frame.type === 'PUSH_PROMISE' ? 4 : 0;
+ var chunks = cut(buffer, MAX_HTTP_PAYLOAD_SIZE - adjustment);
+
+ for (var i = 0; i < chunks.length; i++) {
+ var chunkFrame;
+ var first = (i === 0);
+ var last = (i === chunks.length - 1);
+
+ if (first) {
+ chunkFrame = util._extend({}, frame);
+ chunkFrame.flags = util._extend({}, frame.flags);
+ chunkFrame.flags['END_' + frame.type] = last;
+ } else {
+ chunkFrame = {
+ type: 'CONTINUATION',
+ flags: { END_HEADERS: last },
+ stream: frame.stream
+ };
+ }
+ chunkFrame.data = chunks[i];
+
+ this.push(chunkFrame);
+ }
+ }
+
+ // * otherwise, the frame is forwarded without taking any action
+ else {
+ this.push(frame);
+ }
+
+ done();
+};
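+
+// As an example (illustration only), a HEADERS frame on stream 1 whose compressed header block is
+// 40000 bytes long leaves this stream as three frames:
+//
+//     { type: 'HEADERS',      flags: { END_HEADERS: false, ... }, stream: 1, data: <16384 bytes> }
+//     { type: 'CONTINUATION', flags: { END_HEADERS: false },      stream: 1, data: <16384 bytes> }
+//     { type: 'CONTINUATION', flags: { END_HEADERS: true },       stream: 1, data: <7232 bytes> }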
+
+// The Decompressor class
+// ----------------------
+
+// The Decompressor is a stateful transform stream, since it has to collect multiple frames first,
+// and the decoding comes after unifying the payload of those frames.
+//
+// If there's a frame in progress, `this._inProgress` is `true`. The frames are collected in
+// `this._frames`, and the type of the frame and the stream identifier is stored in `this._type`
+// and `this._stream` respectively.
+util.inherits(Decompressor, TransformStream);
+function Decompressor(log, type) {
+ TransformStream.call(this, { objectMode: true });
+
+ this._log = log.child({ component: 'compressor' });
+
+ assert((type === 'REQUEST') || (type === 'RESPONSE'));
+ this._table = new HeaderTable(this._log);
+
+ this._inProgress = false;
+ this._base = undefined;
+}
+
+// Changing the header table size
+Decompressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
+ this._table.setSizeLimit(size);
+};
+
+// `decompress` takes a full header block, and decompresses it using a new `HeaderSetDecompressor`
+// stream instance. This means that from now on, the advantages of streaming header decoding are
+// lost, but the API becomes simpler.
+Decompressor.prototype.decompress = function decompress(block) {
+ var decompressor = new HeaderSetDecompressor(this._log, this._table);
+ decompressor.end(block);
+
+ var seenNonColonHeader = false;
+ var headers = {};
+ var pair;
+ while (pair = decompressor.read()) {
+ var name = pair[0];
+ var value = pair[1];
+ var isColonHeader = (name.trim()[0] === ':');
+ if (seenNonColonHeader && isColonHeader) {
+ this.emit('error', 'PROTOCOL_ERROR');
+ return headers;
+ }
+ seenNonColonHeader = !isColonHeader;
+ if (name in headers) {
+ if (headers[name] instanceof Array) {
+ headers[name].push(value);
+ } else {
+ headers[name] = [headers[name], value];
+ }
+ } else {
+ headers[name] = value;
+ }
+ }
+
+ // * If there are multiple Cookie header fields after decompression, these MUST be concatenated
+ // into a single octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII
+ // string "; ").
+ if (('cookie' in headers) && (headers['cookie'] instanceof Array)) {
+ headers['cookie'] = headers['cookie'].join('; ');
+ }
+
+ return headers;
+};
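+
+// A minimal round-trip sketch (illustration only): `stubLog` is a hypothetical bunyan-like logger
+// stub, and both ends use freshly created header tables.
+//
+//     var stubLog = { child: function() { return stubLog; }, trace: function() {},
+//                     debug: function() {}, error: function() {}, warn: function() {},
+//                     fatal: function() {} };
+//     var compressor = new Compressor(stubLog, 'REQUEST');
+//     var decompressor = new Decompressor(stubLog, 'REQUEST');
+//     var block = compressor.compress({ ':method': 'GET', ':path': '/', accept: 'text/html' });
+//     decompressor.decompress(block); // -> { ':method': 'GET', ':path': '/', accept: 'text/html' }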
+
+// When a `frame` arrives
+Decompressor.prototype._transform = function _transform(frame, encoding, done) {
+ // * and the collection process is already `_inProgress`, the frame is simply stored, except if
+ // it's an illegal frame
+ if (this._inProgress) {
+ if ((frame.type !== 'CONTINUATION') || (frame.stream !== this._base.stream)) {
+ this._log.error('A series of HEADER frames were not continuous');
+ this.emit('error', 'PROTOCOL_ERROR');
+ return;
+ }
+ this._frames.push(frame);
+ }
+
+ // * and the collection process is not `_inProgress`, but the new frame's type is HEADERS or
+ // PUSH_PROMISE, a new collection process begins
+ else if ((frame.type === 'HEADERS') || (frame.type === 'PUSH_PROMISE')) {
+ this._inProgress = true;
+ this._base = util._extend({}, frame);
+ this._frames = [frame];
+ }
+
+ // * otherwise, the frame is forwarded without taking any action
+ else {
+ this.push(frame);
+ }
+
+ // * When the frame signals that it's the last in the series, the header block chunks are
+ // concatenated, the headers are decompressed, and a new frame gets pushed out with the
+ // decompressed headers.
+ if (this._inProgress && (frame.flags.END_HEADERS || frame.flags.END_PUSH_PROMISE)) {
+ var buffer = concat(this._frames.map(function(frame) {
+ return frame.data;
+ }));
+ try {
+ var headers = this.decompress(buffer);
+ } catch(error) {
+ this._log.error({ err: error }, 'Header decompression error');
+ this.emit('error', 'COMPRESSION_ERROR');
+ return;
+ }
+ this.push(util._extend(this._base, { headers: headers }));
+ this._inProgress = false;
+ }
+
+ done();
+};
+
+// Helper functions
+// ================
+
+// Concatenate an array of buffers into a new buffer
+function concat(buffers) {
+ var size = 0;
+ for (var i = 0; i < buffers.length; i++) {
+ size += buffers[i].length;
+ }
+
+ var concatenated = Buffer.alloc(size);
+ for (var cursor = 0, j = 0; j < buffers.length; cursor += buffers[j].length, j++) {
+ buffers[j].copy(concatenated, cursor);
+ }
+
+ return concatenated;
+}
+
+// Cut `buffer` into chunks not larger than `size`
+function cut(buffer, size) {
+ var chunks = [];
+ var cursor = 0;
+ do {
+ var chunkSize = Math.min(size, buffer.length - cursor);
+ chunks.push(buffer.slice(cursor, cursor + chunkSize));
+ cursor += chunkSize;
+ } while(cursor < buffer.length);
+ return chunks;
+}
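+
+// For example (illustration only):
+//
+//     var chunks = cut(Buffer.alloc(40000), 16384); // three Buffers of 16384, 16384 and 7232 bytes
+//     concat(chunks).length;                        // 40000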
+
+function trim(string) {
+ return string.trim();
+}
diff --git a/testing/xpcshell/node-http2/lib/protocol/connection.js b/testing/xpcshell/node-http2/lib/protocol/connection.js
new file mode 100644
index 0000000000..8c203675fa
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/connection.js
@@ -0,0 +1,630 @@
+var assert = require('assert');
+
+// The Connection class
+// ====================
+
+// The Connection class manages HTTP/2 connections. Each instance corresponds to one transport
+// stream (TCP stream). It operates by sending and receiving frames and is implemented as a
+// [Flow](flow.html) subclass.
+
+var Flow = require('./flow').Flow;
+
+exports.Connection = Connection;
+
+// Public API
+// ----------
+
+// * **new Connection(log, firstStreamId, settings)**: create a new Connection
+//
+// * **Event: 'error' (type)**: signals a connection level error made by the other end
+//
+// * **Event: 'peerError' (type)**: signals the receipt of a GOAWAY frame that contains an error
+// code other than NO_ERROR
+//
+// * **Event: 'stream' (stream)**: signals that there's an incoming stream
+//
+// * **createStream(): stream**: initiate a new stream
+//
+// * **set(settings, callback)**: change the value of one or more settings according to the
+// key-value pairs of `settings`. The callback is called after the peer acknowledged the changes.
+//
+// * **ping([callback])**: send a ping and call callback when the answer arrives
+//
+// * **close([error])**: close the connection with an error code
+
+// Constructor
+// -----------
+
+// The main aspects of managing the connection are:
+function Connection(log, firstStreamId, settings) {
+ // * initializing the base class
+ Flow.call(this, 0);
+
+ // * logging: every method uses the common logger object
+ this._log = log.child({ component: 'connection' });
+
+ // * stream management
+ this._initializeStreamManagement(firstStreamId);
+
+ // * lifecycle management
+ this._initializeLifecycleManagement();
+
+ // * flow control
+ this._initializeFlowControl();
+
+ // * settings management
+ this._initializeSettingsManagement(settings);
+
+ // * multiplexing
+ this._initializeMultiplexing();
+}
+Connection.prototype = Object.create(Flow.prototype, { constructor: { value: Connection } });
+
+// Overview
+// --------
+
+// | ^ | ^
+// v | v |
+// +--------------+ +--------------+
+// +---| stream1 |---| stream2 |---- .... ---+
+// | | +----------+ | | +----------+ | |
+// | | | stream1. | | | | stream2. | | |
+// | +-| upstream |-+ +-| upstream |-+ |
+// | +----------+ +----------+ |
+// | | ^ | ^ |
+// | v | v | |
+// | +-----+-------------+-----+-------- .... |
+// | ^ | | | |
+// | | v | | |
+// | +--------------+ | | |
+// | | stream0 | | | |
+// | | connection | | | |
+// | | management | multiplexing |
+// | +--------------+ flow control |
+// | | ^ |
+// | _read() | | _write() |
+// | v | |
+// | +------------+ +-----------+ |
+// | |output queue| |input queue| |
+// +----------------+------------+-+-----------+-----------------+
+// | ^
+// read() | | write()
+// v |
+
+// Stream management
+// -----------------
+
+var Stream = require('./stream').Stream;
+
+// Initialization:
+Connection.prototype._initializeStreamManagement = function _initializeStreamManagement(firstStreamId) {
+ // * streams are stored in two data structures:
+ // * `_streamIds` is an id -> stream map of the streams that are allowed to receive frames.
+ // * `_streamPriorities` is a priority -> [stream] map of the streams that are allowed to send frames.
+ this._streamIds = [];
+ this._streamPriorities = [];
+
+ // * The next outbound stream ID and the last inbound stream id
+ this._nextStreamId = firstStreamId;
+ this._lastIncomingStream = 0;
+
+ // * Calling `_writeControlFrame` when there's an incoming frame with 0 as stream ID
+ this._streamIds[0] = { upstream: { write: this._writeControlFrame.bind(this) } };
+
+ // * By default, the number of concurrent outbound streams is not limited. The `_streamLimit` can
+ // be set by the SETTINGS_MAX_CONCURRENT_STREAMS setting.
+ this._streamSlotsFree = Infinity;
+ this._streamLimit = Infinity;
+ this.on('RECEIVING_SETTINGS_MAX_CONCURRENT_STREAMS', this._updateStreamLimit);
+};
+
+// `_writeControlFrame` is called when there's an incoming frame on stream 0 (the connection
+// control stream). It broadcasts the message by emitting an event named after the frame type.
+Connection.prototype._writeControlFrame = function _writeControlFrame(frame) {
+ if ((frame.type === 'SETTINGS') || (frame.type === 'PING') ||
+ (frame.type === 'GOAWAY') || (frame.type === 'WINDOW_UPDATE') ||
+ (frame.type === 'ALTSVC') || (frame.type == 'ORIGIN')) {
+ this._log.debug({ frame: frame }, 'Receiving connection level frame');
+ this.emit(frame.type, frame);
+ } else {
+ this._log.error({ frame: frame }, 'Invalid connection level frame');
+ this.emit('error', 'PROTOCOL_ERROR');
+ }
+};
+
+// Methods to manage the stream slot pool:
+Connection.prototype._updateStreamLimit = function _updateStreamLimit(newStreamLimit) {
+ var wakeup = (this._streamSlotsFree === 0) && (newStreamLimit > this._streamLimit);
+ this._streamSlotsFree += newStreamLimit - this._streamLimit;
+ this._streamLimit = newStreamLimit;
+ if (wakeup) {
+ this.emit('wakeup');
+ }
+};
+
+Connection.prototype._changeStreamCount = function _changeStreamCount(change) {
+ if (change) {
+ this._log.trace({ free: this._streamSlotsFree, change: change },
+ 'Changing active stream count.');
+ var wakeup = (this._streamSlotsFree === 0) && (change < 0);
+ this._streamSlotsFree -= change;
+ if (wakeup) {
+ this.emit('wakeup');
+ }
+ }
+};
+
+// Creating a new *inbound or outbound* stream with the given `id` (which is undefined in case of
+// an outbound stream) consists of three steps:
+//
+// 1. var stream = new Stream(this._log, this);
+// 2. this._allocateId(stream, id);
+// 3. this._allocatePriority(stream);
+
+// Allocating an ID to a stream
+Connection.prototype._allocateId = function _allocateId(stream, id) {
+ // * initiated stream without definite ID
+ if (id === undefined) {
+ id = this._nextStreamId;
+ this._nextStreamId += 2;
+ }
+
+ // * incoming stream with a legitimate ID (larger than any previous and with different parity than ours)
+ else if ((id > this._lastIncomingStream) && ((id - this._nextStreamId) % 2 !== 0)) {
+ this._lastIncomingStream = id;
+ }
+
+ // * incoming stream with invalid ID
+ else {
+ this._log.error({ stream_id: id, lastIncomingStream: this._lastIncomingStream },
+ 'Invalid incoming stream ID.');
+ this.emit('error', 'PROTOCOL_ERROR');
+ return undefined;
+ }
+
+ assert(!(id in this._streamIds));
+
+ // * adding to `this._streamIds`
+ this._log.trace({ s: stream, stream_id: id }, 'Allocating ID for stream.');
+ this._streamIds[id] = stream;
+ stream.id = id;
+ this.emit('new_stream', stream, id);
+
+ // * forwarding connection errors from streams
+ stream.on('connectionError', this.emit.bind(this, 'error'));
+
+ return id;
+};
+
+// Allocating a priority to a stream, and managing priority changes
+Connection.prototype._allocatePriority = function _allocatePriority(stream) {
+ this._log.trace({ s: stream }, 'Allocating priority for stream.');
+ this._insert(stream, stream._priority);
+ stream.on('priority', this._reprioritize.bind(this, stream));
+ stream.upstream.on('readable', this.emit.bind(this, 'wakeup'));
+ this.emit('wakeup');
+};
+
+Connection.prototype._insert = function _insert(stream, priority) {
+ if (priority in this._streamPriorities) {
+ this._streamPriorities[priority].push(stream);
+ } else {
+ this._streamPriorities[priority] = [stream];
+ }
+};
+
+Connection.prototype._reprioritize = function _reprioritize(stream, priority) {
+ var bucket = this._streamPriorities[stream._priority];
+ var index = bucket.indexOf(stream);
+ assert(index !== -1);
+ bucket.splice(index, 1);
+ if (bucket.length === 0) {
+ delete this._streamPriorities[stream._priority];
+ }
+
+ this._insert(stream, priority);
+};
+
+// Creating an *inbound* stream with the given ID. It is called when there's an incoming frame to
+// a previously nonexistent stream.
+Connection.prototype._createIncomingStream = function _createIncomingStream(id) {
+ this._log.debug({ stream_id: id }, 'New incoming stream.');
+
+ var stream = new Stream(this._log, this);
+ this._allocateId(stream, id);
+ this._allocatePriority(stream);
+ this.emit('stream', stream, id);
+
+ return stream;
+};
+
+// Creating an *outbound* stream
+Connection.prototype.createStream = function createStream() {
+ this._log.trace('Creating new outbound stream.');
+
+ // * Receiving is enabled immediately, and an ID gets assigned to the stream
+ var stream = new Stream(this._log, this);
+ this._allocatePriority(stream);
+
+ return stream;
+};
+
+// Multiplexing
+// ------------
+
+Connection.prototype._initializeMultiplexing = function _initializeMultiplexing() {
+ this.on('window_update', this.emit.bind(this, 'wakeup'));
+ this._sendScheduled = false;
+ this._firstFrameReceived = false;
+};
+
+// The `_send` method is a virtual method of the [Flow class](flow.html) that has to be implemented
+// by child classes. It reads frames from streams and pushes them to the output buffer.
+Connection.prototype._send = function _send(immediate) {
+ // * Do not do anything if the connection is already closed
+ if (this._closed) {
+ return;
+ }
+
+ // * Collapsing multiple calls in a turn into a single deferred call
+ if (immediate) {
+ this._sendScheduled = false;
+ } else {
+ if (!this._sendScheduled) {
+ this._sendScheduled = true;
+ setImmediate(this._send.bind(this, true));
+ }
+ return;
+ }
+
+ this._log.trace('Starting forwarding frames from streams.');
+
+ // * Looping through priority `bucket`s in priority order.
+priority_loop:
+ for (var priority in this._streamPriorities) {
+ var bucket = this._streamPriorities[priority];
+ var nextBucket = [];
+
+ // * Forwarding frames from buckets with round-robin scheduling.
+ // 1. pulling out frame
+ // 2. if there's no frame, skip this stream
+ // 3. if forwarding this frame would make `streamCount` greater than `streamLimit`, skip
+ // this stream
+ // 4. adding stream to the bucket of the next round
+ // 5. assigning an ID to the frame (allocating an ID to the stream if there isn't already)
+ // 6. if forwarding a PUSH_PROMISE, allocate ID to the promised stream
+ // 7. forwarding the frame, changing `streamCount` as appropriate
+ // 8. stepping to the next stream if there's still more frame needed in the output buffer
+ // 9. switching to the bucket of the next round
+ while (bucket.length > 0) {
+ for (var index = 0; index < bucket.length; index++) {
+ var stream = bucket[index];
+ var frame = stream.upstream.read((this._window > 0) ? this._window : -1);
+
+ if (!frame) {
+ continue;
+ } else if (frame.count_change > this._streamSlotsFree) {
+ stream.upstream.unshift(frame);
+ continue;
+ }
+
+ nextBucket.push(stream);
+
+ if (frame.stream === undefined) {
+ frame.stream = stream.id || this._allocateId(stream);
+ }
+
+ if (frame.type === 'PUSH_PROMISE') {
+ this._allocatePriority(frame.promised_stream);
+ frame.promised_stream = this._allocateId(frame.promised_stream);
+ }
+
+ this._log.trace({ s: stream, frame: frame }, 'Forwarding outgoing frame');
+ var moreNeeded = this.push(frame);
+ this._changeStreamCount(frame.count_change);
+
+ assert(moreNeeded !== null); // The frame shouldn't be unforwarded
+ if (moreNeeded === false) {
+ break priority_loop;
+ }
+ }
+
+ bucket = nextBucket;
+ nextBucket = [];
+ }
+ }
+
+ // * if we couldn't forward any frame, then sleep until window update, or some other wakeup event
+ if (moreNeeded === undefined) {
+ this.once('wakeup', this._send.bind(this));
+ }
+
+ this._log.trace({ moreNeeded: moreNeeded }, 'Stopping forwarding frames from streams.');
+};
+
+// The `_receive` method is another virtual method of the [Flow class](flow.html) that has to be
+// implemented by child classes. It forwards the given frame to the appropriate stream:
+Connection.prototype._receive = function _receive(frame, done) {
+ this._log.trace({ frame: frame }, 'Forwarding incoming frame');
+
+ // * first frame needs to be checked by the `_onFirstFrameReceived` method
+ if (!this._firstFrameReceived) {
+ this._firstFrameReceived = true;
+ this._onFirstFrameReceived(frame);
+ }
+
+ // Do some sanity checking here before we create a stream
+ if ((frame.type == 'SETTINGS' ||
+ frame.type == 'PING' ||
+ frame.type == 'GOAWAY') &&
+ frame.stream != 0) {
+ // Got connection-level frame on a stream - EEP!
+ this.close('PROTOCOL_ERROR');
+ return;
+ } else if ((frame.type == 'DATA' ||
+ frame.type == 'HEADERS' ||
+ frame.type == 'PRIORITY' ||
+ frame.type == 'RST_STREAM' ||
+ frame.type == 'PUSH_PROMISE' ||
+ frame.type == 'CONTINUATION') &&
+ frame.stream == 0) {
+ // Got stream-level frame on connection - EEP!
+ this.close('PROTOCOL_ERROR');
+ return;
+ }
+ // WINDOW_UPDATE can be on either stream or connection
+
+ // * gets the appropriate stream from the stream registry
+ var stream = this._streamIds[frame.stream];
+
+ // * or creates one if it's not in `this._streamIds`
+ if (!stream) {
+ stream = this._createIncomingStream(frame.stream);
+ }
+
+ // * in case of PUSH_PROMISE, replaces the promised stream id with a new incoming stream
+ if (frame.type === 'PUSH_PROMISE') {
+ frame.promised_stream = this._createIncomingStream(frame.promised_stream);
+ }
+
+ frame.count_change = this._changeStreamCount.bind(this);
+
+ // * and writes it to the `stream`'s `upstream`
+ stream.upstream.write(frame);
+
+ done();
+};
+
+// Settings management
+// -------------------
+
+var defaultSettings = {
+};
+
+// Settings management initialization:
+Connection.prototype._initializeSettingsManagement = function _initializeSettingsManagement(settings) {
+ // * Setting up the callback queue for setting acknowledgements
+ this._settingsAckCallbacks = [];
+
+ // * Sending the initial settings.
+ this._log.debug({ settings: settings },
+ 'Sending the first SETTINGS frame as part of the connection header.');
+ this.set(settings || defaultSettings);
+
+ // * Forwarding SETTINGS frames to the `_receiveSettings` method
+ this.on('SETTINGS', this._receiveSettings);
+ this.on('RECEIVING_SETTINGS_MAX_FRAME_SIZE', this._sanityCheckMaxFrameSize);
+};
+
+// * Checking that the first frame the other endpoint sends is SETTINGS
+Connection.prototype._onFirstFrameReceived = function _onFirstFrameReceived(frame) {
+ if ((frame.stream === 0) && (frame.type === 'SETTINGS')) {
+ this._log.debug('Receiving the first SETTINGS frame as part of the connection header.');
+ } else {
+ this._log.fatal({ frame: frame }, 'Invalid connection header: first frame is not SETTINGS.');
+ this.emit('error', 'PROTOCOL_ERROR');
+ }
+};
+
+// Handling of incoming SETTINGS frames.
+Connection.prototype._receiveSettings = function _receiveSettings(frame) {
+ // * If it's an ACK, call the appropriate callback
+ if (frame.flags.ACK) {
+ var callback = this._settingsAckCallbacks.shift();
+ if (callback) {
+ callback();
+ }
+ }
+
+ // * If it's a setting change request, then send an ACK and change the appropriate settings
+ else {
+ if (!this._closed) {
+ this.push({
+ type: 'SETTINGS',
+ flags: { ACK: true },
+ stream: 0,
+ settings: {}
+ });
+ }
+ for (var name in frame.settings) {
+ this.emit('RECEIVING_' + name, frame.settings[name]);
+ }
+ }
+};
+
+Connection.prototype._sanityCheckMaxFrameSize = function _sanityCheckMaxFrameSize(value) {
+ if ((value < 0x4000) || (value >= 0x01000000)) {
+ this._log.fatal('Received invalid value for max frame size: ' + value);
+ this.emit('error');
+ }
+};
+
+// Changing one or more settings values and sending out a SETTINGS frame
+Connection.prototype.set = function set(settings, callback) {
+ // * Calling the callback and emitting an event when the change is acknowledged
+ var self = this;
+ this._settingsAckCallbacks.push(function() {
+ for (var name in settings) {
+ self.emit('ACKNOWLEDGED_' + name, settings[name]);
+ }
+ if (callback) {
+ callback();
+ }
+ });
+
+ // * Sending out the SETTINGS frame
+ this.push({
+ type: 'SETTINGS',
+ flags: { ACK: false },
+ stream: 0,
+ settings: settings
+ });
+ for (var name in settings) {
+ this.emit('SENDING_' + name, settings[name]);
+ }
+};
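+
+// A hypothetical usage sketch (illustration only) on a `connection` instance, limiting the peer to
+// 100 concurrent streams and waiting for the acknowledgement (the setting name follows the
+// SENDING_*/RECEIVING_* events used above):
+//
+//     connection.set({ SETTINGS_MAX_CONCURRENT_STREAMS: 100 }, function() {
+//       // the peer has acknowledged the new setting
+//     });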
+
+// Lifecycle management
+// --------------------
+
+// The main responsibilities of lifecycle management code:
+//
+// * keeping the connection alive by
+// * sending PINGs when the connection is idle
+// * answering PINGs
+// * ending the connection
+
+Connection.prototype._initializeLifecycleManagement = function _initializeLifecycleManagement() {
+ this._pings = {};
+ this.on('PING', this._receivePing);
+ this.on('GOAWAY', this._receiveGoaway);
+ this._closed = false;
+};
+
+// Generating a string of length 16 with random hexadecimal digits
+Connection.prototype._generatePingId = function _generatePingId() {
+ do {
+ var id = '';
+ for (var i = 0; i < 16; i++) {
+ id += Math.floor(Math.random()*16).toString(16);
+ }
+ } while(id in this._pings);
+ return id;
+};
+
+// Sending a ping and calling `callback` when the answer arrives
+Connection.prototype.ping = function ping(callback) {
+ var id = this._generatePingId();
+ var data = Buffer.from(id, 'hex');
+ this._pings[id] = callback;
+
+ this._log.debug({ data: data }, 'Sending PING.');
+ this.push({
+ type: 'PING',
+ flags: {
+ ACK: false
+ },
+ stream: 0,
+ data: data
+ });
+};
+
+// Answering pings
+Connection.prototype._receivePing = function _receivePing(frame) {
+ if (frame.flags.ACK) {
+ var id = frame.data.toString('hex');
+ if (id in this._pings) {
+ this._log.debug({ data: frame.data }, 'Receiving answer for a PING.');
+ var callback = this._pings[id];
+ if (callback) {
+ callback();
+ }
+ delete this._pings[id];
+ } else {
+ this._log.warn({ data: frame.data }, 'Unsolicited PING answer.');
+ }
+
+ } else {
+ this._log.debug({ data: frame.data }, 'Answering PING.');
+ this.push({
+ type: 'PING',
+ flags: {
+ ACK: true
+ },
+ stream: 0,
+ data: frame.data
+ });
+ }
+};
+
+Connection.prototype.originFrame = function originFrame(originList) {
+ this._log.debug(originList, 'emitting origin frame');
+
+ this.push({
+ type: 'ORIGIN',
+ flags: {},
+ stream: 0,
+ originList : originList,
+ });
+};
+
+// Terminating the connection
+Connection.prototype.close = function close(error) {
+ if (this._closed) {
+ this._log.warn('Trying to close an already closed connection');
+ return;
+ }
+
+ this._log.debug({ error: error }, 'Closing the connection');
+ this.push({
+ type: 'GOAWAY',
+ flags: {},
+ stream: 0,
+ last_stream: this._lastIncomingStream,
+ error: error || 'NO_ERROR'
+ });
+ this.push(null);
+ this._closed = true;
+};
+
+Connection.prototype._receiveGoaway = function _receiveGoaway(frame) {
+ this._log.debug({ error: frame.error }, 'Other end closed the connection');
+ this.push(null);
+ this._closed = true;
+ if (frame.error !== 'NO_ERROR') {
+ this.emit('peerError', frame.error);
+ }
+};
+
+// Flow control
+// ------------
+
+Connection.prototype._initializeFlowControl = function _initializeFlowControl() {
+ // Handling of initial window size of individual streams.
+ this._initialStreamWindowSize = INITIAL_STREAM_WINDOW_SIZE;
+ this.on('new_stream', function(stream) {
+ stream.upstream.setInitialWindow(this._initialStreamWindowSize);
+ });
+ this.on('RECEIVING_SETTINGS_INITIAL_WINDOW_SIZE', this._setInitialStreamWindowSize);
+ this._streamIds[0].upstream.setInitialWindow = function noop() {};
+};
+
+// The initial connection flow control window is 65535 bytes.
+var INITIAL_STREAM_WINDOW_SIZE = 65535;
+
+// A SETTINGS frame can alter the initial flow control window size for all current streams. When the
+// value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the window size of all
+// streams; this is done by the `_setInitialStreamWindowSize` method. The window size has to be modified by
+// the difference between the new value and the old value.
+Connection.prototype._setInitialStreamWindowSize = function _setInitialStreamWindowSize(size) {
+ if ((this._initialStreamWindowSize === Infinity) && (size !== Infinity)) {
+ this._log.error('Trying to manipulate initial flow control window size after flow control was turned off.');
+ this.emit('error', 'FLOW_CONTROL_ERROR');
+ } else {
+ this._log.debug({ size: size }, 'Changing stream initial window size.');
+ this._initialStreamWindowSize = size;
+ this._streamIds.forEach(function(stream) {
+ stream.upstream.setInitialWindow(size);
+ });
+ }
+};
diff --git a/testing/xpcshell/node-http2/lib/protocol/endpoint.js b/testing/xpcshell/node-http2/lib/protocol/endpoint.js
new file mode 100644
index 0000000000..127f4c4c5c
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/endpoint.js
@@ -0,0 +1,262 @@
+var assert = require('assert');
+
+var Serializer = require('./framer').Serializer;
+var Deserializer = require('./framer').Deserializer;
+var Compressor = require('./compressor').Compressor;
+var Decompressor = require('./compressor').Decompressor;
+var Connection = require('./connection').Connection;
+var Duplex = require('stream').Duplex;
+var Transform = require('stream').Transform;
+
+exports.Endpoint = Endpoint;
+
+// The Endpoint class
+// ==================
+
+// Public API
+// ----------
+
+// - **new Endpoint(log, role, settings, filters)**: create a new Endpoint.
+//
+// - `log`: bunyan logger of the parent
+// - `role`: 'CLIENT' or 'SERVER'
+// - `settings`: initial HTTP/2 settings
+// - `filters`: a map of functions that filter the traffic between components (for debugging or
+// intentional failure injection).
+//
+// Filter functions get three arguments (see the sketch after this overview):
+// 1. `frame`: the current frame
+// 2. `forward(frame)`: function that can be used to forward a frame to the next component
+// 3. `done()`: callback to signal the end of the filter process
+//
+// Valid filter names and their position in the stack:
+// - `beforeSerialization`: after compression, before serialization
+// - `beforeCompression`: after multiplexing, before compression
+// - `afterDeserialization`: after deserialization, before decompression
+// - `afterDecompression`: after decompression, before multiplexing
+//
+// * **Event: 'stream' (Stream)**: 'stream' event forwarded from the underlying Connection
+//
+// * **Event: 'error' (type)**: signals an error
+//
+// * **createStream(): Stream**: initiate a new stream (forwarded to the underlying Connection)
+//
+// * **close([error])**: close the connection with an error code
+
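+// As a sketch (illustration only), a filter that logs every frame leaving the compressor before
+// it is serialized:
+//
+//     var endpoint = new Endpoint(log, 'CLIENT', {}, {
+//       beforeSerialization: function(frame, forward, done) {
+//         console.log('outgoing frame type:', frame.type);
+//         forward(frame);
+//         done();
+//       }
+//     });
+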
+// Constructor
+// -----------
+
+// The process of initialization:
+function Endpoint(log, role, settings, filters) {
+ Duplex.call(this);
+
+ // * Initializing logging infrastructure
+ this._log = log.child({ component: 'endpoint', e: this });
+
+ // * First part of the handshake process: sending and receiving the client connection header
+ // prelude.
+ assert((role === 'CLIENT') || role === 'SERVER');
+ if (role === 'CLIENT') {
+ this._writePrelude();
+ } else {
+ this._readPrelude();
+ }
+
+ // * Initialization of the components. This includes the second part of the handshake process:
+ // sending the first SETTINGS frame. This is done by the connection class right after
+ // initialization.
+ this._initializeDataFlow(role, settings, filters || {});
+
+ // * Initialization of management code.
+ this._initializeManagement();
+
+ // * Initializing error handling.
+ this._initializeErrorHandling();
+}
+Endpoint.prototype = Object.create(Duplex.prototype, { constructor: { value: Endpoint } });
+
+// Handshake
+// ---------
+
+var CLIENT_PRELUDE = Buffer.from('PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n');
+
+// Writing the client header is simple and synchronous.
+Endpoint.prototype._writePrelude = function _writePrelude() {
+ this._log.debug('Sending the client connection header prelude.');
+ this.push(CLIENT_PRELUDE);
+};
+
+// The asynchronous process of reading the client header:
+Endpoint.prototype._readPrelude = function _readPrelude() {
+ // * progress in the header is tracked using a `cursor`
+ var cursor = 0;
+
+ // * `_write` is temporarily replaced by the comparator function
+ this._write = function _temporalWrite(chunk, encoding, done) {
+ // * which compares the stored header with the current `chunk` byte by byte and emits the
+ // 'error' event if there's a byte that doesn't match
+ var offset = cursor;
+ while(cursor < CLIENT_PRELUDE.length && (cursor - offset) < chunk.length) {
+ if (CLIENT_PRELUDE[cursor] !== chunk[cursor - offset]) {
+ this._log.fatal({ cursor: cursor, offset: offset, chunk: chunk },
+ 'Client connection header prelude does not match.');
+ this._error('handshake', 'PROTOCOL_ERROR');
+ return;
+ }
+ cursor += 1;
+ }
+
+    // * if the whole header has been received and there was no error, then restore the original
+    //   `_write` and call it with the remaining part of the current chunk
+ if (cursor === CLIENT_PRELUDE.length) {
+ this._log.debug('Successfully received the client connection header prelude.');
+ delete this._write;
+ chunk = chunk.slice(cursor - offset);
+ this._write(chunk, encoding, done);
+ }
+ };
+};
+
+// Data flow
+// ---------
+
+// +---------------------------------------------+
+// | |
+// | +-------------------------------------+ |
+// | | +---------+ +---------+ +---------+ | |
+// | | | stream1 | | stream2 | | ... | | |
+// | | +---------+ +---------+ +---------+ | |
+// | | connection | |
+// | +-------------------------------------+ |
+// | | ^ |
+// | pipe | | pipe |
+// | v | |
+// | +------------------+------------------+ |
+// | | compressor | decompressor | |
+// | +------------------+------------------+ |
+// | | ^ |
+// | pipe | | pipe |
+// | v | |
+// | +------------------+------------------+ |
+// | | serializer | deserializer | |
+// | +------------------+------------------+ |
+// | | ^ |
+// | _read() | | _write() |
+// | v | |
+// | +------------+ +-----------+ |
+// | |output queue| |input queue| |
+// +------+------------+-----+-----------+-------+
+// | ^
+// read() | | write()
+// v |
+
+function createTransformStream(filter) {
+ var transform = new Transform({ objectMode: true });
+ var push = transform.push.bind(transform);
+ transform._transform = function(frame, encoding, done) {
+ filter(frame, push, done);
+ };
+ return transform;
+}
+
+function pipeAndFilter(stream1, stream2, filter) {
+ if (filter) {
+ stream1.pipe(createTransformStream(filter)).pipe(stream2);
+ } else {
+ stream1.pipe(stream2);
+ }
+}
+
+Endpoint.prototype._initializeDataFlow = function _initializeDataFlow(role, settings, filters) {
+ var firstStreamId, compressorRole, decompressorRole;
+ if (role === 'CLIENT') {
+ firstStreamId = 1;
+ compressorRole = 'REQUEST';
+ decompressorRole = 'RESPONSE';
+ } else {
+ firstStreamId = 2;
+ compressorRole = 'RESPONSE';
+ decompressorRole = 'REQUEST';
+ }
+
+ this._serializer = new Serializer(this._log);
+ this._deserializer = new Deserializer(this._log);
+ this._compressor = new Compressor(this._log, compressorRole);
+ this._decompressor = new Decompressor(this._log, decompressorRole);
+ this._connection = new Connection(this._log, firstStreamId, settings);
+
+ pipeAndFilter(this._connection, this._compressor, filters.beforeCompression);
+ pipeAndFilter(this._compressor, this._serializer, filters.beforeSerialization);
+ pipeAndFilter(this._deserializer, this._decompressor, filters.afterDeserialization);
+ pipeAndFilter(this._decompressor, this._connection, filters.afterDecompression);
+
+ this._connection.on('ACKNOWLEDGED_SETTINGS_HEADER_TABLE_SIZE',
+ this._decompressor.setTableSizeLimit.bind(this._decompressor));
+ this._connection.on('RECEIVING_SETTINGS_HEADER_TABLE_SIZE',
+ this._compressor.setTableSizeLimit.bind(this._compressor));
+};
+
+var noread = {};
+Endpoint.prototype._read = function _read() {
+ this._readableState.sync = true;
+ var moreNeeded = noread, chunk;
+ while (moreNeeded && (chunk = this._serializer.read())) {
+ moreNeeded = this.push(chunk);
+ }
+ if (moreNeeded === noread) {
+ this._serializer.once('readable', this._read.bind(this));
+ }
+ this._readableState.sync = false;
+};
+
+Endpoint.prototype._write = function _write(chunk, encoding, done) {
+ this._deserializer.write(chunk, encoding, done);
+};
+
+// Management
+// --------------
+
+Endpoint.prototype._initializeManagement = function _initializeManagement() {
+ this._connection.on('stream', this.emit.bind(this, 'stream'));
+};
+
+Endpoint.prototype.createStream = function createStream() {
+ return this._connection.createStream();
+};
+
+// Error handling
+// --------------
+
+Endpoint.prototype._initializeErrorHandling = function _initializeErrorHandling() {
+ this._serializer.on('error', this._error.bind(this, 'serializer'));
+ this._deserializer.on('error', this._error.bind(this, 'deserializer'));
+ this._compressor.on('error', this._error.bind(this, 'compressor'));
+ this._decompressor.on('error', this._error.bind(this, 'decompressor'));
+ this._connection.on('error', this._error.bind(this, 'connection'));
+
+ this._connection.on('peerError', this.emit.bind(this, 'peerError'));
+};
+
+Endpoint.prototype._error = function _error(component, error) {
+ this._log.fatal({ source: component, message: error }, 'Fatal error, closing connection');
+ this.close(error);
+ setImmediate(this.emit.bind(this, 'error', error));
+};
+
+Endpoint.prototype.close = function close(error) {
+ this._connection.close(error);
+};
+
+// Bunyan serializers
+// ------------------
+
+exports.serializers = {};
+
+var nextId = 0;
+exports.serializers.e = function(endpoint) {
+ if (!('id' in endpoint)) {
+ endpoint.id = nextId;
+ nextId += 1;
+ }
+ return endpoint.id;
+};
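For orientation, here is a minimal sketch of how an Endpoint like the one above is typically wired up. It is purely illustrative: it assumes a bunyan-style logger, a hypothetical plain TCP peer at example.com:80, and a made-up filter; the real consumers of this class live elsewhere in the library.

    var net = require('net');
    var bunyan = require('bunyan');                     // assumed logging dependency
    var Endpoint = require('./endpoint').Endpoint;      // path relative to lib/protocol

    var log = bunyan.createLogger({ name: 'endpoint-demo' });

    // Illustrative filter: forward every frame, logging outgoing DATA frames along the way.
    function logDataFrames(frame, forward, done) {
      if (frame.type === 'DATA') {
        log.info({ length: frame.data.length }, 'outgoing DATA frame');
      }
      forward(frame);
      done();
    }

    var socket = net.connect({ host: 'example.com', port: 80 });   // hypothetical peer
    var settings = { SETTINGS_MAX_CONCURRENT_STREAMS: 100 };        // initial SETTINGS to advertise
    var endpoint = new Endpoint(log, 'CLIENT', settings, { beforeSerialization: logDataFrames });

    // The Endpoint is a Duplex stream of raw bytes, so it is piped to and from the socket.
    endpoint.pipe(socket).pipe(endpoint);

    endpoint.on('error', function(error) { log.error({ reason: error }, 'endpoint error'); });
    endpoint.on('stream', function(stream) { /* handle incoming streams here */ });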
diff --git a/testing/xpcshell/node-http2/lib/protocol/flow.js b/testing/xpcshell/node-http2/lib/protocol/flow.js
new file mode 100644
index 0000000000..6bec857551
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/flow.js
@@ -0,0 +1,345 @@
+var assert = require('assert');
+
+// The Flow class
+// ==============
+
+// Flow is a [Duplex stream][1] subclass which implements HTTP/2 flow control. It is designed to be
+// subclassed by [Connection](connection.html) and the `upstream` component of [Stream](stream.html).
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_duplex
+
+var Duplex = require('stream').Duplex;
+
+exports.Flow = Flow;
+
+// Public API
+// ----------
+
+// * **Event: 'error' (type)**: signals an error
+//
+// * **setInitialWindow(size)**: the initial flow control window size can be changed *any time*
+// ([as described in the standard][1]) using this method
+//
+// [1]: https://tools.ietf.org/html/rfc7540#section-6.9.2
+
+// API for child classes
+// ---------------------
+
+// * **new Flow([flowControlId])**: creating a new flow that will listen for WINDOW_UPDATE frames
+// with the given `flowControlId` (or every update frame if not given)
+//
+// * **_send()**: called when more frames should be pushed. The child class is expected to override
+// this (instead of the `_read` method of the Duplex class).
+//
+// * **_receive(frame, readyCallback)**: called when there's an incoming frame. The child class is
+// expected to override this (instead of the `_write` method of the Duplex class).
+//
+// * **push(frame): bool**: schedules `frame` for sending.
+//
+// Returns `true` if it needs more frames in the output queue, `false` if the output queue is
+//   full, and `null` if it did not push the frame into the output queue (instead, it pushed it into
+// the flow control queue).
+//
+// * **read(limit): frame**: like the regular `read`, but the 'flow control size' (0 for non-DATA
+//   frames, length of the payload for DATA frames) of the returned frame will not exceed `limit`.
+// Small exception: pass -1 as `limit` if the max. flow control size is 0. `read(0)` means the
+// same thing as [in the original API](https://nodejs.org/api/stream.html#stream_stream_read_0).
+//
+// * **getLastQueuedFrame(): frame**: returns the last frame in output buffers
+//
+// * **_log**: the Flow class uses the `_log` object of the parent
+
+// Constructor
+// -----------
+
+// When an HTTP/2 connection is first established, new streams are created with an initial flow
+// control window size of 65535 bytes.
+var INITIAL_WINDOW_SIZE = 65535;
+
+// `flowControlId` is needed if only specific WINDOW_UPDATEs should be watched.
+function Flow(flowControlId) {
+ Duplex.call(this, { objectMode: true });
+
+ this._window = this._initialWindow = INITIAL_WINDOW_SIZE;
+ this._flowControlId = flowControlId;
+ this._queue = [];
+ this._ended = false;
+ this._received = 0;
+}
+Flow.prototype = Object.create(Duplex.prototype, { constructor: { value: Flow } });
+
+// Incoming frames
+// ---------------
+
+// `_receive` is called when there's an incoming frame.
+Flow.prototype._receive = function _receive(frame, callback) {
+ throw new Error('The _receive(frame, callback) method has to be overridden by the child class!');
+};
+
+// `_receive` is called by `_write` which in turn is [called by Duplex][1] when someone `write()`s
+// to the flow. It emits the 'receiving' event and notifies the window size tracking code if the
+// incoming frame is a WINDOW_UPDATE.
+// [1]: https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback_1
+Flow.prototype._write = function _write(frame, encoding, callback) {
+ var sentToUs = (this._flowControlId === undefined) || (frame.stream === this._flowControlId);
+
+ if (sentToUs && (frame.flags.END_STREAM || (frame.type === 'RST_STREAM'))) {
+ this._ended = true;
+ }
+
+ if ((frame.type === 'DATA') && (frame.data.length > 0)) {
+ this._receive(frame, function() {
+ this._received += frame.data.length;
+ if (!this._restoreWindowTimer) {
+ this._restoreWindowTimer = setImmediate(this._restoreWindow.bind(this));
+ }
+ callback();
+ }.bind(this));
+ }
+
+ else {
+ this._receive(frame, callback);
+ }
+
+ if (sentToUs && (frame.type === 'WINDOW_UPDATE')) {
+ this._updateWindow(frame);
+ }
+};
+
+// `_restoreWindow` basically acknowledges the DATA frames received since its last call. It sends
+// a WINDOW_UPDATE that restores the flow control window of the remote end.
+// TODO: push this directly into the output queue. No need to wait for DATA frames in the queue.
+Flow.prototype._restoreWindow = function _restoreWindow() {
+ delete this._restoreWindowTimer;
+ if (!this._ended && (this._received > 0)) {
+ this.push({
+ type: 'WINDOW_UPDATE',
+ flags: {},
+ stream: this._flowControlId,
+ window_size: this._received
+ });
+ this._received = 0;
+ }
+};
+
+// Outgoing frames - sending procedure
+// -----------------------------------
+
+// flow
+// +-------------------------------------------------+
+// | |
+// +--------+ +---------+ |
+// read() | output | _read() | flow | _send() |
+// <----------| |<----------| control |<------------- |
+// | buffer | | buffer | |
+// +--------+ +---------+ |
+// | input | |
+// ---------->| |-----------------------------------> |
+// write() | buffer | _write() _receive() |
+// +--------+ |
+// | |
+// +-------------------------------------------------+
+
+// `_send` is called when more frames should be pushed to the output buffer.
+Flow.prototype._send = function _send() {
+ throw new Error('The _send() method has to be overridden by the child class!');
+};
+
+// `_send` is called by `_read` which is in turn [called by Duplex][1] when it wants to have more
+// items in the output queue.
+// [1]: https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback_1
+Flow.prototype._read = function _read() {
+ // * if the flow control queue is empty, then let the user push more frames
+ if (this._queue.length === 0) {
+ this._send();
+ }
+
+ // * if there are items in the flow control queue, then let's put them into the output queue (to
+ // the extent it is possible with respect to the window size and output queue feedback)
+ else if (this._window > 0) {
+ this._readableState.sync = true; // to avoid reentrant calls
+ do {
+ var moreNeeded = this._push(this._queue[0]);
+ if (moreNeeded !== null) {
+ this._queue.shift();
+ }
+ } while (moreNeeded && (this._queue.length > 0));
+ this._readableState.sync = false;
+
+ assert((!moreNeeded) || // * output queue is full
+ (this._queue.length === 0) || // * flow control queue is empty
+ (!this._window && (this._queue[0].type === 'DATA'))); // * waiting for window update
+ }
+
+ // * otherwise, come back when the flow control window is positive
+ else {
+ this.once('window_update', this._read);
+ }
+};
+
+var MAX_PAYLOAD_SIZE = 4096; // Must not be greater than the framer's MAX_PAYLOAD_SIZE, which is 16384
+
+// `read(limit)` is like the `read` of the Readable class, but it guarantees that the 'flow control
+// size' (0 for non-DATA frames, length of the payload for DATA frames) of the returned frame will
+// not exceed `limit`.
+Flow.prototype.read = function read(limit) {
+ if (limit === 0) {
+ return Duplex.prototype.read.call(this, 0);
+ } else if (limit === -1) {
+ limit = 0;
+ } else if ((limit === undefined) || (limit > MAX_PAYLOAD_SIZE)) {
+ limit = MAX_PAYLOAD_SIZE;
+ }
+
+ // * Looking at the first frame in the queue without pulling it out if possible.
+ var frame = this._readableState.buffer[0];
+ if (!frame && !this._readableState.ended) {
+ this._read();
+ frame = this._readableState.buffer[0];
+ }
+
+ if (frame && (frame.type === 'DATA')) {
+    // * If the frame is DATA, then there are two special cases:
+ // * if the limit is 0, we shouldn't return anything
+ // * if the size of the frame is larger than limit, then the frame should be split
+ if (limit === 0) {
+ return Duplex.prototype.read.call(this, 0);
+ }
+
+ else if (frame.data.length > limit) {
+ this._log.trace({ frame: frame, size: frame.data.length, forwardable: limit },
+ 'Splitting out forwardable part of a DATA frame.');
+ this.unshift({
+ type: 'DATA',
+ flags: {},
+ stream: frame.stream,
+ data: frame.data.slice(0, limit)
+ });
+ frame.data = frame.data.slice(limit);
+ }
+ }
+
+ return Duplex.prototype.read.call(this);
+};
+
+// `_parentPush` pushes the given `frame` into the output queue
+Flow.prototype._parentPush = function _parentPush(frame) {
+ this._log.trace({ frame: frame }, 'Pushing frame into the output queue');
+
+ if (frame && (frame.type === 'DATA') && (this._window !== Infinity)) {
+ this._log.trace({ window: this._window, by: frame.data.length },
+ 'Decreasing flow control window size.');
+ this._window -= frame.data.length;
+ assert(this._window >= 0);
+ }
+
+ return Duplex.prototype.push.call(this, frame);
+};
+
+// `_push(frame)` pushes `frame` into the output queue and decreases the flow control window size.
+// It is capable of splitting DATA frames into smaller parts, if the window size is not enough to
+// push the whole frame. The return value is similar to `push` except that it returns `null` if it
+// did not push the whole frame to the output queue (but maybe it did push part of the frame).
+Flow.prototype._push = function _push(frame) {
+ var data = frame && (frame.type === 'DATA') && frame.data;
+ var maxFrameLength = (this._window < 16384) ? this._window : 16384;
+
+ if (!data || (data.length <= maxFrameLength)) {
+ return this._parentPush(frame);
+ }
+
+ else if (this._window <= 0) {
+ return null;
+ }
+
+ else {
+ this._log.trace({ frame: frame, size: frame.data.length, forwardable: this._window },
+ 'Splitting out forwardable part of a DATA frame.');
+ frame.data = data.slice(maxFrameLength);
+ this._parentPush({
+ type: 'DATA',
+ flags: {},
+ stream: frame.stream,
+ data: data.slice(0, maxFrameLength)
+ });
+ return null;
+ }
+};
+
+// Push `frame` into the flow control queue, or if it's empty, then directly into the output queue
+Flow.prototype.push = function push(frame) {
+ if (frame === null) {
+ this._log.debug('Enqueueing outgoing End Of Stream');
+ } else {
+ this._log.debug({ frame: frame }, 'Enqueueing outgoing frame');
+ }
+
+ var moreNeeded = null;
+ if (this._queue.length === 0) {
+ moreNeeded = this._push(frame);
+ }
+
+ if (moreNeeded === null) {
+ this._queue.push(frame);
+ }
+
+ return moreNeeded;
+};
+
+// `getLastQueuedFrame` returns the last frame in output buffers. This is primarily used by the
+// [Stream](stream.html) class to mark the last frame with END_STREAM flag.
+Flow.prototype.getLastQueuedFrame = function getLastQueuedFrame() {
+ var readableQueue = this._readableState.buffer;
+ return this._queue[this._queue.length - 1] || readableQueue[readableQueue.length - 1];
+};
+
+// Outgoing frames - managing the window size
+// ------------------------------------------
+
+// Flow control window size is manipulated using the `_increaseWindow` method.
+//
+// * Invoking it with `Infinity` means turning off flow control. Flow control cannot be enabled
+// again once disabled. Any attempt to re-enable flow control MUST be rejected with a
+// FLOW_CONTROL_ERROR error code.
+// * A sender MUST NOT allow a flow control window to exceed 2^31 - 1 bytes. The action taken
+//   depends on whether the window belongs to a stream or to the connection itself.
+
+var WINDOW_SIZE_LIMIT = Math.pow(2, 31) - 1;
+
+Flow.prototype._increaseWindow = function _increaseWindow(size) {
+ if ((this._window === Infinity) && (size !== Infinity)) {
+ this._log.error('Trying to increase flow control window after flow control was turned off.');
+ this.emit('error', 'FLOW_CONTROL_ERROR');
+ } else {
+ this._log.trace({ window: this._window, by: size }, 'Increasing flow control window size.');
+ this._window += size;
+ if ((this._window !== Infinity) && (this._window > WINDOW_SIZE_LIMIT)) {
+ this._log.error('Flow control window grew too large.');
+ this.emit('error', 'FLOW_CONTROL_ERROR');
+ } else {
+ if (size != 0) {
+ this.emit('window_update');
+ }
+ }
+ }
+};
+
+// The `_updateWindow` method gets called every time there's an incoming WINDOW_UPDATE frame. It
+// modifies the flow control window:
+//
+// * Flow control can be disabled for an individual stream by sending a WINDOW_UPDATE with the
+// END_FLOW_CONTROL flag set. The payload of a WINDOW_UPDATE frame that has the END_FLOW_CONTROL
+// flag set is ignored.
+// * A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the amount
+// specified in the frame.
+Flow.prototype._updateWindow = function _updateWindow(frame) {
+ this._increaseWindow(frame.flags.END_FLOW_CONTROL ? Infinity : frame.window_size);
+};
+
+// A SETTINGS frame can alter the initial flow control window size for all current streams. When the
+// value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream
+// flow control windows by calling the `setInitialWindow` method. The window size has to be
+// modified by the difference between the new value and the old value.
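+//
+// (Worked example, purely illustrative: if the initial window was 65535 and the peer lowers
+// SETTINGS_INITIAL_WINDOW_SIZE to 32768, `setInitialWindow(32768)` calls `_increaseWindow(-32767)`,
+// i.e. the current window shrinks by 32767. The window can temporarily go negative, and queued
+// DATA frames are held back until WINDOW_UPDATE frames make it positive again.)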
+Flow.prototype.setInitialWindow = function setInitialWindow(initialWindow) {
+ this._increaseWindow(initialWindow - this._initialWindow);
+ this._initialWindow = initialWindow;
+};
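To make the child-class contract described above concrete, here is a minimal, purely illustrative Flow subclass that simply echoes incoming DATA frames back to the peer. It overrides `_send` and `_receive` (rather than `_read`/`_write`), as the comments in flow.js require, and it assumes a bunyan-style logger since Flow uses the parent's `_log`.

    var Flow = require('./flow').Flow;                 // path relative to lib/protocol

    function EchoFlow(log, flowControlId) {
      Flow.call(this, flowControlId);
      this._log = log;                                 // Flow expects the parent to provide `_log`
      this._pending = [];
    }
    EchoFlow.prototype = Object.create(Flow.prototype, { constructor: { value: EchoFlow } });

    // Called by Flow._write for every incoming frame; queue DATA frames to be echoed back.
    EchoFlow.prototype._receive = function _receive(frame, ready) {
      if (frame.type === 'DATA') {
        this._pending.push(frame);
      }
      ready();
    };

    // Called by Flow._read when the output side wants more frames; push queued frames and let
    // Flow take care of window bookkeeping and splitting oversized DATA frames.
    EchoFlow.prototype._send = function _send() {
      while (this._pending.length > 0) {
        this.push(this._pending.shift());
      }
    };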
diff --git a/testing/xpcshell/node-http2/lib/protocol/framer.js b/testing/xpcshell/node-http2/lib/protocol/framer.js
new file mode 100644
index 0000000000..055402f8d8
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/framer.js
@@ -0,0 +1,1166 @@
+// The framer consists of two [Transform Stream][1] subclasses that operate in [object mode][2]:
+// the Serializer and the Deserializer
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_transform
+// [2]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options
+var assert = require('assert');
+
+var Transform = require('stream').Transform;
+
+exports.Serializer = Serializer;
+exports.Deserializer = Deserializer;
+
+var logData = Boolean(process.env.HTTP2_LOG_DATA);
+
+var MAX_PAYLOAD_SIZE = 16384;
+var WINDOW_UPDATE_PAYLOAD_SIZE = 4;
+
+// Serializer
+// ----------
+//
+// Frame Objects
+// * * * * * * * --+---------------------------
+// | |
+// v v Buffers
+// [] -----> Payload Ser. --[buffers]--> Header Ser. --> * * * *
+// empty adds payload adds header
+// array buffers buffer
+
+function Serializer(log) {
+ this._log = log.child({ component: 'serializer' });
+ Transform.call(this, { objectMode: true });
+}
+Serializer.prototype = Object.create(Transform.prototype, { constructor: { value: Serializer } });
+
+// When there's an incoming frame object, it first generates the frame type specific part of the
+// frame (payload), and then adds the header part which holds fields that are common to all
+// frame types (like the length of the payload).
+Serializer.prototype._transform = function _transform(frame, encoding, done) {
+ this._log.trace({ frame: frame }, 'Outgoing frame');
+
+ assert(frame.type in Serializer, 'Unknown frame type: ' + frame.type);
+
+ var buffers = [];
+ Serializer[frame.type](frame, buffers);
+ var length = Serializer.commonHeader(frame, buffers);
+
+ assert(length <= MAX_PAYLOAD_SIZE, 'Frame too large!');
+
+ for (var i = 0; i < buffers.length; i++) {
+ if (logData) {
+ this._log.trace({ data: buffers[i] }, 'Outgoing data');
+ }
+ this.push(buffers[i]);
+ }
+
+ done();
+};
+
+// Deserializer
+// ------------
+//
+// Buffers
+// * * * * --------+-------------------------
+// | |
+// v v Frame Objects
+// {} -----> Header Des. --{frame}--> Payload Des. --> * * * * * * *
+// empty adds parsed adds parsed
+// object header properties payload properties
+
+function Deserializer(log, role) {
+ this._role = role;
+ this._log = log.child({ component: 'deserializer' });
+ Transform.call(this, { objectMode: true });
+ this._next(COMMON_HEADER_SIZE);
+}
+Deserializer.prototype = Object.create(Transform.prototype, { constructor: { value: Deserializer } });
+
+// The Deserializer is stateful, and its two main alternating states are: *waiting for header* and
+// *waiting for payload*. The state is stored in the boolean property `_waitingForHeader`.
+//
+// When entering a new state, a `_buffer` is created that will hold the accumulated data (header or
+// payload). The `_cursor` is used to track the progress.
+Deserializer.prototype._next = function(size) {
+ this._cursor = 0;
+ this._buffer = Buffer.alloc(size);
+ this._waitingForHeader = !this._waitingForHeader;
+ if (this._waitingForHeader) {
+ this._frame = {};
+ }
+};
+
+// Parsing an incoming buffer is an iterative process because it can hold multiple frames if it's
+// large enough. A `cursor` is used to track the progress in parsing the incoming `chunk`.
+Deserializer.prototype._transform = function _transform(chunk, encoding, done) {
+ var cursor = 0;
+
+ if (logData) {
+ this._log.trace({ data: chunk }, 'Incoming data');
+ }
+
+ while(cursor < chunk.length) {
+ // The content of an incoming buffer is first copied to `_buffer`. If it can't hold the full
+ // chunk, then only a part of it is copied.
+ var toCopy = Math.min(chunk.length - cursor, this._buffer.length - this._cursor);
+ chunk.copy(this._buffer, this._cursor, cursor, cursor + toCopy);
+ this._cursor += toCopy;
+ cursor += toCopy;
+
+    // When `_buffer` is full, its content gets parsed either as header or payload depending on
+    // the current state.
+
+    // If it's a header, then the parsed data is stored in a temporary variable and the
+    // deserializer waits for a payload of the specified length.
+ if ((this._cursor === this._buffer.length) && this._waitingForHeader) {
+ var payloadSize = Deserializer.commonHeader(this._buffer, this._frame);
+ if (payloadSize <= MAX_PAYLOAD_SIZE) {
+ this._next(payloadSize);
+ } else {
+ this.emit('error', 'FRAME_SIZE_ERROR');
+ return;
+ }
+ }
+
+    // If it's a payload, then the frame object is finalized and gets pushed out.
+ // Unknown frame types are ignored.
+ //
+ // Note: If we just finished the parsing of a header and the payload length is 0, this branch
+ // will also run.
+ if ((this._cursor === this._buffer.length) && !this._waitingForHeader) {
+ if (this._frame.type) {
+ var error = Deserializer[this._frame.type](this._buffer, this._frame, this._role);
+ if (error) {
+ this._log.error('Incoming frame parsing error: ' + error);
+ this.emit('error', error);
+ } else {
+ this._log.trace({ frame: this._frame }, 'Incoming frame');
+ this.push(this._frame);
+ }
+ } else {
+ this._log.error('Unknown type incoming frame');
+ // Ignore it other than logging
+ }
+ this._next(COMMON_HEADER_SIZE);
+ }
+ }
+
+ done();
+};
+
+// [Frame Header](https://tools.ietf.org/html/rfc7540#section-4.1)
+// --------------------------------------------------------------
+//
+// HTTP/2 frames share a common base format consisting of a 9-byte header followed by 0 to 2^24 - 1
+// bytes of data.
+//
+// Additional size limits can be set by specific application uses. HTTP limits the frame size to
+// 16,384 octets by default, though this can be increased by a receiver.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Length (24) |
+// +---------------+---------------+---------------+
+// | Type (8) | Flags (8) |
+// +-+-----------------------------+---------------+---------------+
+// |R| Stream Identifier (31) |
+// +-+-------------------------------------------------------------+
+// | Frame Data (0...) ...
+// +---------------------------------------------------------------+
+//
+// The fields of the frame header are defined as:
+//
+// * Length:
+// The length of the frame data expressed as an unsigned 24-bit integer. The 9 bytes of the frame
+// header are not included in this value.
+//
+// * Type:
+// The 8-bit type of the frame. The frame type determines how the remainder of the frame header
+// and data are interpreted. Implementations MUST ignore unsupported and unrecognized frame types.
+//
+// * Flags:
+// An 8-bit field reserved for frame-type specific boolean flags.
+//
+// Flags are assigned semantics specific to the indicated frame type. Flags that have no defined
+// semantics for a particular frame type MUST be ignored, and MUST be left unset (0) when sending.
+//
+// * R:
+// A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST remain unset
+// (0) when sending and MUST be ignored when receiving.
+//
+// * Stream Identifier:
+// A 31-bit stream identifier. The value 0 is reserved for frames that are associated with the
+// connection as a whole as opposed to an individual stream.
+//
+// The structure and content of the remaining frame data is dependent entirely on the frame type.
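+//
+// Worked example (illustrative): a WINDOW_UPDATE frame on stream 1 with its 4-byte payload is
+// serialized with the common header
+//
+//     00 00 04      Length = 4
+//     08            Type   = 0x8 (WINDOW_UPDATE)
+//     00            Flags  = none
+//     00 00 00 01   Stream Identifier = 1
+//
+// followed by the 4 payload bytes holding the window size increment.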
+
+var COMMON_HEADER_SIZE = 9;
+
+var frameTypes = [];
+
+var frameFlags = {};
+
+var genericAttributes = ['type', 'flags', 'stream'];
+
+var typeSpecificAttributes = {};
+
+Serializer.commonHeader = function writeCommonHeader(frame, buffers) {
+ var headerBuffer = Buffer.alloc(COMMON_HEADER_SIZE);
+
+ var size = 0;
+ for (var i = 0; i < buffers.length; i++) {
+ size += buffers[i].length;
+ }
+ headerBuffer.writeUInt8(0, 0);
+ headerBuffer.writeUInt16BE(size, 1);
+
+ var typeId = frameTypes.indexOf(frame.type); // If we are here then the type is valid for sure
+ headerBuffer.writeUInt8(typeId, 3);
+
+ var flagByte = 0;
+ for (var flag in frame.flags) {
+ var position = frameFlags[frame.type].indexOf(flag);
+ assert(position !== -1, 'Unknown flag for frame type ' + frame.type + ': ' + flag);
+ if (frame.flags[flag]) {
+ flagByte |= (1 << position);
+ }
+ }
+ headerBuffer.writeUInt8(flagByte, 4);
+
+ assert((0 <= frame.stream) && (frame.stream < 0x7fffffff), frame.stream);
+ headerBuffer.writeUInt32BE(frame.stream || 0, 5);
+
+ buffers.unshift(headerBuffer);
+
+ return size;
+};
+
+Deserializer.commonHeader = function readCommonHeader(buffer, frame) {
+ if (buffer.length < 9) {
+ return 'FRAME_SIZE_ERROR';
+ }
+
+ var totallyWastedByte = buffer.readUInt8(0);
+ var length = buffer.readUInt16BE(1);
+ // We do this just for sanity checking later on, to make sure no one sent us a
+ // frame that's super large.
+ length += totallyWastedByte << 16;
+
+ frame.type = frameTypes[buffer.readUInt8(3)];
+ if (!frame.type) {
+ // We are required to ignore unknown frame types
+ return length;
+ }
+
+ frame.flags = {};
+ var flagByte = buffer.readUInt8(4);
+ var definedFlags = frameFlags[frame.type];
+ for (var i = 0; i < definedFlags.length; i++) {
+ frame.flags[definedFlags[i]] = Boolean(flagByte & (1 << i));
+ }
+
+ frame.stream = buffer.readUInt32BE(5) & 0x7fffffff;
+
+ return length;
+};
+
+// Frame types
+// ===========
+
+// Every frame type is registered in the following places:
+//
+// * `frameTypes`: a register of frame type codes (used by `commonHeader()`)
+// * `frameFlags`: a register of valid flags for frame types (used by `commonHeader()`)
+// * `typeSpecificAttributes`: a register of frame specific frame object attributes (used by
+// logging code and also serves as documentation for frame objects)
+
+// [DATA Frames](https://tools.ietf.org/html/rfc7540#section-6.1)
+// ------------------------------------------------------------
+//
+// DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated with a
+// stream.
+//
+// The DATA frame defines the following flags:
+//
+// * END_STREAM (0x1):
+// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
+// identified stream.
+// * PADDED (0x08):
+// Bit 4 being set indicates that the Pad Length field is present.
+
+frameTypes[0x0] = 'DATA';
+
+frameFlags.DATA = ['END_STREAM', 'RESERVED2', 'RESERVED4', 'PADDED'];
+
+typeSpecificAttributes.DATA = ['data'];
+
+Serializer.DATA = function writeData(frame, buffers) {
+ buffers.push(frame.data);
+};
+
+Deserializer.DATA = function readData(buffer, frame) {
+ var dataOffset = 0;
+ var paddingLength = 0;
+ if (frame.flags.PADDED) {
+ if (buffer.length < 1) {
+ // We must have at least one byte for padding control, but we don't. Bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+ paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
+ dataOffset = 1;
+ }
+
+ if (paddingLength) {
+ if (paddingLength >= (buffer.length - 1)) {
+ // We don't have enough room for the padding advertised - bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.data = buffer.slice(dataOffset, -1 * paddingLength);
+ } else {
+ frame.data = buffer.slice(dataOffset);
+ }
+};
+
+// [HEADERS](https://tools.ietf.org/html/rfc7540#section-6.2)
+// --------------------------------------------------------------
+//
+// The HEADERS frame (type=0x1) allows the sender to create a stream.
+//
+// The HEADERS frame defines the following flags:
+//
+// * END_STREAM (0x1):
+// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
+// identified stream.
+// * END_HEADERS (0x4):
+// The END_HEADERS bit indicates that this frame contains the entire payload necessary to provide
+// a complete set of headers.
+// * PADDED (0x08):
+// Bit 4 being set indicates that the Pad Length field is present.
+// * PRIORITY (0x20):
+//   Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight fields are
+// present.
+
+frameTypes[0x1] = 'HEADERS';
+
+frameFlags.HEADERS = ['END_STREAM', 'RESERVED2', 'END_HEADERS', 'PADDED', 'RESERVED5', 'PRIORITY'];
+
+typeSpecificAttributes.HEADERS = ['priorityDependency', 'priorityWeight', 'exclusiveDependency', 'headers', 'data'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Pad Length? (8)|
+// +-+-------------+---------------+-------------------------------+
+// |E| Stream Dependency? (31) |
+// +-+-------------+-----------------------------------------------+
+// | Weight? (8) |
+// +-+-------------+-----------------------------------------------+
+// | Header Block Fragment (*) ...
+// +---------------------------------------------------------------+
+// | Padding (*) ...
+// +---------------------------------------------------------------+
+//
+// The payload of a HEADERS frame contains a Headers Block
+
+Serializer.HEADERS = function writeHeadersPriority(frame, buffers) {
+ if (frame.flags.PRIORITY) {
+ var buffer = Buffer.alloc(5);
+ assert((0 <= frame.priorityDependency) && (frame.priorityDependency <= 0x7fffffff), frame.priorityDependency);
+ buffer.writeUInt32BE(frame.priorityDependency, 0);
+ if (frame.exclusiveDependency) {
+ buffer[0] |= 0x80;
+ }
+ assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
+ buffer.writeUInt8(frame.priorityWeight, 4);
+ buffers.push(buffer);
+ }
+ buffers.push(frame.data);
+};
+
+Deserializer.HEADERS = function readHeadersPriority(buffer, frame) {
+ var minFrameLength = 0;
+ if (frame.flags.PADDED) {
+ minFrameLength += 1;
+ }
+ if (frame.flags.PRIORITY) {
+ minFrameLength += 5;
+ }
+ if (buffer.length < minFrameLength) {
+ // Peer didn't send enough data - bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+
+ var dataOffset = 0;
+ var paddingLength = 0;
+ if (frame.flags.PADDED) {
+ paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
+ dataOffset = 1;
+ }
+
+ if (frame.flags.PRIORITY) {
+ var dependencyData = Buffer.alloc(4);
+ buffer.copy(dependencyData, 0, dataOffset, dataOffset + 4);
+ dataOffset += 4;
+ frame.exclusiveDependency = !!(dependencyData[0] & 0x80);
+ dependencyData[0] &= 0x7f;
+ frame.priorityDependency = dependencyData.readUInt32BE(0);
+ frame.priorityWeight = buffer.readUInt8(dataOffset);
+ dataOffset += 1;
+ }
+
+ if (paddingLength) {
+ if ((buffer.length - dataOffset) < paddingLength) {
+ // Not enough data left to satisfy the advertised padding - bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.data = buffer.slice(dataOffset, -1 * paddingLength);
+ } else {
+ frame.data = buffer.slice(dataOffset);
+ }
+};
+
+// [PRIORITY](https://tools.ietf.org/html/rfc7540#section-6.3)
+// -------------------------------------------------------
+//
+// The PRIORITY frame (type=0x2) specifies the sender-advised priority of a stream.
+//
+// The PRIORITY frame does not define any flags.
+
+frameTypes[0x2] = 'PRIORITY';
+
+frameFlags.PRIORITY = [];
+
+typeSpecificAttributes.PRIORITY = ['priorityDependency', 'priorityWeight', 'exclusiveDependency'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |E| Stream Dependency? (31) |
+// +-+-------------+-----------------------------------------------+
+// | Weight? (8) |
+// +-+-------------+
+//
+// The payload of a PRIORITY frame contains an exclusive bit, a 31-bit dependency, and an 8-bit weight
+
+Serializer.PRIORITY = function writePriority(frame, buffers) {
+ var buffer = Buffer.alloc(5);
+ assert((0 <= frame.priorityDependency) && (frame.priorityDependency <= 0x7fffffff), frame.priorityDependency);
+ buffer.writeUInt32BE(frame.priorityDependency, 0);
+ if (frame.exclusiveDependency) {
+ buffer[0] |= 0x80;
+ }
+ assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
+ buffer.writeUInt8(frame.priorityWeight, 4);
+
+ buffers.push(buffer);
+};
+
+Deserializer.PRIORITY = function readPriority(buffer, frame) {
+ if (buffer.length < 5) {
+ // PRIORITY frames are 5 bytes long. Bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+ var dependencyData = Buffer.alloc(4);
+ buffer.copy(dependencyData, 0, 0, 4);
+ frame.exclusiveDependency = !!(dependencyData[0] & 0x80);
+ dependencyData[0] &= 0x7f;
+ frame.priorityDependency = dependencyData.readUInt32BE(0);
+ frame.priorityWeight = buffer.readUInt8(4);
+};
+
+// [RST_STREAM](https://tools.ietf.org/html/rfc7540#section-6.4)
+// -----------------------------------------------------------
+//
+// The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream.
+//
+// No type-flags are defined.
+
+frameTypes[0x3] = 'RST_STREAM';
+
+frameFlags.RST_STREAM = [];
+
+typeSpecificAttributes.RST_STREAM = ['error'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Error Code (32) |
+// +---------------------------------------------------------------+
+//
+// The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error
+// code (see Error Codes). The error code indicates why the stream is being terminated.
+
+Serializer.RST_STREAM = function writeRstStream(frame, buffers) {
+ var buffer = Buffer.alloc(4);
+ var code = errorCodes.indexOf(frame.error);
+ assert((0 <= code) && (code <= 0xffffffff), code);
+ buffer.writeUInt32BE(code, 0);
+ buffers.push(buffer);
+};
+
+Deserializer.RST_STREAM = function readRstStream(buffer, frame) {
+ if (buffer.length < 4) {
+ // RST_STREAM is 4 bytes long. Bad peer!
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.error = errorCodes[buffer.readUInt32BE(0)];
+ if (!frame.error) {
+ // Unknown error codes are considered equivalent to INTERNAL_ERROR
+ frame.error = 'INTERNAL_ERROR';
+ }
+};
+
+// [SETTINGS](https://tools.ietf.org/html/rfc7540#section-6.5)
+// -------------------------------------------------------
+//
+// The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+// communicate.
+//
+// The SETTINGS frame defines the following flag:
+
+// * ACK (0x1):
+// Bit 1 being set indicates that this frame acknowledges receipt and application of the peer's
+// SETTINGS frame.
+frameTypes[0x4] = 'SETTINGS';
+
+frameFlags.SETTINGS = ['ACK'];
+
+typeSpecificAttributes.SETTINGS = ['settings'];
+
+// The payload of a SETTINGS frame consists of zero or more settings. Each setting consists of a
+// 16-bit identifier, and an unsigned 32-bit value.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Identifier(16) | Value (32) |
+// +-----------------+---------------------------------------------+
+// ...Value |
+// +---------------------------------+
+//
+// Each setting in a SETTINGS frame replaces the existing value for that setting. Settings are
+// processed in the order in which they appear, and a receiver of a SETTINGS frame does not need to
+// maintain any state other than the current value of settings. Therefore, the value of a setting
+// is the last value that is seen by a receiver. This permits the inclusion of the same settings
+// multiple times in the same SETTINGS frame, though doing so does nothing other than waste
+// connection capacity.
+
+Serializer.SETTINGS = function writeSettings(frame, buffers) {
+ var settings = [], settingsLeft = Object.keys(frame.settings);
+ definedSettings.forEach(function(setting, id) {
+ if (setting.name in frame.settings) {
+ settingsLeft.splice(settingsLeft.indexOf(setting.name), 1);
+ var value = frame.settings[setting.name];
+ settings.push({ id: id, value: setting.flag ? Boolean(value) : value });
+ }
+ });
+ assert(settingsLeft.length === 0, 'Unknown settings: ' + settingsLeft.join(', '));
+
+ var buffer = Buffer.alloc(settings.length * 6);
+ for (var i = 0; i < settings.length; i++) {
+ buffer.writeUInt16BE(settings[i].id & 0xffff, i*6);
+ buffer.writeUInt32BE(settings[i].value, i*6 + 2);
+ }
+
+ buffers.push(buffer);
+};
+
+Deserializer.SETTINGS = function readSettings(buffer, frame, role) {
+ frame.settings = {};
+
+ // Receipt of a SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a connection error
+ // (Section 5.4.1) of type FRAME_SIZE_ERROR.
+ if(frame.flags.ACK && buffer.length != 0) {
+ return 'FRAME_SIZE_ERROR';
+ }
+
+ if (buffer.length % 6 !== 0) {
+ return 'PROTOCOL_ERROR';
+ }
+ for (var i = 0; i < buffer.length / 6; i++) {
+ var id = buffer.readUInt16BE(i*6) & 0xffff;
+ var setting = definedSettings[id];
+ if (setting) {
+ if (role == 'CLIENT' && setting.name == 'SETTINGS_ENABLE_PUSH') {
+ return 'SETTINGS frame on client got SETTINGS_ENABLE_PUSH';
+ }
+ var value = buffer.readUInt32BE(i*6 + 2);
+ frame.settings[setting.name] = setting.flag ? Boolean(value & 0x1) : value;
+ }
+ }
+};
+
+// The following settings are defined:
+var definedSettings = [];
+
+// * SETTINGS_HEADER_TABLE_SIZE (1):
+// Allows the sender to inform the remote endpoint of the size of the header compression table
+// used to decode header blocks.
+definedSettings[1] = { name: 'SETTINGS_HEADER_TABLE_SIZE', flag: false };
+
+// * SETTINGS_ENABLE_PUSH (2):
+//   This setting can be used to disable server push. An endpoint MUST NOT send a PUSH_PROMISE frame
+// if it receives this setting set to a value of 0. The default value is 1, which indicates that
+// push is permitted.
+definedSettings[2] = { name: 'SETTINGS_ENABLE_PUSH', flag: true };
+
+// * SETTINGS_MAX_CONCURRENT_STREAMS (3):
+// indicates the maximum number of concurrent streams that the sender will allow.
+definedSettings[3] = { name: 'SETTINGS_MAX_CONCURRENT_STREAMS', flag: false };
+
+// * SETTINGS_INITIAL_WINDOW_SIZE (4):
+// indicates the sender's initial stream window size (in bytes) for new streams.
+definedSettings[4] = { name: 'SETTINGS_INITIAL_WINDOW_SIZE', flag: false };
+
+// * SETTINGS_MAX_FRAME_SIZE (5):
+// indicates the maximum size of a frame the receiver will allow.
+definedSettings[5] = { name: 'SETTINGS_MAX_FRAME_SIZE', flag: false };
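+
+// As an illustration (values are arbitrary), a SETTINGS frame advertising the header table size
+// and the initial window size would be represented by the following frame object before
+// serialization:
+//
+//     { type: 'SETTINGS', flags: {}, stream: 0,
+//       settings: { SETTINGS_HEADER_TABLE_SIZE: 4096, SETTINGS_INITIAL_WINDOW_SIZE: 65535 } }
+//
+// Serializer.SETTINGS encodes this as two 6-byte identifier/value pairs (ids 1 and 4), and
+// Deserializer.SETTINGS rebuilds the same `settings` map on the receiving side.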
+
+// [PUSH_PROMISE](https://tools.ietf.org/html/rfc7540#section-6.6)
+// ---------------------------------------------------------------
+//
+// The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of streams the
+// sender intends to initiate.
+//
+// The PUSH_PROMISE frame defines the following flags:
+//
+// * END_PUSH_PROMISE (0x4):
+// The END_PUSH_PROMISE bit indicates that this frame contains the entire payload necessary to
+// provide a complete set of headers.
+
+frameTypes[0x5] = 'PUSH_PROMISE';
+
+frameFlags.PUSH_PROMISE = ['RESERVED1', 'RESERVED2', 'END_PUSH_PROMISE', 'PADDED'];
+
+typeSpecificAttributes.PUSH_PROMISE = ['promised_stream', 'headers', 'data'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Pad Length? (8)|
+// +-+-------------+-----------------------------------------------+
+// |X| Promised-Stream-ID (31) |
+// +-+-------------------------------------------------------------+
+// | Header Block Fragment (*) ...
+// +---------------------------------------------------------------+
+// | Padding (*) ...
+// +---------------------------------------------------------------+
+//
+// The PUSH_PROMISE frame includes the unsigned 31-bit identifier of
+// the stream the endpoint plans to create along with a minimal set of headers that provide
+// additional context for the stream.
+
+Serializer.PUSH_PROMISE = function writePushPromise(frame, buffers) {
+ var buffer = Buffer.alloc(4);
+
+ var promised_stream = frame.promised_stream;
+ assert((0 <= promised_stream) && (promised_stream <= 0x7fffffff), promised_stream);
+ buffer.writeUInt32BE(promised_stream, 0);
+
+ buffers.push(buffer);
+ buffers.push(frame.data);
+};
+
+Deserializer.PUSH_PROMISE = function readPushPromise(buffer, frame) {
+ if (buffer.length < 4) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ var dataOffset = 0;
+ var paddingLength = 0;
+ if (frame.flags.PADDED) {
+ if (buffer.length < 5) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ paddingLength = (buffer.readUInt8(dataOffset) & 0xff);
+ dataOffset = 1;
+ }
+ frame.promised_stream = buffer.readUInt32BE(dataOffset) & 0x7fffffff;
+ dataOffset += 4;
+ if (paddingLength) {
+ if ((buffer.length - dataOffset) < paddingLength) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.data = buffer.slice(dataOffset, -1 * paddingLength);
+ } else {
+ frame.data = buffer.slice(dataOffset);
+ }
+};
+
+// [PING](https://tools.ietf.org/html/rfc7540#section-6.7)
+// -----------------------------------------------
+//
+// The PING frame (type=0x6) is a mechanism for measuring a minimal round-trip time from the
+// sender, as well as determining whether an idle connection is still functional.
+//
+// The PING frame defines one type-specific flag:
+//
+// * ACK (0x1):
+// Bit 1 being set indicates that this PING frame is a PING response.
+
+frameTypes[0x6] = 'PING';
+
+frameFlags.PING = ['ACK'];
+
+typeSpecificAttributes.PING = ['data'];
+
+// In addition to the frame header, PING frames MUST contain 8 additional octets of opaque data.
+
+Serializer.PING = function writePing(frame, buffers) {
+ buffers.push(frame.data);
+};
+
+Deserializer.PING = function readPing(buffer, frame) {
+ if (buffer.length !== 8) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.data = buffer;
+};
+
+// [GOAWAY](https://tools.ietf.org/html/rfc7540#section-6.8)
+// ---------------------------------------------------
+//
+// The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this connection.
+//
+// The GOAWAY frame does not define any flags.
+
+frameTypes[0x7] = 'GOAWAY';
+
+frameFlags.GOAWAY = [];
+
+typeSpecificAttributes.GOAWAY = ['last_stream', 'error'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |X| Last-Stream-ID (31) |
+// +-+-------------------------------------------------------------+
+// | Error Code (32) |
+// +---------------------------------------------------------------+
+//
+// The last stream identifier in the GOAWAY frame contains the highest numbered stream identifier
+// for which the sender of the GOAWAY frame has received frames on and might have taken some action
+// on.
+//
+// The GOAWAY frame also contains a 32-bit error code (see Error Codes) that contains the reason for
+// closing the connection.
+
+Serializer.GOAWAY = function writeGoaway(frame, buffers) {
+ var buffer = Buffer.alloc(8);
+
+ var last_stream = frame.last_stream;
+ assert((0 <= last_stream) && (last_stream <= 0x7fffffff), last_stream);
+ buffer.writeUInt32BE(last_stream, 0);
+
+ var code = errorCodes.indexOf(frame.error);
+ assert((0 <= code) && (code <= 0xffffffff), code);
+ buffer.writeUInt32BE(code, 4);
+
+ buffers.push(buffer);
+};
+
+Deserializer.GOAWAY = function readGoaway(buffer, frame) {
+ if (buffer.length !== 8) {
+ // GOAWAY must have 8 bytes
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.last_stream = buffer.readUInt32BE(0) & 0x7fffffff;
+ frame.error = errorCodes[buffer.readUInt32BE(4)];
+ if (!frame.error) {
+    // Unknown error codes are considered equivalent to INTERNAL_ERROR
+ frame.error = 'INTERNAL_ERROR';
+ }
+};
+
+// [WINDOW_UPDATE](https://tools.ietf.org/html/rfc7540#section-6.9)
+// -----------------------------------------------------------------
+//
+// The WINDOW_UPDATE frame (type=0x8) is used to implement flow control.
+//
+// The WINDOW_UPDATE frame does not define any flags.
+
+frameTypes[0x8] = 'WINDOW_UPDATE';
+
+frameFlags.WINDOW_UPDATE = [];
+
+typeSpecificAttributes.WINDOW_UPDATE = ['window_size'];
+
+// The payload of a WINDOW_UPDATE frame is a 32-bit value indicating the additional number of bytes
+// that the sender can transmit in addition to the existing flow control window. The legal range
+// for this field is 1 to 2^31 - 1 (0x7fffffff) bytes; the most significant bit of this value is
+// reserved.
+
+Serializer.WINDOW_UPDATE = function writeWindowUpdate(frame, buffers) {
+ var buffer = Buffer.alloc(4);
+
+ var window_size = frame.window_size;
+ assert((0 < window_size) && (window_size <= 0x7fffffff), window_size);
+ buffer.writeUInt32BE(window_size, 0);
+
+ buffers.push(buffer);
+};
+
+Deserializer.WINDOW_UPDATE = function readWindowUpdate(buffer, frame) {
+ if (buffer.length !== WINDOW_UPDATE_PAYLOAD_SIZE) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.window_size = buffer.readUInt32BE(0) & 0x7fffffff;
+ if (frame.window_size === 0) {
+ return 'PROTOCOL_ERROR';
+ }
+};
+
+// [CONTINUATION](https://tools.ietf.org/html/rfc7540#section-6.10)
+// ------------------------------------------------------------
+//
+// The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments.
+//
+// The CONTINUATION frame defines the following flag:
+//
+// * END_HEADERS (0x4):
+// The END_HEADERS bit indicates that this frame ends the sequence of header block fragments
+// necessary to provide a complete set of headers.
+
+frameTypes[0x9] = 'CONTINUATION';
+
+frameFlags.CONTINUATION = ['RESERVED1', 'RESERVED2', 'END_HEADERS'];
+
+typeSpecificAttributes.CONTINUATION = ['headers', 'data'];
+
+Serializer.CONTINUATION = function writeContinuation(frame, buffers) {
+ buffers.push(frame.data);
+};
+
+Deserializer.CONTINUATION = function readContinuation(buffer, frame) {
+ frame.data = buffer;
+};
+
+// [ALTSVC](https://tools.ietf.org/html/rfc7838#section-4)
+// ------------------------------------------------------------
+//
+// The ALTSVC frame (type=0xA) advertises the availability of an alternative service to the client.
+//
+// The ALTSVC frame does not define any flags.
+
+frameTypes[0xA] = 'ALTSVC';
+
+frameFlags.ALTSVC = [];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Origin-Len (16) | Origin? (*) ...
+// +-------------------------------+----------------+--------------+
+// | Alt-Svc-Field-Value (*) ...
+// +---------------------------------------------------------------+
+//
+// The ALTSVC frame contains the following fields:
+//
+// Origin-Len: An unsigned, 16-bit integer indicating the length, in
+// octets, of the Origin field.
+//
+// Origin: An OPTIONAL sequence of characters containing ASCII
+// serialisation of an origin ([RFC6454](https://tools.ietf.org/html/rfc6454),
+// Section 6.2) that the alternate service is applicable to.
+//
+// Alt-Svc-Field-Value: A sequence of octets (length determined by
+// subtracting the length of all preceding fields from the frame
+// length) containing a value identical to the Alt-Svc field value
+// defined in (Section 3)[https://tools.ietf.org/html/rfc7838#section-3]
+// (ABNF production "Alt-Svc").
+
+typeSpecificAttributes.ALTSVC = ['maxAge', 'port', 'protocolID', 'host',
+ 'origin'];
+
+function istchar(c) {
+ return ('!#$&\'*+-.^_`|~1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'.indexOf(c) > -1);
+}
+
+function hexencode(s) {
+ var t = '';
+ for (var i = 0; i < s.length; i++) {
+ if (!istchar(s[i])) {
+ t += '%';
+ t += Buffer.from(s[i]).toString('hex');
+ } else {
+ t += s[i];
+ }
+ }
+ return t;
+}
+
+Serializer.ALTSVC = function writeAltSvc(frame, buffers) {
+ var buffer = Buffer.alloc(2);
+ buffer.writeUInt16BE(frame.origin.length, 0);
+ buffers.push(buffer);
+ buffers.push(Buffer.from(frame.origin, 'ascii'));
+
+ var fieldValue = hexencode(frame.protocolID) + '="' + frame.host + ':' + frame.port + '"';
+ if (frame.maxAge !== 86400) { // 86400 is the default
+ fieldValue += "; ma=" + frame.maxAge;
+ }
+
+ buffers.push(Buffer.from(fieldValue, 'ascii'));
+};
+
+function stripquotes(s) {
+ var start = 0;
+ var end = s.length;
+ while ((start < end) && (s[start] === '"')) {
+ start++;
+ }
+ while ((end > start) && (s[end - 1] === '"')) {
+ end--;
+ }
+ if (start >= end) {
+ return "";
+ }
+ return s.substring(start, end);
+}
+
+function splitNameValue(nvpair) {
+ var eq = -1;
+ var inQuotes = false;
+
+ for (var i = 0; i < nvpair.length; i++) {
+ if (nvpair[i] === '"') {
+ inQuotes = !inQuotes;
+ continue;
+ }
+ if (inQuotes) {
+ continue;
+ }
+ if (nvpair[i] === '=') {
+ eq = i;
+ break;
+ }
+ }
+
+ if (eq === -1) {
+ return {'name': nvpair, 'value': null};
+ }
+
+ var name = stripquotes(nvpair.substring(0, eq).trim());
+ var value = stripquotes(nvpair.substring(eq + 1).trim());
+ return {'name': name, 'value': value};
+}
+
+function splitHeaderParameters(hv) {
+ return parseHeaderValue(hv, ';', splitNameValue);
+}
+
+function parseHeaderValue(hv, separator, callback) {
+ var start = 0;
+ var inQuotes = false;
+ var values = [];
+
+ for (var i = 0; i < hv.length; i++) {
+ if (hv[i] === '"') {
+ inQuotes = !inQuotes;
+ continue;
+ }
+ if (inQuotes) {
+ // Just skip this
+ continue;
+ }
+ if (hv[i] === separator) {
+ var newValue = hv.substring(start, i).trim();
+ if (newValue.length > 0) {
+ newValue = callback(newValue);
+ values.push(newValue);
+ }
+ start = i + 1;
+ }
+ }
+
+ var newValue = hv.substring(start).trim();
+ if (newValue.length > 0) {
+ newValue = callback(newValue);
+ values.push(newValue);
+ }
+
+ return values;
+}
+
+function rsplit(s, delim, count) {
+ var nsplits = 0;
+ var end = s.length;
+ var rval = [];
+ for (var i = s.length - 1; i >= 0; i--) {
+ if (s[i] === delim) {
+ var t = s.substring(i + 1, end);
+ end = i;
+ rval.unshift(t);
+ nsplits++;
+ if (nsplits === count) {
+ break;
+ }
+ }
+ }
+ if (end !== 0) {
+ rval.unshift(s.substring(0, end));
+ }
+ return rval;
+}
+
+function ishex(c) {
+ return ('0123456789ABCDEFabcdef'.indexOf(c) > -1);
+}
+
+function unescape(s) {
+ var i = 0;
+ var t = '';
+ while (i < s.length) {
+ if (s[i] != '%' || !ishex(s[i + 1]) || !ishex(s[i + 2])) {
+ t += s[i];
+ } else {
+ ++i;
+ var hexvalue = '';
+ if (i < s.length) {
+ hexvalue += s[i];
+ ++i;
+ }
+ if (i < s.length) {
+ hexvalue += s[i];
+ }
+ if (hexvalue.length > 0) {
+ t += Buffer.from(hexvalue, 'hex').toString();
+ } else {
+ t += '%';
+ }
+ }
+
+ ++i;
+ }
+ return t;
+}
+
+Deserializer.ALTSVC = function readAltSvc(buffer, frame) {
+ if (buffer.length < 2) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ var originLength = buffer.readUInt16BE(0);
+ if ((buffer.length - 2) < originLength) {
+ return 'FRAME_SIZE_ERROR';
+ }
+ frame.origin = buffer.toString('ascii', 2, 2 + originLength);
+ var fieldValue = buffer.toString('ascii', 2 + originLength);
+ var values = parseHeaderValue(fieldValue, ',', splitHeaderParameters);
+ if (values.length > 1) {
+ // TODO - warn that we only use one here
+ }
+ if (values.length === 0) {
+ // Well that's a malformed frame. Just ignore it.
+ return;
+ }
+
+ var chosenAltSvc = values[0];
+ frame.maxAge = 86400; // Default
+ for (var i = 0; i < chosenAltSvc.length; i++) {
+ if (i === 0) {
+ // This corresponds to the protocolID="<host>:<port>" item
+ frame.protocolID = unescape(chosenAltSvc[i].name);
+ var hostport = rsplit(chosenAltSvc[i].value, ':', 1);
+ frame.host = hostport[0];
+ frame.port = parseInt(hostport[1], 10);
+ } else if (chosenAltSvc[i].name == 'ma') {
+ frame.maxAge = parseInt(chosenAltSvc[i].value, 10);
+ }
+ // Otherwise, we just ignore this
+ }
+};
+
+// Frame type 0xB was BLOCKED; some versions of Chrome will
+// throw PROTOCOL_ERROR upon seeing it with a non-zero payload.
+
+frameTypes[0xC] = 'ORIGIN';
+frameFlags.ORIGIN = [];
+typeSpecificAttributes.ORIGIN = ['originList'];
+
+Serializer.ORIGIN = function writeOrigin(frame, buffers) {
+ for (var i = 0; i < frame.originList.length; i++) {
+ var buffer = Buffer.alloc(2);
+ buffer.writeUInt16BE(frame.originList[i].length, 0);
+ buffers.push(buffer);
+ buffers.push(Buffer.from(frame.originList[i], 'ascii'));
+ }
+};
+
+Deserializer.ORIGIN = function readOrigin(buffer, frame) {
+ // ignored
+};
+
+
+// [Error Codes](https://tools.ietf.org/html/rfc7540#section-7)
+// ------------------------------------------------------------
+
+var errorCodes = [
+ 'NO_ERROR',
+ 'PROTOCOL_ERROR',
+ 'INTERNAL_ERROR',
+ 'FLOW_CONTROL_ERROR',
+ 'SETTINGS_TIMEOUT',
+ 'STREAM_CLOSED',
+ 'FRAME_SIZE_ERROR',
+ 'REFUSED_STREAM',
+ 'CANCEL',
+ 'COMPRESSION_ERROR',
+ 'CONNECT_ERROR',
+ 'ENHANCE_YOUR_CALM',
+ 'INADEQUATE_SECURITY',
+ 'HTTP_1_1_REQUIRED'
+];
+
+// Logging
+// -------
+
+// [Bunyan serializers](https://github.com/trentm/node-bunyan#serializers) to improve logging output
+// for debug messages emitted in this component.
+exports.serializers = {};
+
+// * `frame` serializer: it transforms data attributes from Buffers to hex strings and filters out
+// flags that are not present.
+var frameCounter = 0;
+exports.serializers.frame = function(frame) {
+ if (!frame) {
+ return null;
+ }
+
+ if ('id' in frame) {
+ return frame.id;
+ }
+
+ frame.id = frameCounter;
+ frameCounter += 1;
+
+ var logEntry = { id: frame.id };
+ genericAttributes.concat(typeSpecificAttributes[frame.type]).forEach(function(name) {
+ logEntry[name] = frame[name];
+ });
+
+ if (frame.data instanceof Buffer) {
+ if (logEntry.data.length > 50) {
+ logEntry.data = frame.data.slice(0, 47).toString('hex') + '...';
+ } else {
+ logEntry.data = frame.data.toString('hex');
+ }
+
+ if (!('length' in logEntry)) {
+ logEntry.length = frame.data.length;
+ }
+ }
+
+ if (frame.promised_stream instanceof Object) {
+ logEntry.promised_stream = 'stream-' + frame.promised_stream.id;
+ }
+
+ logEntry.flags = Object.keys(frame.flags || {}).filter(function(name) {
+ return frame.flags[name] === true;
+ });
+
+ return logEntry;
+};
+
+// * `data` serializer: it simply transforms a buffer to a hex string.
+exports.serializers.data = function(data) {
+ return data.toString('hex');
+};
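As a quick end-to-end check of the framer above, the Serializer and Deserializer can be piped back to back so that a frame object survives a round trip through its binary encoding. This is a sketch under the assumption that a bunyan-style logger is available; paths are relative to lib/protocol.

    var framer = require('./framer');
    var bunyan = require('bunyan');                    // assumed logging dependency

    var log = bunyan.createLogger({ name: 'framer-demo', serializers: framer.serializers });

    var serializer = new framer.Serializer(log);
    var deserializer = new framer.Deserializer(log);   // the role argument only matters for SETTINGS

    // The Serializer emits raw header + payload buffers; feeding them straight back into the
    // Deserializer reconstructs an equivalent frame object.
    serializer.pipe(deserializer);

    deserializer.on('data', function(frame) {
      console.log(frame.type, frame.stream, frame.window_size);   // -> WINDOW_UPDATE 1 1024
    });

    serializer.write({ type: 'WINDOW_UPDATE', flags: {}, stream: 1, window_size: 1024 });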
diff --git a/testing/xpcshell/node-http2/lib/protocol/index.js b/testing/xpcshell/node-http2/lib/protocol/index.js
new file mode 100644
index 0000000000..0f3720e2ce
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/index.js
@@ -0,0 +1,91 @@
+// This is an implementation of the [HTTP/2][http2]
+// framing layer for [node.js][node].
+//
+// The main building blocks are [node.js streams][node-stream] that are connected through pipes.
+//
+// The main components are:
+//
+// * [Endpoint](endpoint.html): represents an HTTP/2 endpoint (client or server). It's
+//   responsible for the first part of the handshake process (sending/receiving the
+// [connection header][http2-connheader]) and manages other components (framer, compressor,
+// connection, streams) that make up a client or server.
+//
+// * [Connection](connection.html): multiplexes the active HTTP/2 streams, manages the connection
+//   lifecycle and settings, and is responsible for enforcing the connection-level limits (flow
+//   control, initiated stream limit)
+//
+// * [Stream](stream.html): implementation of the [HTTP/2 stream concept][http2-stream].
+// Implements the [stream state machine][http2-streamstate] defined by the standard, provides
+// management methods and events for using the stream (sending/receiving headers, data, etc.),
+// and enforces stream level constraints (flow control, sending only legal frames).
+//
+// * [Flow](flow.html): implements flow control for Connection and Stream as parent class.
+//
+// * [Compressor and Decompressor](compressor.html): compression and decompression of HEADERS and
+//   PUSH_PROMISE frames
+//
+// * [Serializer and Deserializer](framer.html): the lowest layer in the stack that transforms
+// between the binary and the JavaScript object representation of HTTP/2 frames
+//
+// [http2]: https://tools.ietf.org/html/rfc7540
+// [http2-connheader]: https://tools.ietf.org/html/rfc7540#section-3.5
+// [http2-stream]: https://tools.ietf.org/html/rfc7540#section-5
+// [http2-streamstate]: https://tools.ietf.org/html/rfc7540#section-5.1
+// [node]: https://nodejs.org/
+// [node-stream]: https://nodejs.org/api/stream.html
+// [node-https]: https://nodejs.org/api/https.html
+// [node-http]: https://nodejs.org/api/http.html
+
+exports.VERSION = 'h2';
+
+exports.Endpoint = require('./endpoint').Endpoint;
+
+/* Bunyan serializers exported by submodules that are worth adding when creating a logger. */
+exports.serializers = {};
+var modules = ['./framer', './compressor', './flow', './connection', './stream', './endpoint'];
+modules.map(require).forEach(function(module) {
+ for (var name in module.serializers) {
+ exports.serializers[name] = module.serializers[name];
+ }
+});
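+
+/* Illustrative sketch (not part of the original source), assuming the optional bunyan dev
+   dependency is installed: the aggregated serializers can be passed straight to a bunyan logger
+   so that frames, streams, endpoints, etc. show up compactly in debug output.
+
+     var bunyan = require('bunyan');
+     var protocol = require('./lib/protocol');  // hypothetical path, relative to the package root
+     var log = bunyan.createLogger({
+       name: 'http2-debug',
+       level: 'debug',
+       serializers: protocol.serializers
+     });
+*/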
+
+/*
+ Stream API Endpoint API
+ Stream data
+
+ | ^ | ^
+ | | | |
+ | | | |
+ +-----------|------------|---------------------------------------+
+ | | | Endpoint |
+ | | | |
+ | +-------|------------|-----------------------------------+ |
+ | | | | Connection | |
+ | | v | | |
+ | | +-----------------------+ +-------------------- | |
+ | | | Stream | | Stream ... | |
+ | | +-----------------------+ +-------------------- | |
+ | | | ^ | ^ | |
+ | | v | v | | |
+ | | +------------+--+--------+--+------------+- ... | |
+ | | | ^ | |
+ | | | | | |
+ | +-----------------------|--------|-----------------------+ |
+ | | | |
+ | v | |
+ | +--------------------------+ +--------------------------+ |
+ | | Compressor | | Decompressor | |
+ | +--------------------------+ +--------------------------+ |
+ | | ^ |
+ | v | |
+ | +--------------------------+ +--------------------------+ |
+ | | Serializer | | Deserializer | |
+ | +--------------------------+ +--------------------------+ |
+ | | ^ |
+ +---------------------------|--------|---------------------------+
+ | |
+ v |
+
+ Raw data
+
+*/
diff --git a/testing/xpcshell/node-http2/lib/protocol/stream.js b/testing/xpcshell/node-http2/lib/protocol/stream.js
new file mode 100644
index 0000000000..b80dff0098
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/stream.js
@@ -0,0 +1,677 @@
+var assert = require('assert');
+
+// The Stream class
+// ================
+
+// Stream is a [Duplex stream](https://nodejs.org/api/stream.html#stream_class_stream_duplex)
+// subclass that implements the [HTTP/2 Stream](https://tools.ietf.org/html/rfc7540#section-5)
+// concept. It has two 'sides': one that is used by the user to send/receive data (the `stream`
+// object itself) and one that is used by a Connection to read/write frames to/from the other peer
+// (`stream.upstream`).
+
+var Duplex = require('stream').Duplex;
+
+exports.Stream = Stream;
+
+// Public API
+// ----------
+
+// * **new Stream(log, connection)**: create a new Stream
+//
+// * **Event: 'headers' (headers)**: signals incoming headers
+//
+// * **Event: 'promise' (stream, headers)**: signals an incoming push promise
+//
+// * **Event: 'priority' (priority)**: signals a priority change. `priority` is a number between 0
+// (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// * **Event: 'error' (type)**: signals an error
+//
+// * **headers(headers)**: send headers
+//
+// * **promise(headers): Stream**: promise a stream
+//
+// * **priority(priority)**: set the priority of the stream. Priority can be changed by the peer
+// too, but once it is set locally, it can not be changed remotely.
+//
+// * **reset(error)**: reset the stream with an error code
+//
+// * **upstream**: a [Flow](flow.html) that the parent connection uses to write/read frames
+//   that are to be sent to, or have arrived from, the peer and are related to this stream.
+//
+// Headers are always in the [regular node.js header format][1].
+// [1]: https://nodejs.org/api/http.html#http_message_headers
+
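+// A minimal usage sketch (not part of the original source), assuming `stream` was obtained from a
+// Connection (e.g. via `connection.createStream()` or the connection's 'stream' event, as in the
+// tests):
+//
+//     stream.on('headers', function(headers) { /* handle request/response headers */ });
+//     stream.on('data', function(chunk) { /* handle body data */ });
+//     stream.headers({ ':method': 'GET', ':path': '/' });
+//     stream.end();
+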
+// Constructor
+// -----------
+
+// The main aspects of managing the stream are:
+function Stream(log, connection) {
+ Duplex.call(this);
+
+ // * logging
+ this._log = log.child({ component: 'stream', s: this });
+
+ // * receiving and sending stream management commands
+ this._initializeManagement();
+
+ // * sending and receiving frames to/from the upstream connection
+ this._initializeDataFlow();
+
+ // * maintaining the state of the stream (idle, open, closed, etc.) and error detection
+ this._initializeState();
+
+ this.connection = connection;
+ this.sentEndStream = false;
+}
+
+Stream.prototype = Object.create(Duplex.prototype, { constructor: { value: Stream } });
+
+// Managing the stream
+// -------------------
+
+// the default stream priority is 2^30
+var DEFAULT_PRIORITY = Math.pow(2, 30);
+var MAX_PRIORITY = Math.pow(2, 31) - 1;
+
+// PUSH_PROMISE and HEADERS are forwarded to the user through events.
+Stream.prototype._initializeManagement = function _initializeManagement() {
+ this._resetSent = false;
+ this._priority = DEFAULT_PRIORITY;
+ this._letPeerPrioritize = true;
+};
+
+Stream.prototype.promise = function promise(headers) {
+ var stream = new Stream(this._log, this.connection);
+ stream._priority = Math.min(this._priority + 1, MAX_PRIORITY);
+ this._pushUpstream({
+ type: 'PUSH_PROMISE',
+ flags: {},
+ stream: this.id,
+ promised_stream: stream,
+ headers: headers
+ });
+ return stream;
+};
+
+Stream.prototype._onPromise = function _onPromise(frame) {
+ this.emit('promise', frame.promised_stream, frame.headers);
+};
+
+Stream.prototype.headers = function headers(headers) {
+ this._pushUpstream({
+ type: 'HEADERS',
+ flags: {},
+ stream: this.id,
+ headers: headers
+ });
+};
+
+Stream.prototype.trailers = function trailers(trailers) {
+ this.sentEndStream = true;
+ this._pushUpstream({
+ type: 'HEADERS',
+ flags: {'END_STREAM': true},
+ stream: this.id,
+ headers: trailers
+ });
+};
+
+Stream.prototype._onHeaders = function _onHeaders(frame) {
+ if (frame.priority !== undefined) {
+ this.priority(frame.priority, true);
+ }
+ this.emit('headers', frame.headers);
+};
+
+Stream.prototype.priority = function priority(priority, peer) {
+ if ((peer && this._letPeerPrioritize) || !peer) {
+ if (!peer) {
+ this._letPeerPrioritize = false;
+
+ var lastFrame = this.upstream.getLastQueuedFrame();
+ if (lastFrame && ((lastFrame.type === 'HEADERS') || (lastFrame.type === 'PRIORITY'))) {
+ lastFrame.priority = priority;
+ } else {
+ this._pushUpstream({
+ type: 'PRIORITY',
+ flags: {},
+ stream: this.id,
+ priority: priority
+ });
+ }
+ }
+
+ this._log.debug({ priority: priority }, 'Changing priority');
+ this.emit('priority', priority);
+ this._priority = priority;
+ }
+};
+
+Stream.prototype._onPriority = function _onPriority(frame) {
+ this.priority(frame.priority, true);
+};
+
+// Resetting the stream. Normally, an endpoint SHOULD NOT send more than one RST_STREAM frame for
+// any stream.
+Stream.prototype.reset = function reset(error) {
+ if (!this._resetSent) {
+ this._resetSent = true;
+ this._pushUpstream({
+ type: 'RST_STREAM',
+ flags: {},
+ stream: this.id,
+ error: error
+ });
+ }
+};
+
+// Specify an alternate service for the origin of this stream
+Stream.prototype.altsvc = function altsvc(host, port, protocolID, maxAge, origin) {
+ var stream;
+ if (origin) {
+ stream = 0;
+ } else {
+ stream = this.id;
+ }
+ this._pushUpstream({
+ type: 'ALTSVC',
+ flags: {},
+ stream: stream,
+ host: host,
+ port: port,
+ protocolID: protocolID,
+ origin: origin,
+ maxAge: maxAge
+ });
+};
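+
+// Illustrative example (not part of the original source): advertising an alternative service for
+// a whole origin rather than just this stream might look like
+//   stream.altsvc('alt.example.com', 443, 'h2', 86400, 'https://www.example.com');
+// which, per the branch above, carries the ALTSVC frame on stream 0 because an origin is given.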
+
+// Data flow
+// ---------
+
+// The incoming and the generated outgoing frames are received/transmitted on the `this.upstream`
+// [Flow](flow.html). The [Connection](connection.html) object instantiating the stream will read
+// and write frames to/from it. The stream itself is a regular [Duplex stream][1], and is used by
+// the user to write or read the body of the request.
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_duplex
+
+// upstream side stream user side
+//
+// +------------------------------------+
+// | |
+// +------------------+ |
+// | upstream | |
+// | | |
+// +--+ | +--|
+// read() | | _send() | _write() | | write(buf)
+// <--------------|B |<--------------|--------------| B|<------------
+// | | | | |
+// frames +--+ | +--| buffers
+// | | | | |
+// -------------->|B |---------------|------------->| B|------------>
+// write(frame) | | _receive() | _read() | | read()
+// +--+ | +--|
+// | | |
+// | | |
+// +------------------+ |
+// | |
+// +------------------------------------+
+//
+// B: input or output buffer
+
+var Flow = require('./flow').Flow;
+
+Stream.prototype._initializeDataFlow = function _initializeDataFlow() {
+ this.id = undefined;
+
+ this._ended = false;
+
+ this.upstream = new Flow();
+ this.upstream._log = this._log;
+ this.upstream._send = this._send.bind(this);
+ this.upstream._receive = this._receive.bind(this);
+ this.upstream.write = this._writeUpstream.bind(this);
+ this.upstream.on('error', this.emit.bind(this, 'error'));
+
+ this.on('finish', this._finishing);
+};
+
+Stream.prototype._pushUpstream = function _pushUpstream(frame) {
+ this.upstream.push(frame);
+ this._transition(true, frame);
+};
+
+// Overriding the upstream's `write` allows us to act immediately instead of waiting for the input
+// queue to empty. This is important in case of control frames.
+Stream.prototype._writeUpstream = function _writeUpstream(frame) {
+ this._log.debug({ frame: frame }, 'Receiving frame');
+
+ var moreNeeded = Flow.prototype.write.call(this.upstream, frame);
+
+ // * Transition to a new state if that's the effect of receiving the frame
+ this._transition(false, frame);
+
+  // * If it's a control frame, call the appropriate handler method.
+ if (frame.type === 'HEADERS') {
+ if (this._processedHeaders && !frame.flags['END_STREAM']) {
+ this.emit('error', 'PROTOCOL_ERROR');
+ }
+ this._processedHeaders = true;
+ this._onHeaders(frame);
+ } else if (frame.type === 'PUSH_PROMISE') {
+ this._onPromise(frame);
+ } else if (frame.type === 'PRIORITY') {
+ this._onPriority(frame);
+ } else if (frame.type === 'ALTSVC') {
+ // TODO
+ } else if (frame.type === 'ORIGIN') {
+ // TODO
+ }
+
+ // * If it's an invalid stream level frame, emit error
+ else if ((frame.type !== 'DATA') &&
+ (frame.type !== 'WINDOW_UPDATE') &&
+ (frame.type !== 'RST_STREAM')) {
+ this._log.error({ frame: frame }, 'Invalid stream level frame');
+ this.emit('error', 'PROTOCOL_ERROR');
+ }
+
+ return moreNeeded;
+};
+
+// The `_receive` method (= `upstream._receive`) gets called when there's an incoming frame.
+Stream.prototype._receive = function _receive(frame, ready) {
+ // * If it's a DATA frame, then push the payload into the output buffer on the other side.
+ // Call ready when the other side is ready to receive more.
+ if (!this._ended && (frame.type === 'DATA')) {
+ var moreNeeded = this.push(frame.data);
+ if (!moreNeeded) {
+ this._receiveMore = ready;
+ }
+ }
+
+ // * Any frame may signal the end of the stream with the END_STREAM flag
+ if (!this._ended && (frame.flags.END_STREAM || (frame.type === 'RST_STREAM'))) {
+ this.push(null);
+ this._ended = true;
+ }
+
+ // * Postpone calling `ready` if `push()` returned a falsy value
+ if (this._receiveMore !== ready) {
+ ready();
+ }
+};
+
+// The `_read` method is called when the user side is ready to receive more data. If there's a
+// pending write on the upstream, then call its pending ready callback to receive more frames.
+Stream.prototype._read = function _read() {
+ if (this._receiveMore) {
+ var receiveMore = this._receiveMore;
+ delete this._receiveMore;
+ receiveMore();
+ }
+};
+
+// The `_write` method gets called when there's a write request from the user.
+Stream.prototype._write = function _write(buffer, encoding, ready) {
+ // * Chunking is done by the upstream Flow.
+ var moreNeeded = this._pushUpstream({
+ type: 'DATA',
+ flags: {},
+ stream: this.id,
+ data: buffer
+ });
+
+ // * Call ready when upstream is ready to receive more frames.
+ if (moreNeeded) {
+ ready();
+ } else {
+ this._sendMore = ready;
+ }
+};
+
+// The `_send` (= `upstream._send`) method is called when upstream is ready to receive more frames.
+// If there's a pending write on the user side, then call its pending ready callback to receive more
+// writes.
+Stream.prototype._send = function _send() {
+ if (this._sendMore) {
+ var sendMore = this._sendMore;
+ delete this._sendMore;
+ sendMore();
+ }
+};
+
+// When the stream is finishing (the user calls `end()` on it), then we have to set the `END_STREAM`
+// flag on the last frame. If there's no frame in the queue, or if it doesn't support this flag,
+// then we create a 0 length DATA frame. We could do this all the time, but putting the flag on an
+// existing frame is a nice optimization.
+var emptyBuffer = Buffer.alloc(0);
+Stream.prototype._finishing = function _finishing() {
+ var endFrame = {
+ type: 'DATA',
+ flags: { END_STREAM: true },
+ stream: this.id,
+ data: emptyBuffer
+ };
+
+ if (this.sentEndStream) {
+ this._log.debug('Already sent END_STREAM, not sending again.');
+ return;
+ }
+
+ this.sentEndStream = true;
+ var lastFrame = this.upstream.getLastQueuedFrame();
+ if (lastFrame && ((lastFrame.type === 'DATA') || (lastFrame.type === 'HEADERS'))) {
+ this._log.debug({ frame: lastFrame }, 'Marking last frame with END_STREAM flag.');
+ lastFrame.flags.END_STREAM = true;
+ this._transition(true, endFrame);
+ } else {
+ this._pushUpstream(endFrame);
+ }
+};
+
+// [Stream States](https://tools.ietf.org/html/rfc7540#section-5.1)
+// ----------------
+//
+// +--------+
+// PP | | PP
+// ,--------| idle |--------.
+// / | | \
+// v +--------+ v
+// +----------+ | +----------+
+// | | | H | |
+// ,---| reserved | | | reserved |---.
+// | | (local) | v | (remote) | |
+// | +----------+ +--------+ +----------+ |
+// | | ES | | ES | |
+// | | H ,-------| open |-------. | H |
+// | | / | | \ | |
+// | v v +--------+ v v |
+// | +----------+ | +----------+ |
+// | | half | | | half | |
+// | | closed | | R | closed | |
+// | | (remote) | | | (local) | |
+// | +----------+ | +----------+ |
+// | | v | |
+// | | ES / R +--------+ ES / R | |
+// | `----------->| |<-----------' |
+// | R | closed | R |
+// `-------------------->| |<--------------------'
+// +--------+
+
+// Streams begin in the IDLE state and transitions happen when there's an incoming or outgoing frame
+Stream.prototype._initializeState = function _initializeState() {
+ this.state = 'IDLE';
+ this._initiated = undefined;
+ this._closedByUs = undefined;
+ this._closedWithRst = undefined;
+ this._processedHeaders = false;
+};
+
+// Only `_setState` should change `this.state` directly. It also logs the state change and notifies
+// interested parties using the 'state' event.
+Stream.prototype._setState = function transition(state) {
+ assert(this.state !== state);
+ this._log.debug({ from: this.state, to: state }, 'State transition');
+ this.state = state;
+ this.emit('state', state);
+};
+
+// A state is 'active' if the stream in that state counts towards the concurrency limit. Streams
+// that are in the "open" state, or either of the "half closed" states count toward this limit.
+function activeState(state) {
+ return ((state === 'HALF_CLOSED_LOCAL') || (state === 'HALF_CLOSED_REMOTE') || (state === 'OPEN'));
+}
+
+// `_transition` is called every time there's an incoming or outgoing frame. It manages state
+// transitions, and detects stream errors. A stream error is always caused by a frame that is not
+// allowed in the current state.
+Stream.prototype._transition = function transition(sending, frame) {
+ var receiving = !sending;
+ var connectionError;
+ var streamError;
+
+ var DATA = false, HEADERS = false, PRIORITY = false, ALTSVC = false, ORIGIN = false;
+ var RST_STREAM = false, PUSH_PROMISE = false, WINDOW_UPDATE = false;
+ switch(frame.type) {
+ case 'DATA' : DATA = true; break;
+ case 'HEADERS' : HEADERS = true; break;
+ case 'PRIORITY' : PRIORITY = true; break;
+ case 'RST_STREAM' : RST_STREAM = true; break;
+ case 'PUSH_PROMISE' : PUSH_PROMISE = true; break;
+ case 'WINDOW_UPDATE': WINDOW_UPDATE = true; break;
+ case 'ALTSVC' : ALTSVC = true; break;
+ case 'ORIGIN' : ORIGIN = true; break;
+ }
+
+ var previousState = this.state;
+
+ switch (this.state) {
+ // All streams start in the **idle** state. In this state, no frames have been exchanged.
+ //
+ // * Sending or receiving a HEADERS frame causes the stream to become "open".
+ //
+ // When the HEADERS frame contains the END_STREAM flags, then two state transitions happen.
+ case 'IDLE':
+ if (HEADERS) {
+ this._setState('OPEN');
+ if (frame.flags.END_STREAM) {
+ this._setState(sending ? 'HALF_CLOSED_LOCAL' : 'HALF_CLOSED_REMOTE');
+ }
+ this._initiated = sending;
+ } else if (sending && RST_STREAM) {
+ this._setState('CLOSED');
+ } else if (PRIORITY) {
+ /* No state change */
+ } else {
+ connectionError = 'PROTOCOL_ERROR';
+ }
+ break;
+
+ // A stream in the **reserved (local)** state is one that has been promised by sending a
+ // PUSH_PROMISE frame.
+ //
+ // * The endpoint can send a HEADERS frame. This causes the stream to open in a "half closed
+ // (remote)" state.
+ // * Either endpoint can send a RST_STREAM frame to cause the stream to become "closed". This
+ // releases the stream reservation.
+ // * An endpoint may receive PRIORITY frame in this state.
+ // * An endpoint MUST NOT send any other type of frame in this state.
+ case 'RESERVED_LOCAL':
+ if (sending && HEADERS) {
+ this._setState('HALF_CLOSED_REMOTE');
+ } else if (RST_STREAM) {
+ this._setState('CLOSED');
+ } else if (PRIORITY) {
+ /* No state change */
+ } else {
+ connectionError = 'PROTOCOL_ERROR';
+ }
+ break;
+
+ // A stream in the **reserved (remote)** state has been reserved by a remote peer.
+ //
+ // * Either endpoint can send a RST_STREAM frame to cause the stream to become "closed". This
+ // releases the stream reservation.
+ // * Receiving a HEADERS frame causes the stream to transition to "half closed (local)".
+ // * An endpoint MAY send PRIORITY frames in this state to reprioritize the stream.
+ // * Receiving any other type of frame MUST be treated as a stream error of type PROTOCOL_ERROR.
+ case 'RESERVED_REMOTE':
+ if (RST_STREAM) {
+ this._setState('CLOSED');
+ } else if (receiving && HEADERS) {
+ this._setState('HALF_CLOSED_LOCAL');
+ } else if (PRIORITY || ORIGIN) {
+ /* No state change */
+ } else {
+ connectionError = 'PROTOCOL_ERROR';
+ }
+ break;
+
+ // The **open** state is where both peers can send frames. In this state, sending peers observe
+ // advertised stream level flow control limits.
+ //
+ // * From this state either endpoint can send a frame with a END_STREAM flag set, which causes
+ // the stream to transition into one of the "half closed" states: an endpoint sending a
+ // END_STREAM flag causes the stream state to become "half closed (local)"; an endpoint
+ // receiving a END_STREAM flag causes the stream state to become "half closed (remote)".
+ // * Either endpoint can send a RST_STREAM frame from this state, causing it to transition
+ // immediately to "closed".
+ case 'OPEN':
+ if (frame.flags.END_STREAM) {
+ this._setState(sending ? 'HALF_CLOSED_LOCAL' : 'HALF_CLOSED_REMOTE');
+ } else if (RST_STREAM) {
+ this._setState('CLOSED');
+ } else {
+ /* No state change */
+ }
+ break;
+
+ // A stream that is **half closed (local)** cannot be used for sending frames.
+ //
+ // * A stream transitions from this state to "closed" when a frame that contains a END_STREAM
+ // flag is received, or when either peer sends a RST_STREAM frame.
+ // * An endpoint MAY send or receive PRIORITY frames in this state to reprioritize the stream.
+ // * WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
+ case 'HALF_CLOSED_LOCAL':
+ if (RST_STREAM || (receiving && frame.flags.END_STREAM)) {
+ this._setState('CLOSED');
+ } else if (ORIGIN || ALTSVC || receiving || PRIORITY || (sending && WINDOW_UPDATE)) {
+ /* No state change */
+ } else {
+ connectionError = 'PROTOCOL_ERROR';
+ }
+ break;
+
+ // A stream that is **half closed (remote)** is no longer being used by the peer to send frames.
+ // In this state, an endpoint is no longer obligated to maintain a receiver flow control window
+ // if it performs flow control.
+ //
+ // * If an endpoint receives additional frames for a stream that is in this state it MUST
+ // respond with a stream error of type STREAM_CLOSED.
+ // * A stream can transition from this state to "closed" by sending a frame that contains a
+ // END_STREAM flag, or when either peer sends a RST_STREAM frame.
+ // * An endpoint MAY send or receive PRIORITY frames in this state to reprioritize the stream.
+ // * A receiver MAY receive a WINDOW_UPDATE frame on a "half closed (remote)" stream.
+ case 'HALF_CLOSED_REMOTE':
+ if (RST_STREAM || (sending && frame.flags.END_STREAM)) {
+ this._setState('CLOSED');
+ } else if (ORIGIN || ALTSVC || sending || PRIORITY || (receiving && WINDOW_UPDATE)) {
+ /* No state change */
+ } else {
+ connectionError = 'PROTOCOL_ERROR';
+ }
+ break;
+
+ // The **closed** state is the terminal state.
+ //
+ // * An endpoint MUST NOT send frames on a closed stream. An endpoint that receives a frame
+ // after receiving a RST_STREAM or a frame containing a END_STREAM flag on that stream MUST
+ // treat that as a stream error of type STREAM_CLOSED.
+ // * WINDOW_UPDATE, PRIORITY or RST_STREAM frames can be received in this state for a short
+ // period after a frame containing an END_STREAM flag is sent. Until the remote peer receives
+ // and processes the frame bearing the END_STREAM flag, it might send either frame type.
+ // Endpoints MUST ignore WINDOW_UPDATE frames received in this state, though endpoints MAY
+ // choose to treat WINDOW_UPDATE frames that arrive a significant time after sending
+ // END_STREAM as a connection error of type PROTOCOL_ERROR.
+ // * If this state is reached as a result of sending a RST_STREAM frame, the peer that receives
+ // the RST_STREAM might have already sent - or enqueued for sending - frames on the stream
+ // that cannot be withdrawn. An endpoint that sends a RST_STREAM frame MUST ignore frames that
+ // it receives on closed streams after it has sent a RST_STREAM frame. An endpoint MAY choose
+ // to limit the period over which it ignores frames and treat frames that arrive after this
+ // time as being in error.
+ // * An endpoint might receive a PUSH_PROMISE frame after it sends RST_STREAM. PUSH_PROMISE
+ // causes a stream to become "reserved". If promised streams are not desired, a RST_STREAM
+ // can be used to close any of those streams.
+ case 'CLOSED':
+ if (PRIORITY || (sending && RST_STREAM) ||
+ (receiving && WINDOW_UPDATE) ||
+ (receiving && this._closedByUs &&
+ (this._closedWithRst || RST_STREAM || ALTSVC || ORIGIN))) {
+ /* No state change */
+ } else {
+ streamError = 'STREAM_CLOSED';
+ }
+ break;
+ }
+
+  // Note which endpoint closed the stream and whether it was closed with RST_STREAM; this may be
+  // important in edge cases. For example, when the peer tries to cancel a promised stream but we
+  // have already sent all the data on it, the stream is in the CLOSED state, yet we want to
+  // ignore the incoming RST_STREAM.
+ if ((this.state === 'CLOSED') && (previousState !== 'CLOSED')) {
+ this._closedByUs = sending;
+ this._closedWithRst = RST_STREAM;
+ }
+
+ // Sending/receiving a PUSH_PROMISE
+ //
+ // * Sending a PUSH_PROMISE frame marks the associated stream for later use. The stream state
+ // for the reserved stream transitions to "reserved (local)".
+ // * Receiving a PUSH_PROMISE frame marks the associated stream as reserved by the remote peer.
+ // The state of the stream becomes "reserved (remote)".
+ if (PUSH_PROMISE && !connectionError && !streamError) {
+    /* This assertion must hold, because _transition is called immediately when a frame is written
+       to the stream. If it were called only when a frame leaves the input queue, the state of the
+       reserved stream could have changed by then. */
+ assert(frame.promised_stream.state === 'IDLE', frame.promised_stream.state);
+ frame.promised_stream._setState(sending ? 'RESERVED_LOCAL' : 'RESERVED_REMOTE');
+ frame.promised_stream._initiated = sending;
+ }
+
+ // Signaling how sending/receiving this frame changes the active stream count (-1, 0 or +1)
+ if (this._initiated) {
+ var change = (activeState(this.state) - activeState(previousState));
+ if (sending) {
+ frame.count_change = change;
+ } else {
+ frame.count_change(change);
+ }
+ } else if (sending) {
+ frame.count_change = 0;
+ }
+
+ // Common error handling.
+ if (connectionError || streamError) {
+ var info = {
+ error: connectionError,
+ frame: frame,
+ state: this.state,
+ closedByUs: this._closedByUs,
+ closedWithRst: this._closedWithRst
+ };
+
+    // * When sending something invalid, emitting an error, since it is probably a bug.
+ if (sending) {
+ this._log.error(info, 'Sending illegal frame.');
+ return this.emit('error', new Error('Sending illegal frame (' + frame.type + ') in ' + this.state + ' state.'));
+ }
+
+    // * In case of a serious problem, emitting an error and letting someone else handle it
+    //   (e.g. closing the connection)
+ // * When receiving something invalid, sending an RST_STREAM using the `reset` method.
+ // This will automatically cause a transition to the CLOSED state.
+ else {
+ this._log.error(info, 'Received illegal frame.');
+ if (connectionError) {
+ this.emit('connectionError', connectionError);
+ } else {
+ this.reset(streamError);
+ this.emit('error', streamError);
+ }
+ }
+ }
+};
+
+// Bunyan serializers
+// ------------------
+
+exports.serializers = {};
+
+var nextId = 0;
+exports.serializers.s = function(stream) {
+ if (!('_id' in stream)) {
+ stream._id = nextId;
+ nextId += 1;
+ }
+ return stream._id;
+};
diff --git a/testing/xpcshell/node-http2/package.json b/testing/xpcshell/node-http2/package.json
new file mode 100644
index 0000000000..ca1d3cbb73
--- /dev/null
+++ b/testing/xpcshell/node-http2/package.json
@@ -0,0 +1,46 @@
+{
+ "name": "http2",
+ "version": "3.3.8",
+ "description": "An HTTP/2 client and server implementation",
+ "main": "lib/index.js",
+ "engines": {
+ "node": ">=0.12.0 <9.0.0"
+ },
+ "devDependencies": {
+ "istanbul": "*",
+ "chai": "*",
+ "mocha": "*",
+ "docco": "*",
+ "bunyan": "*"
+ },
+ "scripts": {
+ "test": "istanbul test _mocha -- --reporter spec --slow 500 --timeout 15000",
+ "doc": "docco lib/* --output doc --layout parallel --template root.jst --css doc/docco.css && docco lib/protocol/* --output doc/protocol --layout parallel --template protocol.jst --css doc/docco.css"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/molnarg/node-http2.git"
+ },
+ "homepage": "https://github.com/molnarg/node-http2",
+ "bugs": {
+ "url": "https://github.com/molnarg/node-http2/issues"
+ },
+ "keywords": [
+ "http",
+ "http2",
+ "client",
+ "server"
+ ],
+ "author": "Gábor Molnár <gabor@molnar.es> (http://gabor.molnar.es)",
+ "contributors": [
+ "Nick Hurley",
+ "Mike Belshe",
+ "Yoshihiro Iwanaga",
+ "Igor Novikov",
+ "James Willcox",
+ "David Björklund",
+ "Patrick McManus"
+ ],
+ "license": "MIT",
+ "readmeFilename": "README.md"
+}
diff --git a/testing/xpcshell/node-http2/test/compressor.js b/testing/xpcshell/node-http2/test/compressor.js
new file mode 100644
index 0000000000..4588b8a3fd
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/compressor.js
@@ -0,0 +1,575 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var compressor = require('../lib/protocol/compressor');
+var HeaderTable = compressor.HeaderTable;
+var HuffmanTable = compressor.HuffmanTable;
+var HeaderSetCompressor = compressor.HeaderSetCompressor;
+var HeaderSetDecompressor = compressor.HeaderSetDecompressor;
+var Compressor = compressor.Compressor;
+var Decompressor = compressor.Decompressor;
+
+var test_integers = [{
+ N: 5,
+ I: 10,
+ buffer: Buffer.from([10])
+}, {
+ N: 0,
+ I: 10,
+ buffer: Buffer.from([10])
+}, {
+ N: 5,
+ I: 1337,
+ buffer: Buffer.from([31, 128 + 26, 10])
+}, {
+ N: 0,
+ I: 1337,
+ buffer: Buffer.from([128 + 57, 10])
+}];
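+
+// Worked example (added for illustration, following RFC 7541 section 5.1): with a 5-bit prefix,
+// 1337 does not fit in the prefix maximum of 31, so the prefix is filled with 31 and the
+// remainder 1306 is emitted as a base-128 varint: 1306 % 128 = 26 with the continuation bit set
+// (128 + 26), then 1306 >> 7 = 10, giving the [31, 128 + 26, 10] buffer above.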
+
+var test_strings = [{
+ string: 'www.foo.com',
+ buffer: Buffer.from('89f1e3c2f29ceb90f4ff', 'hex')
+}, {
+ string: 'éáűőúöüó€',
+ buffer: Buffer.from('13c3a9c3a1c5b1c591c3bac3b6c3bcc3b3e282ac', 'hex')
+}];
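+
+// Note (added for illustration): the first byte of each string literal carries the Huffman flag
+// plus a 7-bit length prefix, so 0x89 means "Huffman coded, 9 octets follow" for 'www.foo.com',
+// while 0x13 means "raw, 19 octets follow" for the UTF-8 bytes of the second string.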
+
+var test_huffman_request = {
+ 'GET': 'c5837f',
+ 'http': '9d29af',
+ '/': '63',
+ 'www.foo.com': 'f1e3c2f29ceb90f4ff',
+ 'https': '9d29ad1f',
+ 'www.bar.com': 'f1e3c2f18ec5c87a7f',
+ 'no-cache': 'a8eb10649cbf',
+ '/custom-path.css': '6096a127a56ac699d72211',
+ 'custom-key': '25a849e95ba97d7f',
+ 'custom-value': '25a849e95bb8e8b4bf'
+};
+
+var test_huffman_response = {
+ '302': '6402',
+ 'private': 'aec3771a4b',
+ 'Mon, 21 OCt 2013 20:13:21 GMT': 'd07abe941054d5792a0801654102e059b820a98b46ff',
+ ': https://www.bar.com': 'b8a4e94d68b8c31e3c785e31d8b90f4f',
+ '200': '1001',
+ 'Mon, 21 OCt 2013 20:13:22 GMT': 'd07abe941054d5792a0801654102e059b821298b46ff',
+ 'https://www.bar.com': '9d29ad171863c78f0bc63b1721e9',
+ 'gzip': '9bd9ab',
+ 'foo=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\
+AAAAAAAAAAAAAAAAAAAAAAAAAALASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKHQWOEIUAL\
+QWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKH\
+QWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEO\
+IUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOP\
+IUAXQWEOIUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234ZZZZZZZZZZ\
+ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ1234 m\
+ax-age=3600; version=1': '94e7821861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861873c3bafe5cd8f666bbfbf9ab672c1ab5e4e10fe6ce583564e10fe67cb9b1ece5ab064e10e7d9cb06ac9c21fccfb307087f33e7cd961dd7f672c1ab86487f34844cb59e1dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab27087f33e5cd8f672d583270873ece583564e10fe67d983843f99f3e6cb0eebfb3960d5c3243f9a42265acf0eebf97363d99aefefe6ad9cb06ad793843f9b3960d593843f99f2e6c7b396ac1938439f672c1ab27087f33ecc1c21fccf9f3658775fd9cb06ae1921fcd21132d678775fcb9b1eccd77f7f356ce58356bc9c21fcd9cb06ac9c21fccf97363d9cb560c9c21cfb3960d593843f99f660e10fe67cf9b2c3bafece583570c90fe6908996bf7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f42265a5291f9587316065c003ed4ee5b1063d5007f',
+ 'foo=ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\
+ZZZZZZZZZZZZZZZZZZZZZZZZZZLASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKHQWOEIUAL\
+QWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKH\
+QWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEO\
+IUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOP\
+IUAXQWEOIUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234AAAAAAAAAA\
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234 m\
+ax-age=3600; version=1': '94e783f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f73c3bafe5cd8f666bbfbf9ab672c1ab5e4e10fe6ce583564e10fe67cb9b1ece5ab064e10e7d9cb06ac9c21fccfb307087f33e7cd961dd7f672c1ab86487f34844cb59e1dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab27087f33e5cd8f672d583270873ece583564e10fe67d983843f99f3e6cb0eebfb3960d5c3243f9a42265acf0eebf97363d99aefefe6ad9cb06ad793843f9b3960d593843f99f2e6c7b396ac1938439f672c1ab27087f33ecc1c21fccf9f3658775fd9cb06ae1921fcd21132d678775fcb9b1eccd77f7f356ce58356bc9c21fcd9cb06ac9c21fccf97363d9cb560c9c21cfb3960d593843f99f660e10fe67cf9b2c3bafece583570c90fe6908996a1861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861842265a5291f9587316065c003ed4ee5b1063d5007f'
+};
+
+var test_headers = [{
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 5,
+ value: 5,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('86', 'hex')
+}, {
+ // index
+ header: {
+ name: 3,
+ value: 3,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('84', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 0,
+ value: 'www.foo.com',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('41' + '89f1e3c2f29ceb90f4ff', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('82', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('87', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 3,
+ value: 3,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('84', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 0,
+ value: 'www.bar.com',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('41' + '89f1e3c2f18ec5c87a7f', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 23,
+ value: 'no-cache',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('58' + '86a8eb10649cbf', 'hex')
+}, {
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('87', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 3,
+ value: '/custom-path.css',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('44' + '8b6096a127a56ac699d72211', 'hex')
+}, {
+ // index
+ header: {
+ name: 63,
+ value: 63,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('C0', 'hex')
+}, {
+ // literal w/index, new name & value
+ header: {
+ name: 'custom-key',
+ value: 'custom-value',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('40' + '8825a849e95ba97d7f' + '8925a849e95bb8e8b4bf', 'hex')
+}, {
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('87', 'hex')
+}, {
+ // index
+ header: {
+ name: 62,
+ value: 62,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('BF', 'hex')
+}, {
+ // index
+ header: {
+ name: 65,
+ value: 65,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('C2', 'hex')
+}, {
+ // index
+ header: {
+ name: 64,
+ value: 64,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('C1', 'hex')
+}, {
+ // index
+ header: {
+ name: 61,
+ value: 61,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('BE', 'hex')
+}, {
+ // Literal w/o index, name index
+ header: {
+ name: 6,
+ value: "whatever",
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('07' + '86f138d25ee5b3', 'hex')
+}, {
+ // Literal w/o index, new name & value
+ header: {
+ name: "foo",
+ value: "bar",
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('00' + '8294e7' + '03626172', 'hex')
+}, {
+ // Literal never indexed, name index
+ header: {
+ name: 6,
+ value: "whatever",
+ index: false,
+ mustNeverIndex: true,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('17' + '86f138d25ee5b3', 'hex')
+}, {
+ // Literal never indexed, new name & value
+ header: {
+ name: "foo",
+ value: "bar",
+ index: false,
+ mustNeverIndex: true,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: Buffer.from('10' + '8294e7' + '03626172', 'hex')
+}, {
+ header: {
+ name: -1,
+ value: -1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: true,
+ newMaxSize: 100
+ },
+ buffer: Buffer.from('3F45', 'hex')
+}];
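+
+// Note (added for illustration): the last entry above is a dynamic table size update (RFC 7541
+// section 6.3): the '001' pattern with a 5-bit prefix, so 0x3F holds the prefix maximum 31 and
+// the following byte 0x45 (69) encodes the rest, giving a new maximum size of 31 + 69 = 100.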
+
+var test_header_sets = [{
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'http',
+ ':path': '/',
+ ':authority': 'www.foo.com'
+ },
+ buffer: util.concat(test_headers.slice(0, 4).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/',
+ ':authority': 'www.bar.com',
+ 'cache-control': 'no-cache'
+ },
+ buffer: util.concat(test_headers.slice(4, 9).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/custom-path.css',
+ ':authority': 'www.bar.com',
+ 'custom-key': 'custom-value'
+ },
+ buffer: util.concat(test_headers.slice(9, 14).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/custom-path.css',
+ ':authority': ['www.foo.com', 'www.bar.com'],
+ 'custom-key': 'custom-value'
+ },
+ buffer: util.concat(test_headers.slice(14, 19).map(function(test) { return test.buffer; }))
+}];
+
+describe('compressor.js', function() {
+ describe('HeaderTable', function() {
+ });
+
+ describe('HuffmanTable', function() {
+ describe('method encode(buffer)', function() {
+ it('should return the Huffman encoded version of the input buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var decoded in test_huffman_request) {
+ var encoded = test_huffman_request[decoded];
+ expect(table.encode(Buffer.from(decoded)).toString('hex')).to.equal(encoded);
+ }
+ table = HuffmanTable.huffmanTable;
+ for (decoded in test_huffman_response) {
+ encoded = test_huffman_response[decoded];
+ expect(table.encode(Buffer.from(decoded)).toString('hex')).to.equal(encoded);
+ }
+ });
+ });
+ describe('method decode(buffer)', function() {
+ it('should return the Huffman decoded version of the input buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var decoded in test_huffman_request) {
+ var encoded = test_huffman_request[decoded];
+ expect(table.decode(Buffer.from(encoded, 'hex')).toString()).to.equal(decoded);
+ }
+ table = HuffmanTable.huffmanTable;
+ for (decoded in test_huffman_response) {
+ encoded = test_huffman_response[decoded];
+ expect(table.decode(Buffer.from(encoded, 'hex')).toString()).to.equal(decoded);
+ }
+ });
+ });
+ });
+
+ describe('HeaderSetCompressor', function() {
+ describe('static method .integer(I, N)', function() {
+ it('should return an array of buffers that represent the N-prefix coded form of the integer I', function() {
+ for (var i = 0; i < test_integers.length; i++) {
+ var test = test_integers[i];
+ test.buffer.cursor = 0;
+ expect(util.concat(HeaderSetCompressor.integer(test.I, test.N))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ describe('static method .string(string)', function() {
+ it('should return an array of buffers that represent the encoded form of the string', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_strings.length; i++) {
+ var test = test_strings[i];
+ expect(util.concat(HeaderSetCompressor.string(test.string, table))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ describe('static method .header({ name, value, index })', function() {
+ it('should return an array of buffers that represent the encoded form of the header', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_headers.length; i++) {
+ var test = test_headers[i];
+ expect(util.concat(HeaderSetCompressor.header(test.header, table))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ });
+
+ describe('HeaderSetDecompressor', function() {
+ describe('static method .integer(buffer, N)', function() {
+ it('should return the parsed N-prefix coded number and increase the cursor property of buffer', function() {
+ for (var i = 0; i < test_integers.length; i++) {
+ var test = test_integers[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.integer(test.buffer, test.N)).to.equal(test.I);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ describe('static method .string(buffer)', function() {
+ it('should return the parsed string and increase the cursor property of buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_strings.length; i++) {
+ var test = test_strings[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.string(test.buffer, table)).to.equal(test.string);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ describe('static method .header(buffer)', function() {
+ it('should return the parsed header and increase the cursor property of buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_headers.length; i++) {
+ var test = test_headers[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.header(test.buffer, table)).to.deep.equal(test.header);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ });
+ describe('Decompressor', function() {
+ describe('method decompress(buffer)', function() {
+ it('should return the parsed header set in { name1: value1, name2: [value2, value3], ... } format', function() {
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ for (var i = 0; i < test_header_sets.length - 1; i++) {
+ var header_set = test_header_sets[i];
+ expect(decompressor.decompress(header_set.buffer)).to.deep.equal(header_set.headers);
+ }
+ });
+ });
+ describe('transform stream', function() {
+ it('should emit an error event if a series of header frames is interleaved with other frames', function() {
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ var error_occured = false;
+ decompressor.on('error', function() {
+ error_occured = true;
+ });
+ decompressor.write({
+ type: 'HEADERS',
+ flags: {
+ END_HEADERS: false
+ },
+ data: Buffer.alloc(5)
+ });
+ decompressor.write({
+ type: 'DATA',
+ flags: {},
+ data: Buffer.alloc(5)
+ });
+ expect(error_occured).to.be.equal(true);
+ });
+ });
+ });
+
+ describe('invariant', function() {
+ describe('decompressor.decompress(compressor.compress(headerset)) === headerset', function() {
+ it('should be true for any header set if the states are synchronized', function() {
+ var compressor = new Compressor(util.log, 'REQUEST');
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ var n = test_header_sets.length;
+ for (var i = 0; i < 10; i++) {
+ var headers = test_header_sets[i%n].headers;
+ var compressed = compressor.compress(headers);
+ var decompressed = decompressor.decompress(compressed);
+ expect(decompressed).to.deep.equal(headers);
+ expect(compressor._table).to.deep.equal(decompressor._table);
+ }
+ });
+ });
+ describe('source.pipe(compressor).pipe(decompressor).pipe(destination)', function() {
+ it('should behave like source.pipe(destination) for a stream of frames', function(done) {
+ var compressor = new Compressor(util.log, 'RESPONSE');
+ var decompressor = new Decompressor(util.log, 'RESPONSE');
+ var n = test_header_sets.length;
+ compressor.pipe(decompressor);
+ for (var i = 0; i < 10; i++) {
+ compressor.write({
+ type: i%2 ? 'HEADERS' : 'PUSH_PROMISE',
+ flags: {},
+ headers: test_header_sets[i%n].headers
+ });
+ }
+ setTimeout(function() {
+ for (var j = 0; j < 10; j++) {
+ expect(decompressor.read().headers).to.deep.equal(test_header_sets[j%n].headers);
+ }
+ done();
+ }, 10);
+ });
+ });
+ describe('huffmanTable.decompress(huffmanTable.compress(buffer)) === buffer', function() {
+ it('should be true for any buffer', function() {
+ for (var i = 0; i < 10; i++) {
+ var buffer = [];
+ while (Math.random() > 0.1) {
+ buffer.push(Math.floor(Math.random() * 256))
+ }
+ buffer = Buffer.from(buffer);
+ var table = HuffmanTable.huffmanTable;
+ var result = table.decode(table.encode(buffer));
+ expect(result).to.deep.equal(buffer);
+ }
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/connection.js b/testing/xpcshell/node-http2/test/connection.js
new file mode 100644
index 0000000000..21785d620a
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/connection.js
@@ -0,0 +1,237 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var Connection = require('../lib/protocol/connection').Connection;
+
+var settings = {
+ SETTINGS_MAX_CONCURRENT_STREAMS: 100,
+ SETTINGS_INITIAL_WINDOW_SIZE: 100000
+};
+
+var MAX_PRIORITY = Math.pow(2, 31) - 1;
+var MAX_RANDOM_PRIORITY = 10;
+
+function randomPriority() {
+ return Math.floor(Math.random() * (MAX_RANDOM_PRIORITY + 1));
+}
+
+function expectPriorityOrder(priorities) {
+ priorities.forEach(function(bucket, priority) {
+ bucket.forEach(function(stream) {
+ expect(stream._priority).to.be.equal(priority);
+ });
+ });
+}
+
+describe('connection.js', function() {
+ describe('Connection class', function() {
+ describe('method ._insert(stream)', function() {
+ it('should insert the stream in _streamPriorities in a place determined by stream._priority', function() {
+ var streams = [];
+ var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
+ var streamCount = 10;
+
+ for (var i = 0; i < streamCount; i++) {
+ var stream = { _priority: randomPriority() };
+ connection._insert(stream, stream._priority);
+ expect(connection._streamPriorities[stream._priority]).to.include(stream);
+ }
+
+ expectPriorityOrder(connection._streamPriorities);
+ });
+ });
+ describe('method ._reprioritize(stream)', function() {
+ it('should eject and then insert the stream in _streamPriorities in a place determined by stream._priority', function() {
+ var streams = [];
+ var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
+ var streamCount = 10;
+ var oldPriority, newPriority, stream;
+
+ for (var i = 0; i < streamCount; i++) {
+ oldPriority = randomPriority();
+ while ((newPriority = randomPriority()) === oldPriority);
+ stream = { _priority: oldPriority };
+ connection._insert(stream, oldPriority);
+ connection._reprioritize(stream, newPriority);
+ stream._priority = newPriority;
+
+ expect(connection._streamPriorities[newPriority]).to.include(stream);
+ expect(connection._streamPriorities[oldPriority] || []).to.not.include(stream);
+ }
+
+ expectPriorityOrder(streams);
+ });
+ });
+ describe('invalid operation', function() {
+ describe('unsolicited ping answer', function() {
+ it('should be ignored', function() {
+ var connection = new Connection(util.log, 1, settings);
+
+ connection._receivePing({
+ stream: 0,
+ type: 'PING',
+ flags: {
+ 'PONG': true
+ },
+ data: Buffer.alloc(8)
+ });
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ var c, s;
+ beforeEach(function() {
+ c = new Connection(util.log.child({ role: 'client' }), 1, settings);
+ s = new Connection(util.log.child({ role: 'client' }), 2, settings);
+ c.pipe(s).pipe(c);
+ });
+
+ describe('connection setup', function() {
+ it('should work as expected', function(done) {
+ setTimeout(function() {
+ // If there are no exception until this, then we're done
+ done();
+ }, 10);
+ });
+ });
+ describe('sending/receiving a request', function() {
+ it('should work as expected', function(done) {
+ // Request and response data
+ var request_headers = {
+ ':method': 'GET',
+ ':path': '/'
+ };
+ var request_data = Buffer.alloc(0);
+ var response_headers = {
+ ':status': '200'
+ };
+ var response_data = Buffer.from('12345678', 'hex');
+
+ // Setting up server
+ s.on('stream', function(server_stream) {
+ server_stream.on('headers', function(headers) {
+ expect(headers).to.deep.equal(request_headers);
+ server_stream.headers(response_headers);
+ server_stream.end(response_data);
+ });
+ });
+
+ // Sending request
+ var client_stream = c.createStream();
+ client_stream.headers(request_headers);
+ client_stream.end(request_data);
+
+ // Waiting for answer
+ done = util.callNTimes(2, done);
+ client_stream.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ client_stream.on('data', function(data) {
+ expect(data).to.deep.equal(response_data);
+ done();
+ });
+ });
+ });
+ describe('server push', function() {
+ it('should work as expected', function(done) {
+ var request_headers = { ':method': 'get', ':path': '/' };
+ var response_headers = { ':status': '200' };
+ var push_request_headers = { ':method': 'get', ':path': '/x' };
+ var push_response_headers = { ':status': '200' };
+ var response_content = Buffer.alloc(10);
+ var push_content = Buffer.alloc(10);
+
+ done = util.callNTimes(5, done);
+
+ s.on('stream', function(response) {
+ response.headers(response_headers);
+
+ var pushed = response.promise(push_request_headers);
+ pushed.headers(push_response_headers);
+ pushed.end(push_content);
+
+ response.end(response_content);
+ });
+
+ var request = c.createStream();
+ request.headers(request_headers);
+ request.end();
+ request.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ request.on('data', function(data) {
+ expect(data).to.deep.equal(response_content);
+ done();
+ });
+ request.on('promise', function(pushed, headers) {
+ expect(headers).to.deep.equal(push_request_headers);
+ pushed.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ pushed.on('data', function(data) {
+ expect(data).to.deep.equal(push_content);
+ done();
+ });
+ pushed.on('end', done);
+ });
+ });
+ });
+ describe('ping from client', function() {
+ it('should work as expected', function(done) {
+ c.ping(function() {
+ done();
+ });
+ });
+ });
+ describe('ping from server', function() {
+ it('should work as expected', function(done) {
+ s.ping(function() {
+ done();
+ });
+ });
+ });
+ describe('creating two streams and then using them in reverse order', function() {
+ it('should not result in non-monotonous local ID ordering', function() {
+ var s1 = c.createStream();
+ var s2 = c.createStream();
+ s2.headers({ ':method': 'get', ':path': '/' });
+ s1.headers({ ':method': 'get', ':path': '/' });
+ });
+ });
+ describe('creating two promises and then using them in reverse order', function() {
+ it('should not result in non-monotonous local ID ordering', function(done) {
+ s.on('stream', function(response) {
+ response.headers({ ':status': '200' });
+
+ var p1 = s.createStream();
+ var p2 = s.createStream();
+ response.promise(p2, { ':method': 'get', ':path': '/p2' });
+ response.promise(p1, { ':method': 'get', ':path': '/p1' });
+ p2.headers({ ':status': '200' });
+ p1.headers({ ':status': '200' });
+ });
+
+ var request = c.createStream();
+ request.headers({ ':method': 'get', ':path': '/' });
+
+ done = util.callNTimes(2, done);
+ request.on('promise', function() {
+ done();
+ });
+ });
+ });
+ describe('closing the connection on one end', function() {
+ it('should result in closed streams on both ends', function(done) {
+ done = util.callNTimes(2, done);
+ c.on('end', done);
+ s.on('end', done);
+
+ c.close();
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/endpoint.js b/testing/xpcshell/node-http2/test/endpoint.js
new file mode 100644
index 0000000000..bdd2569d42
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/endpoint.js
@@ -0,0 +1,41 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var endpoint = require('../lib/protocol/endpoint');
+var Endpoint = endpoint.Endpoint;
+
+var settings = {
+ SETTINGS_MAX_CONCURRENT_STREAMS: 100,
+ SETTINGS_INITIAL_WINDOW_SIZE: 100000
+};
+
+describe('endpoint.js', function() {
+ describe('scenario', function() {
+ describe('connection setup', function() {
+ it('should work as expected', function(done) {
+ var c = new Endpoint(util.log.child({ role: 'client' }), 'CLIENT', settings);
+ var s = new Endpoint(util.log.child({ role: 'client' }), 'SERVER', settings);
+
+ util.log.debug('Test initialization over, starting piping.');
+ c.pipe(s).pipe(c);
+
+ setTimeout(function() {
+ // If there are no exception until this, then we're done
+ done();
+ }, 10);
+ });
+ });
+ });
+ describe('bunyan serializer', function() {
+ describe('`e`', function() {
+ var format = endpoint.serializers.e;
+ it('should assign a unique ID to each endpoint', function() {
+ var c = new Endpoint(util.log.child({ role: 'client' }), 'CLIENT', settings);
+ var s = new Endpoint(util.log.child({ role: 'client' }), 'SERVER', settings);
+ expect(format(c)).to.not.equal(format(s));
+ expect(format(c)).to.equal(format(c));
+ expect(format(s)).to.equal(format(s));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/flow.js b/testing/xpcshell/node-http2/test/flow.js
new file mode 100644
index 0000000000..a64ab010c9
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/flow.js
@@ -0,0 +1,260 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var Flow = require('../lib/protocol/flow').Flow;
+
+var MAX_PAYLOAD_SIZE = 16384;
+
+function createFlow(log) {
+ var flowControlId = util.random(10, 100);
+ var flow = new Flow(flowControlId);
+ flow._log = util.log.child(log || {});
+ return flow;
+}
+
+describe('flow.js', function() {
+ describe('Flow class', function() {
+ var flow;
+ beforeEach(function() {
+ flow = createFlow();
+ });
+
+ describe('._receive(frame, callback) method', function() {
+ it('is called when there\'s a frame in the input buffer to be consumed', function(done) {
+ var frame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ flow._receive = function _receive(receivedFrame, callback) {
+ expect(receivedFrame).to.equal(frame);
+ callback();
+ };
+ flow.write(frame, done);
+ });
+ it('has to be overridden by the child class, otherwise it throws', function() {
+ expect(flow._receive.bind(flow)).to.throw(Error);
+ });
+ });
+ describe('._send() method', function() {
+      it('is called when the output buffer should be filled with more frames and the flow ' +
+         'control queue is empty', function() {
+ var notFlowControlledFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ flow._send = function _send() {
+ this.push(notFlowControlledFrame);
+ };
+ expect(flow.read()).to.equal(notFlowControlledFrame);
+
+ flow._window = 0;
+ flow._queue.push({ type: 'DATA', flags: {}, data: { length: 1 } });
+ var frame = flow.read();
+ while (frame.type === notFlowControlledFrame.type) frame = flow.read();
+ expect(frame.type).to.equal('BLOCKED');
+ expect(flow.read()).to.equal(null);
+ });
+ it('has to be overridden by the child class, otherwise it throws', function() {
+ expect(flow._send.bind(flow)).to.throw(Error);
+ });
+ });
+ describe('._increaseWindow(size) method', function() {
+ it('should increase `this._window` by `size`', function() {
+ flow._send = util.noop;
+ flow._window = 0;
+
+ var increase1 = util.random(0,100);
+ var increase2 = util.random(0,100);
+ flow._increaseWindow(increase1);
+ flow._increaseWindow(increase2);
+ expect(flow._window).to.equal(increase1 + increase2);
+
+ flow._increaseWindow(Infinity);
+ expect(flow._window).to.equal(Infinity);
+ });
+ it('should emit error when increasing with a finite `size` when `_window` is infinite', function() {
+ flow._send = util.noop;
+ flow._increaseWindow(Infinity);
+ var increase = util.random(1,100);
+
+ expect(flow._increaseWindow.bind(flow, increase)).to.throw('Uncaught, unspecified "error" event.');
+ });
+ it('should emit error when `_window` grows over the window limit', function() {
+ var WINDOW_SIZE_LIMIT = Math.pow(2, 31) - 1;
+ flow._send = util.noop;
+ flow._window = 0;
+
+ flow._increaseWindow(WINDOW_SIZE_LIMIT);
+ expect(flow._increaseWindow.bind(flow, 1)).to.throw('Uncaught, unspecified "error" event.');
+
+ });
+ });
+ describe('.read() method', function() {
+ describe('when the flow control queue is not empty', function() {
+ it('should return the first item in the queue if the window is enough', function() {
+ var priorityFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ var dataFrame = { type: 'DATA', flags: {}, data: { length: 10 } };
+ flow._send = util.noop;
+ flow._window = 10;
+ flow._queue = [priorityFrame, dataFrame];
+
+ expect(flow.read()).to.equal(priorityFrame);
+ expect(flow.read()).to.equal(dataFrame);
+ });
+ it('should also split DATA frames when needed', function() {
+ var buffer = Buffer.alloc(10);
+ var dataFrame = { type: 'DATA', flags: {}, stream: util.random(0, 100), data: buffer };
+ flow._send = util.noop;
+ flow._window = 5;
+ flow._queue = [dataFrame];
+
+ var expectedFragment = { flags: {}, type: 'DATA', stream: dataFrame.stream, data: buffer.slice(0,5) };
+ expect(flow.read()).to.deep.equal(expectedFragment);
+ expect(dataFrame.data).to.deep.equal(buffer.slice(5));
+ });
+ });
+ });
+ describe('.push(frame) method', function() {
+ it('should push `frame` into the output queue or the flow control queue', function() {
+ var priorityFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ var dataFrame = { type: 'DATA', flags: {}, data: { length: 10 } };
+ flow._window = 10;
+
+ flow.push(dataFrame); // output queue
+ flow.push(dataFrame); // flow control queue, because of depleted window
+ flow.push(priorityFrame); // flow control queue, because it's not empty
+
+ expect(flow.read()).to.be.equal(dataFrame);
+ expect(flow._queue[0]).to.be.equal(dataFrame);
+ expect(flow._queue[1]).to.be.equal(priorityFrame);
+ });
+ });
+ describe('.write() method', function() {
+      it('call with a DATA frame should trigger sending WINDOW_UPDATE if remote flow control is not ' +
+ 'disabled', function(done) {
+ flow._window = 100;
+ flow._send = util.noop;
+ flow._receive = function(frame, callback) {
+ callback();
+ };
+
+ var buffer = Buffer.alloc(util.random(10, 100));
+ flow.write({ type: 'DATA', flags: {}, data: buffer });
+ flow.once('readable', function() {
+ expect(flow.read()).to.be.deep.equal({
+ type: 'WINDOW_UPDATE',
+ flags: {},
+ stream: flow._flowControlId,
+ window_size: buffer.length
+ });
+ done();
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ var flow1, flow2;
+ beforeEach(function() {
+ flow1 = createFlow({ flow: 1 });
+ flow2 = createFlow({ flow: 2 });
+ flow1._flowControlId = flow2._flowControlId;
+ flow1._send = flow2._send = util.noop;
+ flow1._receive = flow2._receive = function(frame, callback) { callback(); };
+ });
+
+ describe('sending a large data stream', function() {
+ it('should work as expected', function(done) {
+ // Sender side
+ var frameNumber = util.random(5, 8);
+ var input = [];
+ flow1._send = function _send() {
+ if (input.length >= frameNumber) {
+ this.push({ type: 'DATA', flags: { END_STREAM: true }, data: Buffer.alloc(0) });
+ this.push(null);
+ } else {
+ var buffer = Buffer.allocUnsafe(util.random(1000, 100000));
+ input.push(buffer);
+ this.push({ type: 'DATA', flags: {}, data: buffer });
+ }
+ };
+
+ // Receiver side
+ var output = [];
+ flow2._receive = function _receive(frame, callback) {
+ if (frame.type === 'DATA') {
+ expect(frame.data.length).to.be.lte(MAX_PAYLOAD_SIZE);
+ output.push(frame.data);
+ }
+ if (frame.flags.END_STREAM) {
+ this.emit('end_stream');
+ }
+ callback();
+ };
+
+ // Checking results
+ flow2.on('end_stream', function() {
+ input = util.concat(input);
+ output = util.concat(output);
+
+ expect(input).to.deep.equal(output);
+
+ done();
+ });
+
+ // Start piping
+ flow1.pipe(flow2).pipe(flow1);
+ });
+ });
+
+ describe('when running out of window', function() {
+ it('should send a BLOCKED frame', function(done) {
+ // Sender side
+ var frameNumber = util.random(5, 8);
+ var input = [];
+ flow1._send = function _send() {
+ if (input.length >= frameNumber) {
+ this.push({ type: 'DATA', flags: { END_STREAM: true }, data: Buffer.alloc(0) });
+ this.push(null);
+ } else {
+ var buffer = Buffer.allocUnsafe(util.random(1000, 100000));
+ input.push(buffer);
+ this.push({ type: 'DATA', flags: {}, data: buffer });
+ }
+ };
+
+ // Receiver side
+        // Do not send WINDOW_UPDATE frames except when the other side sends BLOCKED
+ var output = [];
+ flow2._restoreWindow = util.noop;
+ flow2._receive = function _receive(frame, callback) {
+ if (frame.type === 'DATA') {
+ expect(frame.data.length).to.be.lte(MAX_PAYLOAD_SIZE);
+ output.push(frame.data);
+ }
+ if (frame.flags.END_STREAM) {
+ this.emit('end_stream');
+ }
+ if (frame.type === 'BLOCKED') {
+ setTimeout(function() {
+ this._push({
+ type: 'WINDOW_UPDATE',
+ flags: {},
+ stream: this._flowControlId,
+ window_size: this._received
+ });
+ this._received = 0;
+ }.bind(this), 20);
+ }
+ callback();
+ };
+
+ // Checking results
+ flow2.on('end_stream', function() {
+ input = util.concat(input);
+ output = util.concat(output);
+
+ expect(input).to.deep.equal(output);
+
+ done();
+ });
+
+ // Start piping
+ flow1.pipe(flow2).pipe(flow1);
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/framer.js b/testing/xpcshell/node-http2/test/framer.js
new file mode 100644
index 0000000000..e2eb693665
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/framer.js
@@ -0,0 +1,395 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var framer = require('../lib/protocol/framer');
+var Serializer = framer.Serializer;
+var Deserializer = framer.Deserializer;
+
+var frame_types = {
+ DATA: ['data'],
+ HEADERS: ['priority_information', 'data'],
+ PRIORITY: ['priority_information'],
+ RST_STREAM: ['error'],
+ SETTINGS: ['settings'],
+ PUSH_PROMISE: ['promised_stream', 'data'],
+ PING: ['data'],
+ GOAWAY: ['last_stream', 'error'],
+ WINDOW_UPDATE: ['window_size'],
+ CONTINUATION: ['data'],
+ ALTSVC: ['protocolID', 'host', 'port', 'origin', 'maxAge']
+};
+
+var test_frames = [{
+ frame: {
+ type: 'DATA',
+ flags: { END_STREAM: false, RESERVED2: false, RESERVED4: false,
+ PADDED: false },
+ stream: 10,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + content
+ buffer: Buffer.from('000004' + '00' + '00' + '0000000A' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: false },
+ stream: 15,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ buffer: Buffer.from('000004' + '01' + '00' + '0000000F' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: false,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ buffer: Buffer.from('000009' + '01' + '20' + '0000000F' + '0000000A' + '05' + '12345678', 'hex')
+
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: true,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ buffer: Buffer.from('000009' + '01' + '20' + '0000000F' + '8000000A' + '05' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'PRIORITY',
+ flags: { },
+ stream: 10,
+
+ priorityDependency: 9,
+ priorityWeight: 5,
+ exclusiveDependency: false
+ },
+ buffer: Buffer.from('000005' + '02' + '00' + '0000000A' + '00000009' + '05', 'hex')
+
+}, {
+ frame: {
+ type: 'PRIORITY',
+ flags: { },
+ stream: 10,
+
+ priorityDependency: 9,
+ priorityWeight: 5,
+ exclusiveDependency: true
+ },
+ buffer: Buffer.from('000005' + '02' + '00' + '0000000A' + '80000009' + '05', 'hex')
+
+}, {
+ frame: {
+ type: 'RST_STREAM',
+ flags: { },
+ stream: 10,
+
+ error: 'INTERNAL_ERROR'
+ },
+ buffer: Buffer.from('000004' + '03' + '00' + '0000000A' + '00000002', 'hex')
+
+}, {
+ frame: {
+ type: 'SETTINGS',
+ flags: { ACK: false },
+ stream: 10,
+
+ settings: {
+ SETTINGS_HEADER_TABLE_SIZE: 0x12345678,
+ SETTINGS_ENABLE_PUSH: true,
+ SETTINGS_MAX_CONCURRENT_STREAMS: 0x01234567,
+ SETTINGS_INITIAL_WINDOW_SIZE: 0x89ABCDEF,
+ SETTINGS_MAX_FRAME_SIZE: 0x00010000
+ }
+ },
+ buffer: Buffer.from('00001E' + '04' + '00' + '0000000A' + '0001' + '12345678' +
+ '0002' + '00000001' +
+ '0003' + '01234567' +
+ '0004' + '89ABCDEF' +
+ '0005' + '00010000', 'hex')
+
+}, {
+ frame: {
+ type: 'PUSH_PROMISE',
+ flags: { RESERVED1: false, RESERVED2: false, END_PUSH_PROMISE: false,
+ PADDED: false },
+ stream: 15,
+
+ promised_stream: 3,
+ data: Buffer.from('12345678', 'hex')
+ },
+ buffer: Buffer.from('000008' + '05' + '00' + '0000000F' + '00000003' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'PING',
+ flags: { ACK: false },
+ stream: 15,
+
+ data: Buffer.from('1234567887654321', 'hex')
+ },
+ buffer: Buffer.from('000008' + '06' + '00' + '0000000F' + '1234567887654321', 'hex')
+
+}, {
+ frame: {
+ type: 'GOAWAY',
+ flags: { },
+ stream: 10,
+
+ last_stream: 0x12345678,
+ error: 'PROTOCOL_ERROR'
+ },
+ buffer: Buffer.from('000008' + '07' + '00' + '0000000A' + '12345678' + '00000001', 'hex')
+
+}, {
+ frame: {
+ type: 'WINDOW_UPDATE',
+ flags: { },
+ stream: 10,
+
+ window_size: 0x12345678
+ },
+ buffer: Buffer.from('000004' + '08' + '00' + '0000000A' + '12345678', 'hex')
+}, {
+ frame: {
+ type: 'CONTINUATION',
+ flags: { RESERVED1: false, RESERVED2: false, END_HEADERS: true },
+ stream: 10,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + content
+ buffer: Buffer.from('000004' + '09' + '04' + '0000000A' + '12345678', 'hex')
+}, {
+ frame: {
+ type: 'ALTSVC',
+ flags: { },
+ stream: 0,
+
+ maxAge: 31536000,
+ port: 4443,
+ protocolID: "h2",
+ host: "altsvc.example.com",
+ origin: ""
+ },
+ buffer: Buffer.from(Buffer.from('00002B' + '0A' + '00' + '00000000' + '0000', 'hex') + Buffer.from('h2="altsvc.example.com:4443"; ma=31536000', 'ascii'))
+}, {
+ frame: {
+ type: 'ALTSVC',
+ flags: { },
+ stream: 0,
+
+ maxAge: 31536000,
+ port: 4443,
+ protocolID: "h2",
+ host: "altsvc.example.com",
+ origin: "https://onlyme.example.com"
+ },
+ buffer: Buffer.from(Buffer.from('000045' + '0A' + '00' + '00000000' + '001A', 'hex') + Buffer.from('https://onlyme.example.comh2="altsvc.example.com:4443"; ma=31536000', 'ascii'))
+
+}, {
+ frame: {
+ type: 'BLOCKED',
+ flags: { },
+ stream: 10
+ },
+ buffer: Buffer.from('000000' + '0B' + '00' + '0000000A', 'hex')
+}];
+
+var deserializer_test_frames = test_frames.slice(0);
+var padded_test_frames = [{
+ frame: {
+ type: 'DATA',
+ flags: { END_STREAM: false, RESERVED2: false, RESERVED4: false,
+ PADDED: true },
+ stream: 10,
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + content + padding
+ buffer: Buffer.from('00000B' + '00' + '08' + '0000000A' + '06' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: false },
+ stream: 15,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + data + padding
+ buffer: Buffer.from('00000B' + '01' + '08' + '0000000F' + '06' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: false,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + priority dependency + priority weight + data + padding
+ buffer: Buffer.from('000010' + '01' + '28' + '0000000F' + '06' + '0000000A' + '05' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: true,
+
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + priority dependency + priority weight + data + padding
+ buffer: Buffer.from('000010' + '01' + '28' + '0000000F' + '06' + '8000000A' + '05' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'PUSH_PROMISE',
+ flags: { RESERVED1: false, RESERVED2: false, END_PUSH_PROMISE: false,
+ PADDED: true },
+ stream: 15,
+
+ promised_stream: 3,
+ data: Buffer.from('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + promised stream + data + padding
+ buffer: Buffer.from('00000F' + '05' + '08' + '0000000F' + '06' + '00000003' + '12345678' + '000000000000', 'hex')
+
+}];
+for (var idx = 0; idx < padded_test_frames.length; idx++) {
+ deserializer_test_frames.push(padded_test_frames[idx]);
+}
+
+
+describe('framer.js', function() {
+ describe('Serializer', function() {
+ describe('static method .commonHeader({ type, flags, stream }, buffer_array)', function() {
+ it('should add the appropriate 9 byte header buffer in front of the others', function() {
+ for (var i = 0; i < test_frames.length; i++) {
+ var test = test_frames[i];
+ var buffers = [test.buffer.slice(9)];
+ var header_buffer = test.buffer.slice(0,9);
+ Serializer.commonHeader(test.frame, buffers);
+ expect(buffers[0]).to.deep.equal(header_buffer);
+ }
+ });
+ });
+
+ Object.keys(frame_types).forEach(function(type) {
+ var tests = test_frames.filter(function(test) { return test.frame.type === type; });
+ var frame_shape = '{ ' + frame_types[type].join(', ') + ' }';
+ describe('static method .' + type + '(' + frame_shape + ', buffer_array)', function() {
+ it('should push buffers to the array that make up a ' + type + ' type payload', function() {
+ for (var i = 0; i < tests.length; i++) {
+ var test = tests[i];
+ var buffers = [];
+ Serializer[type](test.frame, buffers);
+ expect(util.concat(buffers)).to.deep.equal(test.buffer.slice(9));
+ }
+ });
+ });
+ });
+
+ describe('transform stream', function() {
+ it('should transform frame objects to appropriate buffers', function() {
+ var stream = new Serializer(util.log);
+
+ for (var i = 0; i < test_frames.length; i++) {
+ var test = test_frames[i];
+ stream.write(test.frame);
+ var chunk, buffer = Buffer.alloc(0);
+ while (chunk = stream.read()) {
+ buffer = util.concat([buffer, chunk]);
+ }
+ expect(buffer).to.be.deep.equal(test.buffer);
+ }
+ });
+ });
+ });
+
+ describe('Deserializer', function() {
+ describe('static method .commonHeader(header_buffer, frame)', function() {
+      it('should augment the frame object with these properties: { type, flags, stream }', function() {
+ for (var i = 0; i < deserializer_test_frames.length; i++) {
+ var test = deserializer_test_frames[i], frame = {};
+ Deserializer.commonHeader(test.buffer.slice(0,9), frame);
+ expect(frame).to.deep.equal({
+ type: test.frame.type,
+ flags: test.frame.flags,
+ stream: test.frame.stream
+ });
+ }
+ });
+ });
+
+ Object.keys(frame_types).forEach(function(type) {
+ var tests = deserializer_test_frames.filter(function(test) { return test.frame.type === type; });
+ var frame_shape = '{ ' + frame_types[type].join(', ') + ' }';
+ describe('static method .' + type + '(payload_buffer, frame)', function() {
+ it('should augment the frame object with these properties: ' + frame_shape, function() {
+ for (var i = 0; i < tests.length; i++) {
+ var test = tests[i];
+ var frame = {
+ type: test.frame.type,
+ flags: test.frame.flags,
+ stream: test.frame.stream
+ };
+ Deserializer[type](test.buffer.slice(9), frame);
+ expect(frame).to.deep.equal(test.frame);
+ }
+ });
+ });
+ });
+
+ describe('transform stream', function() {
+      it('should transform buffers to the appropriate frame objects', function() {
+ var stream = new Deserializer(util.log);
+
+ var shuffled = util.shuffleBuffers(deserializer_test_frames.map(function(test) { return test.buffer; }));
+ shuffled.forEach(stream.write.bind(stream));
+
+ for (var j = 0; j < deserializer_test_frames.length; j++) {
+ expect(stream.read()).to.be.deep.equal(deserializer_test_frames[j].frame);
+ }
+ });
+ });
+ });
+
+ describe('bunyan formatter', function() {
+ describe('`frame`', function() {
+ var format = framer.serializers.frame;
+ it('should assign a unique ID to each frame', function() {
+ var frame1 = { type: 'DATA', data: Buffer.alloc(10) };
+ var frame2 = { type: 'PRIORITY', priority: 1 };
+ expect(format(frame1).id).to.be.equal(format(frame1));
+ expect(format(frame2).id).to.be.equal(format(frame2));
+ expect(format(frame1)).to.not.be.equal(format(frame2));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/http.js b/testing/xpcshell/node-http2/test/http.js
new file mode 100644
index 0000000000..95a074e4a0
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/http.js
@@ -0,0 +1,793 @@
+var expect = require('chai').expect;
+var util = require('./util');
+var fs = require('fs');
+var path = require('path');
+var url = require('url');
+var net = require('net');
+
+var http2 = require('../lib/http');
+var https = require('https');
+
+var serverOptions = {
+ key: fs.readFileSync(path.join(__dirname, '../example/localhost.key')),
+ cert: fs.readFileSync(path.join(__dirname, '../example/localhost.crt')),
+ rejectUnauthorized: true,
+ log: util.serverLog
+};
+
+var agentOptions = {
+ key: serverOptions.key,
+ ca: serverOptions.cert,
+ rejectUnauthorized: true,
+ log: util.clientLog
+};
+
+var globalAgent = new http2.Agent(agentOptions);
+
+describe('http.js', function() {
+ beforeEach(function() {
+ http2.globalAgent = globalAgent;
+ });
+ describe('Server', function() {
+ describe('new Server(options)', function() {
+ it('should throw if called without \'plain\' or TLS options', function() {
+ expect(function() {
+ new http2.Server();
+ }).to.throw(Error);
+ expect(function() {
+ http2.createServer(util.noop);
+ }).to.throw(Error);
+ });
+ });
+ describe('method `listen()`', function () {
+ it('should emit `listening` event', function (done) {
+ var server = http2.createServer(serverOptions);
+
+ server.on('listening', function () {
+ server.close();
+
+ done();
+        });
+
+ server.listen(0);
+ });
+ it('should emit `error` on failure', function (done) {
+ var server = http2.createServer(serverOptions);
+
+        // This TCP server is used to explicitly occupy a port so that
+        // server.listen() on the same port fails.
+        var tcpServer = require('net').createServer();
+
+        server.on('error', function () {
+          tcpServer.close();
+
+          done();
+        });
+
+        tcpServer.listen(0, function () {
+          server.listen(this.address().port);
+        });
+ });
+ });
+ describe('property `timeout`', function() {
+ it('should be a proxy for the backing HTTPS server\'s `timeout` property', function() {
+ var server = new http2.Server(serverOptions);
+ var backingServer = server._server;
+ var newTimeout = 10;
+ server.timeout = newTimeout;
+ expect(server.timeout).to.be.equal(newTimeout);
+ expect(backingServer.timeout).to.be.equal(newTimeout);
+ });
+ });
+ describe('method `setTimeout(timeout, [callback])`', function() {
+ it('should be a proxy for the backing HTTPS server\'s `setTimeout` method', function() {
+ var server = new http2.Server(serverOptions);
+ var backingServer = server._server;
+ var newTimeout = 10;
+ var newCallback = util.noop;
+ backingServer.setTimeout = function(timeout, callback) {
+ expect(timeout).to.be.equal(newTimeout);
+ expect(callback).to.be.equal(newCallback);
+ };
+ server.setTimeout(newTimeout, newCallback);
+ });
+ });
+ });
+ describe('Agent', function() {
+ describe('property `maxSockets`', function() {
+ it('should be a proxy for the backing HTTPS agent\'s `maxSockets` property', function() {
+ var agent = new http2.Agent({ log: util.clientLog });
+ var backingAgent = agent._httpsAgent;
+ var newMaxSockets = backingAgent.maxSockets + 1;
+ agent.maxSockets = newMaxSockets;
+ expect(agent.maxSockets).to.be.equal(newMaxSockets);
+ expect(backingAgent.maxSockets).to.be.equal(newMaxSockets);
+ });
+ });
+ describe('method `request(options, [callback])`', function() {
+ it('should use a new agent for request-specific TLS settings', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ var options = url.parse('https://localhost:1234' + path);
+ options.key = agentOptions.key;
+ options.ca = agentOptions.ca;
+ options.rejectUnauthorized = true;
+
+ http2.globalAgent = new http2.Agent({ log: util.clientLog });
+ http2.get(options, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should throw when trying to use with \'http\' scheme', function() {
+ expect(function() {
+ var agent = new http2.Agent({ log: util.clientLog });
+ agent.request({ protocol: 'http:' });
+ }).to.throw(Error);
+ });
+ });
+ });
+ describe('OutgoingRequest', function() {
+ function testFallbackProxyMethod(name, originalArguments, done) {
+ var request = new http2.OutgoingRequest();
+
+ // When in HTTP/2 mode, this call should be ignored
+ request.stream = { reset: util.noop };
+ request[name].apply(request, originalArguments);
+ delete request.stream;
+
+ // When in fallback mode, this call should be forwarded
+ request[name].apply(request, originalArguments);
+ var mockFallbackRequest = { on: util.noop };
+ mockFallbackRequest[name] = function() {
+ expect(Array.prototype.slice.call(arguments)).to.deep.equal(originalArguments);
+ done();
+ };
+ request._fallback(mockFallbackRequest);
+ }
+ describe('method `setNoDelay(noDelay)`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setNoDelay` method', function(done) {
+ testFallbackProxyMethod('setNoDelay', [true], done);
+ });
+ });
+ describe('method `setSocketKeepAlive(enable, initialDelay)`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setSocketKeepAlive` method', function(done) {
+ testFallbackProxyMethod('setSocketKeepAlive', [true, util.random(10, 100)], done);
+ });
+ });
+ describe('method `setTimeout(timeout, [callback])`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setTimeout` method', function(done) {
+ testFallbackProxyMethod('setTimeout', [util.random(10, 100), util.noop], done);
+ });
+ });
+ describe('method `abort()`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `abort` method', function(done) {
+ testFallbackProxyMethod('abort', [], done);
+ });
+ });
+ });
+ describe('OutgoingResponse', function() {
+    it('should not send headers twice when writeHead is called multiple times', function() {
+ var called = false;
+ var stream = { _log: util.log, headers: function () {
+ if (called) {
+ throw new Error('Should not send headers twice');
+ } else {
+ called = true;
+ }
+ }, once: util.noop };
+ var response = new http2.OutgoingResponse(stream);
+
+ response.writeHead(200);
+ response.writeHead(404);
+ });
+    it('field finished should be a Boolean', function() {
+ var stream = { _log: util.log, headers: function () {}, once: util.noop };
+ var response = new http2.OutgoingResponse(stream);
+ expect(response.finished).to.be.a('Boolean');
+ });
+    it('field finished should initially be false and then become true when the response completes', function(done) {
+ var res;
+ var server = http2.createServer(serverOptions, function(request, response) {
+ res = response;
+ expect(res.finished).to.be.false;
+ response.end('HiThere');
+ });
+ server.listen(1236, function() {
+ http2.get('https://localhost:1236/finished-test', function(response) {
+ response.on('data', function(data){
+            var sink = data; // consume the data so the 'end' event fires
+ });
+ response.on('end',function(){
+ expect(res.finished).to.be.true;
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ describe('simple request', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('2 simple requests in parallel', function() {
+ it('should work as expected', function(originalDone) {
+ var path = '/x';
+ var message = 'Hello world';
+ var done = util.callNTimes(2, function() {
+ server.close();
+ originalDone();
+ });
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('100 simple requests in series', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ var n = 100;
+ server.listen(1242, function() {
+ doRequest();
+ function doRequest() {
+ http2.get('https://localhost:1242' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ if (n) {
+ n -= 1;
+ doRequest();
+ } else {
+ server.close();
+ done();
+ }
+ });
+ });
+ }
+ });
+ });
+ });
+ describe('request with payload', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ request.once('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ response.end();
+ });
+ });
+
+ server.listen(1240, function() {
+ var request = http2.request({
+ host: 'localhost',
+ port: 1240,
+ path: path
+ });
+ request.write(message);
+ request.end();
+ request.on('response', function() {
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ describe('request with custom status code and headers', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var headerName = 'name';
+ var headerValue = 'value';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ // Request URL and headers
+ expect(request.url).to.equal(path);
+ expect(request.headers[headerName]).to.equal(headerValue);
+
+ // A header to be overwritten later
+ response.setHeader(headerName, 'to be overwritten');
+ expect(response.getHeader(headerName)).to.equal('to be overwritten');
+
+ // A header to be deleted
+ response.setHeader('nonexistent', 'x');
+ response.removeHeader('nonexistent');
+ expect(response.getHeader('nonexistent')).to.equal(undefined);
+
+ // A set-cookie header which should always be an array
+ response.setHeader('set-cookie', 'foo');
+
+ // Don't send date
+ response.sendDate = false;
+
+ // Specifying more headers, the status code and a reason phrase with `writeHead`
+ var moreHeaders = {};
+ moreHeaders[headerName] = headerValue;
+ response.writeHead(600, 'to be discarded', moreHeaders);
+ expect(response.getHeader(headerName)).to.equal(headerValue);
+
+          // Send the response body
+ response.end(message);
+ });
+
+ server.listen(1239, function() {
+ var headers = {};
+ headers[headerName] = headerValue;
+ var request = http2.request({
+ host: 'localhost',
+ port: 1239,
+ path: path,
+ headers: headers
+ });
+ request.end();
+ request.on('response', function(response) {
+ expect(response.headers[headerName]).to.equal(headerValue);
+ expect(response.headers['nonexistent']).to.equal(undefined);
+            expect(response.headers['set-cookie']).to.be.an.instanceof(Array);
+            expect(response.headers['set-cookie']).to.deep.equal(['foo']);
+ expect(response.headers['date']).to.equal(undefined);
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('request over plain TCP', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.raw.createServer({
+ log: util.serverLog
+ }, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ var request = http2.raw.request({
+ plain: true,
+ host: 'localhost',
+ port: 1237,
+ path: path
+ }, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ request.end();
+ });
+ });
+ });
+ describe('get over plain TCP', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.raw.createServer({
+ log: util.serverLog
+ }, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ var request = http2.raw.get('http://localhost:1237/x', function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ request.end();
+ });
+ });
+ });
+ describe('request to an HTTPS/1 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = https.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(5678, function() {
+ http2.get('https://localhost:5678' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('2 parallel requests to an HTTPS/1 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(originalDone) {
+ var path = '/x';
+ var message = 'Hello world';
+ var done = util.callNTimes(2, function() {
+ server.close();
+ originalDone();
+ });
+
+ var server = https.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(6789, function() {
+ http2.get('https://localhost:6789' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ http2.get('https://localhost:6789' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('HTTPS/1 request to an HTTP/2 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1236, function() {
+ var options = url.parse('https://localhost:1236' + path);
+ options.agent = new https.Agent(agentOptions);
+ https.get(options, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('two parallel requests', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ done = util.callNTimes(2, done);
+ // 1. request
+ http2.get('https://localhost:1237' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ // 2. request
+ http2.get('https://localhost:1237' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+    describe('two subsequent requests', function() {
+ it('should use the same HTTP/2 connection', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1238, function() {
+ // 1. request
+ http2.get('https://localhost:1238' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+
+ // 2. request
+ http2.get('https://localhost:1238' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ });
+ });
+ describe('https server node module specification conformance', function() {
+ it('should provide API for remote HTTP 1.1 client address', function(done) {
+ var remoteAddress = null;
+ var remotePort = null;
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ // HTTPS 1.1 client with Node 0.10 server
+ if (!request.remoteAddress) {
+ if (request.socket.socket) {
+ remoteAddress = request.socket.socket.remoteAddress;
+ remotePort = request.socket.socket.remotePort;
+ } else {
+ remoteAddress = request.socket.remoteAddress;
+ remotePort = request.socket.remotePort;
+ }
+ } else {
+ // HTTPS 1.1/2.0 client with Node 0.12 server
+ remoteAddress = request.remoteAddress;
+ remotePort = request.remotePort;
+ }
+ response.write('Pong');
+ response.end();
+ });
+
+ server.listen(1259, 'localhost', function() {
+ var request = https.request({
+ host: 'localhost',
+ port: 1259,
+ path: '/',
+ ca: serverOptions.cert
+ });
+ request.write('Ping');
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ var localAddress = response.socket.address();
+ expect(remoteAddress).to.equal(localAddress.address);
+ expect(remotePort).to.equal(localAddress.port);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should provide API for remote HTTP 2.0 client address', function(done) {
+ var remoteAddress = null;
+ var remotePort = null;
+ var localAddress = null;
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ remoteAddress = request.remoteAddress;
+ remotePort = request.remotePort;
+ response.write('Pong');
+ response.end();
+ });
+
+ server.listen(1258, 'localhost', function() {
+ var request = http2.request({
+ host: 'localhost',
+ port: 1258,
+ path: '/'
+ });
+ request.write('Ping');
+ globalAgent.on('false:localhost:1258', function(endpoint) {
+ localAddress = endpoint.socket.address();
+ });
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ expect(remoteAddress).to.equal(localAddress.address);
+ expect(remotePort).to.equal(localAddress.port);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should expose net.Socket as .socket and .connection', function(done) {
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.socket).to.equal(request.connection);
+ expect(request.socket).to.be.instanceof(net.Socket);
+ response.write('Pong');
+ response.end();
+ done();
+ });
+
+ server.listen(1248, 'localhost', function() {
+ var request = https.request({
+ host: 'localhost',
+ port: 1248,
+ path: '/',
+ ca: serverOptions.cert
+ });
+ request.write('Ping');
+ request.end();
+ });
+ });
+ });
+ describe('request and response with trailers', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var requestTrailers = { 'content-md5': 'x' };
+ var responseTrailers = { 'content-md5': 'y' };
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ request.on('data', util.noop);
+ request.once('end', function() {
+ expect(request.trailers).to.deep.equal(requestTrailers);
+ response.write(message);
+ response.addTrailers(responseTrailers);
+ response.end();
+ });
+ });
+
+ server.listen(1241, function() {
+ var request = http2.request('https://localhost:1241' + path);
+ request.addTrailers(requestTrailers);
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', util.noop);
+ response.once('end', function() {
+ expect(response.trailers).to.deep.equal(responseTrailers);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('Handle socket error', function () {
+ it('HTTPS on Connection Refused error', function (done) {
+ var path = '/x';
+ var request = http2.request('https://127.0.0.1:6666' + path);
+
+ request.on('error', function (err) {
+ expect(err.errno).to.equal('ECONNREFUSED');
+ done();
+ });
+
+ request.on('response', function (response) {
+ server._server._handle.destroy();
+
+ response.on('data', util.noop);
+
+ response.once('end', function () {
+ done(new Error('Request should have failed'));
+ });
+ });
+
+ request.end();
+
+ });
+ it('HTTP on Connection Refused error', function (done) {
+ var path = '/x';
+
+ var request = http2.raw.request('http://127.0.0.1:6666' + path);
+
+ request.on('error', function (err) {
+ expect(err.errno).to.equal('ECONNREFUSED');
+ done();
+ });
+
+ request.on('response', function (response) {
+ server._server._handle.destroy();
+
+ response.on('data', util.noop);
+
+ response.once('end', function () {
+ done(new Error('Request should have failed'));
+ });
+ });
+
+ request.end();
+ });
+ });
+ describe('server push', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var pushedPath = '/y';
+ var pushedMessage = 'Hello world 2';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ var push1 = response.push('/y');
+ push1.end(pushedMessage);
+ var push2 = response.push({ path: '/y', protocol: 'https:' });
+ push2.end(pushedMessage);
+ response.end(message);
+ });
+
+ server.listen(1235, function() {
+ var request = http2.get('https://localhost:1235' + path);
+ done = util.callNTimes(5, done);
+
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ response.on('end', done);
+ });
+
+ request.on('push', function(promise) {
+ expect(promise.url).to.be.equal(pushedPath);
+ promise.on('response', function(pushStream) {
+ pushStream.on('data', function(data) {
+ expect(data.toString()).to.equal(pushedMessage);
+ done();
+ });
+ pushStream.on('end', done);
+ });
+ });
+ });
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/stream.js b/testing/xpcshell/node-http2/test/stream.js
new file mode 100644
index 0000000000..9e60932b8e
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/stream.js
@@ -0,0 +1,413 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var stream = require('../lib/protocol/stream');
+var Stream = stream.Stream;
+
+function createStream() {
+ var stream = new Stream(util.log, null);
+ stream.upstream._window = Infinity;
+ return stream;
+}
+
+// Execute a list of commands and assertions
+var recorded_events = ['state', 'error', 'window_update', 'headers', 'promise'];
+function execute_sequence(stream, sequence, done) {
+ if (!done) {
+ done = sequence;
+ sequence = stream;
+ stream = createStream();
+ }
+
+ var outgoing_frames = [];
+
+ var emit = stream.emit, events = [];
+ stream.emit = function(name) {
+ if (recorded_events.indexOf(name) !== -1) {
+ events.push({ name: name, data: Array.prototype.slice.call(arguments, 1) });
+ }
+ return emit.apply(this, arguments);
+ };
+
+ var commands = [], checks = [];
+ sequence.forEach(function(step) {
+ if ('method' in step || 'incoming' in step || 'outgoing' in step || 'wait' in step || 'set_state' in step) {
+ commands.push(step);
+ }
+
+ if ('outgoing' in step || 'event' in step || 'active' in step) {
+ checks.push(step);
+ }
+ });
+
+ var activeCount = 0;
+ function count_change(change) {
+ activeCount += change;
+ }
+
+ function execute(callback) {
+ var command = commands.shift();
+ if (command) {
+ if ('method' in command) {
+ var value = stream[command.method.name].apply(stream, command.method.arguments);
+ if (command.method.ret) {
+ command.method.ret(value);
+ }
+ execute(callback);
+ } else if ('incoming' in command) {
+ command.incoming.count_change = count_change;
+ stream.upstream.write(command.incoming);
+ execute(callback);
+ } else if ('outgoing' in command) {
+ outgoing_frames.push(stream.upstream.read());
+ execute(callback);
+ } else if ('set_state' in command) {
+ stream.state = command.set_state;
+ execute(callback);
+ } else if ('wait' in command) {
+ setTimeout(execute.bind(null, callback), command.wait);
+ } else {
+ throw new Error('Invalid command', command);
+ }
+ } else {
+ setTimeout(callback, 5);
+ }
+ }
+
+ function check() {
+ checks.forEach(function(check) {
+ if ('outgoing' in check) {
+ var frame = outgoing_frames.shift();
+ for (var key in check.outgoing) {
+ expect(frame).to.have.property(key).that.deep.equals(check.outgoing[key]);
+ }
+ count_change(frame.count_change);
+ } else if ('event' in check) {
+ var event = events.shift();
+ expect(event.name).to.be.equal(check.event.name);
+ check.event.data.forEach(function(data, index) {
+ expect(event.data[index]).to.deep.equal(data);
+ });
+ } else if ('active' in check) {
+ expect(activeCount).to.be.equal(check.active);
+ } else {
+ throw new Error('Invalid check', check);
+ }
+ });
+ done();
+ }
+
+ setImmediate(execute.bind(null, check));
+}
+
+var example_frames = [
+ { type: 'PRIORITY', flags: {}, priority: 1 },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'RST_STREAM', flags: {}, error: 'CANCEL' },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {}, promised_stream: new Stream(util.log, null) }
+];
+
+var invalid_incoming_frames = {
+ IDLE: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'RST_STREAM', flags: {}, error: 'CANCEL' }
+ ],
+ RESERVED_LOCAL: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ RESERVED_REMOTE: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ OPEN: [
+ ],
+ HALF_CLOSED_LOCAL: [
+ ],
+ HALF_CLOSED_REMOTE: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ]
+};
+
+var invalid_outgoing_frames = {
+ IDLE: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ],
+ RESERVED_LOCAL: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ RESERVED_REMOTE: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ OPEN: [
+ ],
+ HALF_CLOSED_LOCAL: [
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ],
+ HALF_CLOSED_REMOTE: [
+ ],
+ CLOSED: [
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'DATA', flags: {}, data: Buffer.alloc(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {}, promised_stream: new Stream(util.log, null) }
+ ]
+};
+
+describe('stream.js', function() {
+ describe('Stream class', function() {
+ describe('._transition(sending, frame) method', function() {
+ it('should emit error, and answer RST_STREAM for invalid incoming frames', function() {
+ Object.keys(invalid_incoming_frames).forEach(function(state) {
+ invalid_incoming_frames[state].forEach(function(invalid_frame) {
+ var stream = createStream();
+ var connectionErrorHappened = false;
+ stream.state = state;
+ stream.once('connectionError', function() { connectionErrorHappened = true; });
+ stream._transition(false, invalid_frame);
+ expect(connectionErrorHappened);
+ });
+ });
+
+ // CLOSED state as a result of incoming END_STREAM (or RST_STREAM)
+ var stream = createStream();
+ stream.headers({});
+ stream.end();
+ stream.upstream.write({ type: 'HEADERS', headers:{}, flags: { END_STREAM: true }, count_change: util.noop });
+ example_frames.slice(2).forEach(function(invalid_frame) {
+ invalid_frame.count_change = util.noop;
+ expect(stream._transition.bind(stream, false, invalid_frame)).to.throw('Uncaught, unspecified "error" event.');
+ });
+
+ // CLOSED state as a result of outgoing END_STREAM
+ stream = createStream();
+ stream.upstream.write({ type: 'HEADERS', headers:{}, flags: { END_STREAM: true }, count_change: util.noop });
+ stream.headers({});
+ stream.end();
+ example_frames.slice(3).forEach(function(invalid_frame) {
+ invalid_frame.count_change = util.noop;
+ expect(stream._transition.bind(stream, false, invalid_frame)).to.throw('Uncaught, unspecified "error" event.');
+ });
+ });
+ it('should throw exception for invalid outgoing frames', function() {
+ Object.keys(invalid_outgoing_frames).forEach(function(state) {
+ invalid_outgoing_frames[state].forEach(function(invalid_frame) {
+ var stream = createStream();
+ stream.state = state;
+ expect(stream._transition.bind(stream, true, invalid_frame)).to.throw(Error);
+ });
+ });
+ });
+ it('should close the stream when there\'s an incoming or outgoing RST_STREAM', function() {
+ [
+ 'RESERVED_LOCAL',
+ 'RESERVED_REMOTE',
+ 'OPEN',
+ 'HALF_CLOSED_LOCAL',
+ 'HALF_CLOSED_REMOTE'
+ ].forEach(function(state) {
+ [true, false].forEach(function(sending) {
+ var stream = createStream();
+ stream.state = state;
+ stream._transition(sending, { type: 'RST_STREAM', flags: {} });
+ expect(stream.state).to.be.equal('CLOSED');
+ });
+ });
+ });
+ it('should ignore any incoming frame after sending reset', function() {
+ var stream = createStream();
+ stream.reset();
+ example_frames.forEach(stream._transition.bind(stream, false));
+ });
+ it('should ignore certain incoming frames after closing the stream with END_STREAM', function() {
+ var stream = createStream();
+ stream.upstream.write({ type: 'HEADERS', flags: { END_STREAM: true }, headers:{} });
+ stream.headers({});
+ stream.end();
+ example_frames.slice(0,3).forEach(function(frame) {
+ frame.count_change = util.noop;
+ stream._transition(false, frame);
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ describe('sending request', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ execute_sequence([
+ { method : { name: 'headers', arguments: [{ ':path': '/' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+
+ { wait : 5 },
+ { method : { name: 'end', arguments: [] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: Buffer.alloc(0) } },
+
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+ { incoming: { type: 'DATA' , flags: { END_STREAM: true }, data: Buffer.alloc(5) } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ describe('answering request', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = Buffer.alloc(5);
+ execute_sequence([
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'headers', data: [{ ':path': '/' }] } },
+
+ { wait : 5 },
+ { incoming: { type: 'DATA', flags: { }, data: Buffer.alloc(5) } },
+ { incoming: { type: 'DATA', flags: { END_STREAM: true }, data: Buffer.alloc(10) } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': 200 }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+
+ { wait : 5 },
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ describe('sending push stream', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = Buffer.alloc(5);
+ var pushStream;
+
+ execute_sequence([
+ // receiving request
+ { incoming: { type: 'HEADERS', flags: { END_STREAM: true }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+ { event : { name: 'headers', data: [{ ':path': '/' }] } },
+
+ // sending response headers
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': '200' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': '200' } } },
+
+ // sending push promise
+ { method : { name: 'promise', arguments: [{ ':path': '/' }], ret: function(str) { pushStream = str; } } },
+ { outgoing: { type: 'PUSH_PROMISE', flags: { }, headers: { ':path': '/' } } },
+
+ // sending response data
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], function() {
+ // initial state of the promised stream
+ expect(pushStream.state).to.equal('RESERVED_LOCAL');
+
+ execute_sequence(pushStream, [
+ // push headers
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': '200' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': '200' } } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+
+ // push data
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 1 }
+ ], done);
+ });
+ });
+ });
+ describe('receiving push stream', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = Buffer.alloc(5);
+ var original_stream = createStream();
+ var promised_stream = createStream();
+
+ done = util.callNTimes(2, done);
+
+ execute_sequence(original_stream, [
+ // sending request headers
+ { method : { name: 'headers', arguments: [{ ':path': '/' }] } },
+ { method : { name: 'end', arguments: [] } },
+ { outgoing: { type: 'HEADERS', flags: { END_STREAM: true }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+
+ // receiving response headers
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+
+ // receiving push promise
+ { incoming: { type: 'PUSH_PROMISE', flags: { }, headers: { ':path': '/2.html' }, promised_stream: promised_stream } },
+ { event : { name: 'promise', data: [promised_stream, { ':path': '/2.html' }] } },
+
+ // receiving response data
+ { incoming: { type: 'DATA' , flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+
+ execute_sequence(promised_stream, [
+ // initial state of the promised stream
+ { event : { name: 'state', data: ['RESERVED_REMOTE'] } },
+
+ // push headers
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { END_STREAM: false }, headers: { ':status': 200 } } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+
+ // push data
+ { incoming: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ });
+
+ describe('bunyan formatter', function() {
+ describe('`s`', function() {
+ var format = stream.serializers.s;
+      it('should assign a unique ID to each stream', function() {
+ var stream1 = createStream();
+ var stream2 = createStream();
+ expect(format(stream1)).to.be.equal(format(stream1));
+ expect(format(stream2)).to.be.equal(format(stream2));
+ expect(format(stream1)).to.not.be.equal(format(stream2));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/util.js b/testing/xpcshell/node-http2/test/util.js
new file mode 100644
index 0000000000..c612e38b14
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/util.js
@@ -0,0 +1,89 @@
+var path = require('path');
+var fs = require('fs');
+var spawn = require('child_process').spawn;
+
+function noop() {}
+exports.noop = noop;
+
+if (process.env.HTTP2_LOG) {
+ var logOutput = process.stderr;
+ if (process.stderr.isTTY) {
+ var bin = path.resolve(path.dirname(require.resolve('bunyan')), '..', 'bin', 'bunyan');
+ if(bin && fs.existsSync(bin)) {
+ logOutput = spawn(bin, ['-o', 'short'], {
+ stdio: [null, process.stderr, process.stderr]
+ }).stdin;
+ }
+ }
+ exports.createLogger = function(name) {
+ return require('bunyan').createLogger({
+ name: name,
+ stream: logOutput,
+ level: process.env.HTTP2_LOG,
+ serializers: require('../lib/http').serializers
+ });
+ };
+ exports.log = exports.createLogger('test');
+ exports.clientLog = exports.createLogger('client');
+ exports.serverLog = exports.createLogger('server');
+} else {
+ exports.createLogger = function() {
+ return exports.log;
+ };
+ exports.log = exports.clientLog = exports.serverLog = {
+ fatal: noop,
+ error: noop,
+ warn : noop,
+ info : noop,
+ debug: noop,
+ trace: noop,
+
+ child: function() { return this; }
+ };
+}
+
+exports.callNTimes = function callNTimes(limit, done) {
+ if (limit === 0) {
+ done();
+ } else {
+ var i = 0;
+ return function() {
+ i += 1;
+ if (i === limit) {
+ done();
+ }
+ };
+ }
+};
+
+// Concatenate an array of buffers into a new buffer
+exports.concat = function concat(buffers) {
+ var size = 0;
+ for (var i = 0; i < buffers.length; i++) {
+ size += buffers[i].length;
+ }
+
+ var concatenated = Buffer.alloc(size);
+ for (var cursor = 0, j = 0; j < buffers.length; cursor += buffers[j].length, j++) {
+ buffers[j].copy(concatenated, cursor);
+ }
+
+ return concatenated;
+};
+
+exports.random = function random(min, max) {
+ return min + Math.floor(Math.random() * (max - min + 1));
+};
+
+// Concatenate an array of buffers and then cut them into random size buffers
+exports.shuffleBuffers = function shuffleBuffers(buffers) {
+ var concatenated = exports.concat(buffers), output = [], written = 0;
+
+ while (written < concatenated.length) {
+ var chunk_size = Math.min(concatenated.length - written, Math.ceil(Math.random()*20));
+ output.push(concatenated.slice(written, written + chunk_size));
+ written += chunk_size;
+ }
+
+ return output;
+};
diff --git a/testing/xpcshell/node-ip/.gitignore b/testing/xpcshell/node-ip/.gitignore
new file mode 100644
index 0000000000..1ca957177f
--- /dev/null
+++ b/testing/xpcshell/node-ip/.gitignore
@@ -0,0 +1,2 @@
+node_modules/
+npm-debug.log
diff --git a/testing/xpcshell/node-ip/.jscsrc b/testing/xpcshell/node-ip/.jscsrc
new file mode 100644
index 0000000000..dbaae20574
--- /dev/null
+++ b/testing/xpcshell/node-ip/.jscsrc
@@ -0,0 +1,46 @@
+{
+ "disallowKeywordsOnNewLine": [ "else" ],
+ "disallowMixedSpacesAndTabs": true,
+ "disallowMultipleLineStrings": true,
+ "disallowMultipleVarDecl": true,
+ "disallowNewlineBeforeBlockStatements": true,
+ "disallowQuotedKeysInObjects": true,
+ "disallowSpaceAfterObjectKeys": true,
+ "disallowSpaceAfterPrefixUnaryOperators": true,
+ "disallowSpaceBeforePostfixUnaryOperators": true,
+ "disallowSpacesInCallExpression": true,
+ "disallowTrailingComma": true,
+ "disallowTrailingWhitespace": true,
+ "disallowYodaConditions": true,
+
+ "requireCommaBeforeLineBreak": true,
+ "requireOperatorBeforeLineBreak": true,
+ "requireSpaceAfterBinaryOperators": true,
+ "requireSpaceAfterKeywords": [ "if", "for", "while", "else", "try", "catch" ],
+ "requireSpaceAfterLineComment": true,
+ "requireSpaceBeforeBinaryOperators": true,
+ "requireSpaceBeforeBlockStatements": true,
+ "requireSpaceBeforeKeywords": [ "else", "catch" ],
+ "requireSpaceBeforeObjectValues": true,
+ "requireSpaceBetweenArguments": true,
+ "requireSpacesInAnonymousFunctionExpression": {
+ "beforeOpeningCurlyBrace": true
+ },
+ "requireSpacesInFunctionDeclaration": {
+ "beforeOpeningCurlyBrace": true
+ },
+ "requireSpacesInFunctionExpression": {
+ "beforeOpeningCurlyBrace": true
+ },
+ "requireSpacesInConditionalExpression": true,
+ "requireSpacesInForStatement": true,
+ "requireSpacesInsideArrayBrackets": "all",
+ "requireSpacesInsideObjectBrackets": "all",
+ "requireDotNotation": true,
+
+ "maximumLineLength": 80,
+ "validateIndentation": 2,
+ "validateLineBreaks": "LF",
+ "validateParameterSeparator": ", ",
+ "validateQuoteMarks": "'"
+}
diff --git a/testing/xpcshell/node-ip/.jshintrc b/testing/xpcshell/node-ip/.jshintrc
new file mode 100644
index 0000000000..7e97390295
--- /dev/null
+++ b/testing/xpcshell/node-ip/.jshintrc
@@ -0,0 +1,89 @@
+{
+ // JSHint Default Configuration File (as on JSHint website)
+ // See http://jshint.com/docs/ for more details
+
+ "maxerr" : 50, // {int} Maximum error before stopping
+
+ // Enforcing
+ "bitwise" : false, // true: Prohibit bitwise operators (&, |, ^, etc.)
+ "camelcase" : false, // true: Identifiers must be in camelCase
+ "curly" : false, // true: Require {} for every new block or scope
+ "eqeqeq" : true, // true: Require triple equals (===) for comparison
+ "forin" : true, // true: Require filtering for..in loops with obj.hasOwnProperty()
+ "freeze" : true, // true: prohibits overwriting prototypes of native objects such as Array, Date etc.
+ "immed" : false, // true: Require immediate invocations to be wrapped in parens e.g. `(function () { } ());`
+ "indent" : 2, // {int} Number of spaces to use for indentation
+ "latedef" : true, // true: Require variables/functions to be defined before being used
+ "newcap" : true, // true: Require capitalization of all constructor functions e.g. `new F()`
+ "noarg" : true, // true: Prohibit use of `arguments.caller` and `arguments.callee`
+ "noempty" : false, // true: Prohibit use of empty blocks
+ "nonbsp" : true, // true: Prohibit "non-breaking whitespace" characters.
+ "nonew" : false, // true: Prohibit use of constructors for side-effects (without assignment)
+ "plusplus" : false, // true: Prohibit use of `++` & `--`
+ "quotmark" : "single", // Quotation mark consistency:
+ // false : do nothing (default)
+ // true : ensure whatever is used is consistent
+ // "single" : require single quotes
+ // "double" : require double quotes
+ "undef" : true, // true: Require all non-global variables to be declared (prevents global leaks)
+ "unused" : true, // true: Require all defined variables be used
+ "strict" : true, // true: Requires all functions run in ES5 Strict Mode
+ "maxparams" : false, // {int} Max number of formal params allowed per function
+ "maxdepth" : 3, // {int} Max depth of nested blocks (within functions)
+ "maxstatements" : false, // {int} Max number statements per function
+ "maxcomplexity" : false, // {int} Max cyclomatic complexity per function
+ "maxlen" : false, // {int} Max number of characters per line
+
+ // Relaxing
+ "asi" : false, // true: Tolerate Automatic Semicolon Insertion (no semicolons)
+ "boss" : false, // true: Tolerate assignments where comparisons would be expected
+ "debug" : false, // true: Allow debugger statements e.g. browser breakpoints.
+ "eqnull" : false, // true: Tolerate use of `== null`
+ "es5" : false, // true: Allow ES5 syntax (ex: getters and setters)
+ "esnext" : false, // true: Allow ES.next (ES6) syntax (ex: `const`)
+ "moz" : false, // true: Allow Mozilla specific syntax (extends and overrides esnext features)
+ // (ex: `for each`, multiple try/catch, function expression…)
+ "evil" : false, // true: Tolerate use of `eval` and `new Function()`
+ "expr" : false, // true: Tolerate `ExpressionStatement` as Programs
+ "funcscope" : false, // true: Tolerate defining variables inside control statements
+ "globalstrict" : false, // true: Allow global "use strict" (also enables 'strict')
+ "iterator" : false, // true: Tolerate using the `__iterator__` property
+ "lastsemic" : false, // true: Tolerate omitting a semicolon for the last statement of a 1-line block
+ "laxbreak" : false, // true: Tolerate possibly unsafe line breakings
+ "laxcomma" : false, // true: Tolerate comma-first style coding
+ "loopfunc" : false, // true: Tolerate functions being defined in loops
+ "multistr" : false, // true: Tolerate multi-line strings
+ "noyield" : false, // true: Tolerate generator functions with no yield statement in them.
+ "notypeof" : false, // true: Tolerate invalid typeof operator values
+ "proto" : false, // true: Tolerate using the `__proto__` property
+ "scripturl" : false, // true: Tolerate script-targeted URLs
+ "shadow" : true, // true: Allows re-define variables later in code e.g. `var x=1; x=2;`
+ "sub" : false, // true: Tolerate using `[]` notation when it can still be expressed in dot notation
+ "supernew" : false, // true: Tolerate `new function () { ... };` and `new Object;`
+ "validthis" : false, // true: Tolerate using this in a non-constructor function
+
+ // Environments
+ "browser" : true, // Web Browser (window, document, etc)
+ "browserify" : true, // Browserify (node.js code in the browser)
+ "couch" : false, // CouchDB
+ "devel" : true, // Development/debugging (alert, confirm, etc)
+ "dojo" : false, // Dojo Toolkit
+ "jasmine" : false, // Jasmine
+ "jquery" : false, // jQuery
+ "mocha" : true, // Mocha
+ "mootools" : false, // MooTools
+ "node" : true, // Node.js
+ "nonstandard" : false, // Widely adopted globals (escape, unescape, etc)
+ "prototypejs" : false, // Prototype and Scriptaculous
+ "qunit" : false, // QUnit
+ "rhino" : false, // Rhino
+ "shelljs" : false, // ShellJS
+ "worker" : false, // Web Workers
+ "wsh" : false, // Windows Scripting Host
+ "yui" : false, // Yahoo User Interface
+
+ // Custom Globals
+ "globals" : {
+ "module": true
+ } // additional predefined global variables
+}
diff --git a/testing/xpcshell/node-ip/.travis.yml b/testing/xpcshell/node-ip/.travis.yml
new file mode 100644
index 0000000000..a3a8fad6b6
--- /dev/null
+++ b/testing/xpcshell/node-ip/.travis.yml
@@ -0,0 +1,15 @@
+sudo: false
+language: node_js
+node_js:
+ - "0.8"
+ - "0.10"
+ - "0.12"
+ - "4"
+ - "6"
+
+before_install:
+ - travis_retry npm install -g npm@2.14.5
+ - travis_retry npm install
+
+script:
+ - npm test
diff --git a/testing/xpcshell/node-ip/README.md b/testing/xpcshell/node-ip/README.md
new file mode 100644
index 0000000000..22e5819ffa
--- /dev/null
+++ b/testing/xpcshell/node-ip/README.md
@@ -0,0 +1,90 @@
+# IP
+[![](https://badge.fury.io/js/ip.svg)](https://www.npmjs.com/package/ip)
+
+IP address utilities for node.js
+
+## Installation
+
+### npm
+```shell
+npm install ip
+```
+
+### git
+
+```shell
+git clone https://github.com/indutny/node-ip.git
+```
+
+## Usage
+Get your IP address, compare IP addresses, validate IP addresses, etc.
+
+```js
+var ip = require('ip');
+
+ip.address() // my ip address
+ip.isEqual('::1', '::0:1'); // true
+ip.toBuffer('127.0.0.1') // Buffer([127, 0, 0, 1])
+ip.toString(new Buffer([127, 0, 0, 1])) // 127.0.0.1
+ip.fromPrefixLen(24) // 255.255.255.0
+ip.mask('192.168.1.134', '255.255.255.0') // 192.168.1.0
+ip.cidr('192.168.1.134/26') // 192.168.1.128
+ip.not('255.255.255.0') // 0.0.0.255
+ip.or('192.168.1.134', '0.0.0.255') // 192.168.1.255
+ip.isPrivate('127.0.0.1') // true
+ip.isV4Format('127.0.0.1'); // true
+ip.isV6Format('::ffff:127.0.0.1'); // true
+
+// operate on buffers in-place
+var buf = new Buffer(128);
+var offset = 64;
+ip.toBuffer('127.0.0.1', buf, offset); // [127, 0, 0, 1] at offset 64
+ip.toString(buf, offset, 4); // '127.0.0.1'
+
+// subnet information
+ip.subnet('192.168.1.134', '255.255.255.192')
+// { networkAddress: '192.168.1.128',
+// firstAddress: '192.168.1.129',
+// lastAddress: '192.168.1.190',
+// broadcastAddress: '192.168.1.191',
+// subnetMask: '255.255.255.192',
+// subnetMaskLength: 26,
+// numHosts: 62,
+// length: 64,
+// contains: function(addr){...} }
+ip.cidrSubnet('192.168.1.134/26')
+// Same as previous.
+
+// range checking
+ip.cidrSubnet('192.168.1.134/26').contains('192.168.1.190') // true
+
+
+// ipv4 long conversion
+ip.toLong('127.0.0.1'); // 2130706433
+ip.fromLong(2130706433); // '127.0.0.1'
+```
+
+### License
+
+This software is licensed under the MIT License.
+
+Copyright Fedor Indutny, 2012.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to permit
+persons to whom the Software is furnished to do so, subject to the
+following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
+NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/xpcshell/node-ip/lib/ip.js b/testing/xpcshell/node-ip/lib/ip.js
new file mode 100644
index 0000000000..a66a2555c6
--- /dev/null
+++ b/testing/xpcshell/node-ip/lib/ip.js
@@ -0,0 +1,416 @@
+'use strict';
+
+var ip = exports;
+var Buffer = require('buffer').Buffer;
+var os = require('os');
+
+ip.toBuffer = function(ip, buff, offset) {
+ offset = ~~offset;
+
+ var result;
+
+ if (this.isV4Format(ip)) {
+ result = buff || Buffer.alloc(offset + 4);
+ ip.split(/\./g).map(function(byte) {
+ result[offset++] = parseInt(byte, 10) & 0xff;
+ });
+ } else if (this.isV6Format(ip)) {
+ var sections = ip.split(':', 8);
+
+ var i;
+ for (i = 0; i < sections.length; i++) {
+ var isv4 = this.isV4Format(sections[i]);
+ var v4Buffer;
+
+ if (isv4) {
+ v4Buffer = this.toBuffer(sections[i]);
+ sections[i] = v4Buffer.slice(0, 2).toString('hex');
+ }
+
+ if (v4Buffer && ++i < 8) {
+ sections.splice(i, 0, v4Buffer.slice(2, 4).toString('hex'));
+ }
+ }
+
+ if (sections[0] === '') {
+ while (sections.length < 8) sections.unshift('0');
+ } else if (sections[sections.length - 1] === '') {
+ while (sections.length < 8) sections.push('0');
+ } else if (sections.length < 8) {
+ for (i = 0; i < sections.length && sections[i] !== ''; i++);
+ var argv = [ i, 1 ];
+ for (i = 9 - sections.length; i > 0; i--) {
+ argv.push('0');
+ }
+ sections.splice.apply(sections, argv);
+ }
+
+ result = buff || Buffer.alloc(offset + 16);
+ for (i = 0; i < sections.length; i++) {
+ var word = parseInt(sections[i], 16);
+ result[offset++] = (word >> 8) & 0xff;
+ result[offset++] = word & 0xff;
+ }
+ }
+
+ if (!result) {
+ throw Error('Invalid ip address: ' + ip);
+ }
+
+ return result;
+};
+
+ip.toString = function(buff, offset, length) {
+ offset = ~~offset;
+ length = length || (buff.length - offset);
+
+ var result = [];
+ if (length === 4) {
+ // IPv4
+ for (var i = 0; i < length; i++) {
+ result.push(buff[offset + i]);
+ }
+ result = result.join('.');
+ } else if (length === 16) {
+ // IPv6
+ for (var i = 0; i < length; i += 2) {
+ result.push(buff.readUInt16BE(offset + i).toString(16));
+ }
+ result = result.join(':');
+ result = result.replace(/(^|:)0(:0)*:0(:|$)/, '$1::$3');
+ result = result.replace(/:{3,4}/, '::');
+ }
+
+ return result;
+};
+
+var ipv4Regex = /^(\d{1,3}\.){3,3}\d{1,3}$/;
+var ipv6Regex =
+ /^(::)?(((\d{1,3}\.){3}(\d{1,3}){1})?([0-9a-f]){0,4}:{0,2}){1,8}(::)?$/i;
+
+ip.isV4Format = function(ip) {
+ return ipv4Regex.test(ip);
+};
+
+ip.isV6Format = function(ip) {
+ return ipv6Regex.test(ip);
+};
+function _normalizeFamily(family) {
+ return family ? family.toLowerCase() : 'ipv4';
+}
+
+ip.fromPrefixLen = function(prefixlen, family) {
+ if (prefixlen > 32) {
+ family = 'ipv6';
+ } else {
+ family = _normalizeFamily(family);
+ }
+
+ var len = 4;
+ if (family === 'ipv6') {
+ len = 16;
+ }
+ var buff = Buffer.alloc(len);
+
+ for (var i = 0, n = buff.length; i < n; ++i) {
+ var bits = 8;
+ if (prefixlen < 8) {
+ bits = prefixlen;
+ }
+ prefixlen -= bits;
+
+ buff[i] = ~(0xff >> bits) & 0xff;
+ }
+
+ return ip.toString(buff);
+};
+
+ip.mask = function(addr, mask) {
+ addr = ip.toBuffer(addr);
+ mask = ip.toBuffer(mask);
+
+ var result = Buffer.alloc(Math.max(addr.length, mask.length));
+
+ var i = 0;
+ // Same protocol - do bitwise and
+ if (addr.length === mask.length) {
+ for (i = 0; i < addr.length; i++) {
+ result[i] = addr[i] & mask[i];
+ }
+ } else if (mask.length === 4) {
+ // IPv6 address and IPv4 mask
+ // (Mask low bits)
+ for (i = 0; i < mask.length; i++) {
+ result[i] = addr[addr.length - 4 + i] & mask[i];
+ }
+ } else {
+ // IPv6 mask and IPv4 addr
+ for (var i = 0; i < result.length - 6; i++) {
+ result[i] = 0;
+ }
+
+ // ::ffff:ipv4
+ result[10] = 0xff;
+ result[11] = 0xff;
+ for (i = 0; i < addr.length; i++) {
+ result[i + 12] = addr[i] & mask[i + 12];
+ }
+ i = i + 12;
+ }
+ for (; i < result.length; i++)
+ result[i] = 0;
+
+ return ip.toString(result);
+};
+
+ip.cidr = function(cidrString) {
+ var cidrParts = cidrString.split('/');
+
+ var addr = cidrParts[0];
+ if (cidrParts.length !== 2)
+ throw new Error('invalid CIDR subnet: ' + addr);
+
+ var mask = ip.fromPrefixLen(parseInt(cidrParts[1], 10));
+
+ return ip.mask(addr, mask);
+};
+
+ip.subnet = function(addr, mask) {
+ var networkAddress = ip.toLong(ip.mask(addr, mask));
+
+ // Calculate the mask's length.
+ var maskBuffer = ip.toBuffer(mask);
+ var maskLength = 0;
+
+ for (var i = 0; i < maskBuffer.length; i++) {
+ if (maskBuffer[i] === 0xff) {
+ maskLength += 8;
+ } else {
+ var octet = maskBuffer[i] & 0xff;
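+      // Count the leading one bits of a partially-set mask octet by shifting
+      // left until the byte is exhausted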
+ while (octet) {
+ octet = (octet << 1) & 0xff;
+ maskLength++;
+ }
+ }
+ }
+
+ var numberOfAddresses = Math.pow(2, 32 - maskLength);
+
+ return {
+ networkAddress: ip.fromLong(networkAddress),
+ firstAddress: numberOfAddresses <= 2 ?
+ ip.fromLong(networkAddress) :
+ ip.fromLong(networkAddress + 1),
+ lastAddress: numberOfAddresses <= 2 ?
+ ip.fromLong(networkAddress + numberOfAddresses - 1) :
+ ip.fromLong(networkAddress + numberOfAddresses - 2),
+ broadcastAddress: ip.fromLong(networkAddress + numberOfAddresses - 1),
+ subnetMask: mask,
+ subnetMaskLength: maskLength,
+ numHosts: numberOfAddresses <= 2 ?
+ numberOfAddresses : numberOfAddresses - 2,
+ length: numberOfAddresses,
+ contains: function(other) {
+ return networkAddress === ip.toLong(ip.mask(other, mask));
+ }
+ };
+};
+
+ip.cidrSubnet = function(cidrString) {
+ var cidrParts = cidrString.split('/');
+
+ var addr = cidrParts[0];
+ if (cidrParts.length !== 2)
+ throw new Error('invalid CIDR subnet: ' + addr);
+
+ var mask = ip.fromPrefixLen(parseInt(cidrParts[1], 10));
+
+ return ip.subnet(addr, mask);
+};
+
+ip.not = function(addr) {
+ var buff = ip.toBuffer(addr);
+ for (var i = 0; i < buff.length; i++) {
+ buff[i] = 0xff ^ buff[i];
+ }
+ return ip.toString(buff);
+};
+
+ip.or = function(a, b) {
+ a = ip.toBuffer(a);
+ b = ip.toBuffer(b);
+
+ // same protocol
+ if (a.length === b.length) {
+ for (var i = 0; i < a.length; ++i) {
+ a[i] |= b[i];
+ }
+ return ip.toString(a);
+
+ // mixed protocols
+ } else {
+ var buff = a;
+ var other = b;
+ if (b.length > a.length) {
+ buff = b;
+ other = a;
+ }
+
+ var offset = buff.length - other.length;
+ for (var i = offset; i < buff.length; ++i) {
+ buff[i] |= other[i - offset];
+ }
+
+ return ip.toString(buff);
+ }
+};
+
+ip.isEqual = function(a, b) {
+ a = ip.toBuffer(a);
+ b = ip.toBuffer(b);
+
+ // Same protocol
+ if (a.length === b.length) {
+ for (var i = 0; i < a.length; i++) {
+ if (a[i] !== b[i]) return false;
+ }
+ return true;
+ }
+
+ // Swap
+ if (b.length === 4) {
+ var t = b;
+ b = a;
+ a = t;
+ }
+
+ // a - IPv4, b - IPv6
+ for (var i = 0; i < 10; i++) {
+ if (b[i] !== 0) return false;
+ }
+
+ var word = b.readUInt16BE(10);
+ if (word !== 0 && word !== 0xffff) return false;
+
+ for (var i = 0; i < 4; i++) {
+ if (a[i] !== b[i + 12]) return false;
+ }
+
+ return true;
+};
+
+ip.isPrivate = function(addr) {
+ return /^(::f{4}:)?10\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/i
+ .test(addr) ||
+ /^(::f{4}:)?192\.168\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) ||
+ /^(::f{4}:)?172\.(1[6-9]|2\d|30|31)\.([0-9]{1,3})\.([0-9]{1,3})$/i
+ .test(addr) ||
+ /^(::f{4}:)?127\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) ||
+ /^(::f{4}:)?169\.254\.([0-9]{1,3})\.([0-9]{1,3})$/i.test(addr) ||
+ /^f[cd][0-9a-f]{2}:/i.test(addr) ||
+ /^fe80:/i.test(addr) ||
+ /^::1$/.test(addr) ||
+ /^::$/.test(addr);
+};
+
+ip.isPublic = function(addr) {
+ return !ip.isPrivate(addr);
+};
+
+ip.isLoopback = function(addr) {
+ return /^(::f{4}:)?127\.([0-9]{1,3})\.([0-9]{1,3})\.([0-9]{1,3})/
+ .test(addr) ||
+ /^fe80::1$/.test(addr) ||
+ /^::1$/.test(addr) ||
+ /^::$/.test(addr);
+};
+
+ip.loopback = function(family) {
+ //
+ // Default to `ipv4`
+ //
+ family = _normalizeFamily(family);
+
+ if (family !== 'ipv4' && family !== 'ipv6') {
+ throw new Error('family must be ipv4 or ipv6');
+ }
+
+ return family === 'ipv4' ? '127.0.0.1' : 'fe80::1';
+};
+
+//
+// ### function address (name, family)
+// #### @name {string|'public'|'private'} **Optional** Name or security
+// of the network interface.
+// #### @family {ipv4|ipv6} **Optional** IP family of the address (defaults
+// to ipv4).
+//
+// Returns the address for the network interface on the current system with
+// the specified `name`:
+// * String: First `family` address of the interface.
+//             If not found, returns `undefined`.
+// * 'public': the first public ip address of family.
+// * 'private': the first private ip address of family.
+// * undefined: First address with `ipv4` or loopback address `127.0.0.1`.
+//
+ip.address = function(name, family) {
+ var interfaces = os.networkInterfaces();
+ var all;
+
+ //
+ // Default to `ipv4`
+ //
+ family = _normalizeFamily(family);
+
+ //
+ // If a specific network interface has been named,
+ // return the address.
+ //
+ if (name && name !== 'private' && name !== 'public') {
+ var res = interfaces[name].filter(function(details) {
+ var itemFamily = details.family.toLowerCase();
+ return itemFamily === family;
+ });
+ if (res.length === 0)
+ return undefined;
+ return res[0].address;
+ }
+
+ var all = Object.keys(interfaces).map(function (nic) {
+ //
+ // Note: name will only be `public` or `private`
+ // when this is called.
+ //
+ var addresses = interfaces[nic].filter(function (details) {
+ details.family = details.family.toLowerCase();
+ if (details.family !== family || ip.isLoopback(details.address)) {
+ return false;
+ } else if (!name) {
+ return true;
+ }
+
+ return name === 'public' ? ip.isPrivate(details.address) :
+ ip.isPublic(details.address);
+ });
+
+ return addresses.length ? addresses[0].address : undefined;
+ }).filter(Boolean);
+
+ return !all.length ? ip.loopback(family) : all[0];
+};
+
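+// Convert a dotted-quad IPv4 string to its unsigned 32-bit integer value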
+ip.toLong = function(ip) {
+ var ipl = 0;
+ ip.split('.').forEach(function(octet) {
+ ipl <<= 8;
+ ipl += parseInt(octet);
+ });
+ return(ipl >>> 0);
+};
+
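+// Convert an unsigned 32-bit integer back to its dotted-quad IPv4 string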
+ip.fromLong = function(ipl) {
+ return ((ipl >>> 24) + '.' +
+ (ipl >> 16 & 255) + '.' +
+ (ipl >> 8 & 255) + '.' +
+ (ipl & 255) );
+};
diff --git a/testing/xpcshell/node-ip/package.json b/testing/xpcshell/node-ip/package.json
new file mode 100644
index 0000000000..c783fdd437
--- /dev/null
+++ b/testing/xpcshell/node-ip/package.json
@@ -0,0 +1,21 @@
+{
+ "name": "ip",
+ "version": "1.1.5",
+ "author": "Fedor Indutny <fedor@indutny.com>",
+ "homepage": "https://github.com/indutny/node-ip",
+ "repository": {
+ "type": "git",
+ "url": "http://github.com/indutny/node-ip.git"
+ },
+ "main": "lib/ip",
+ "devDependencies": {
+ "jscs": "^2.1.1",
+ "jshint": "^2.8.0",
+ "mocha": "~1.3.2"
+ },
+ "scripts": {
+ "test": "jscs lib/*.js test/*.js && jshint lib/*.js && mocha --reporter spec test/*-test.js",
+ "fix": "jscs lib/*.js test/*.js --fix"
+ },
+ "license": "MIT"
+}
diff --git a/testing/xpcshell/node-ip/test/api-test.js b/testing/xpcshell/node-ip/test/api-test.js
new file mode 100644
index 0000000000..1af09a4f6f
--- /dev/null
+++ b/testing/xpcshell/node-ip/test/api-test.js
@@ -0,0 +1,407 @@
+'use strict';
+
+var ip = require('..');
+var assert = require('assert');
+var net = require('net');
+var os = require('os');
+
+describe('IP library for node.js', function() {
+ describe('toBuffer()/toString() methods', function() {
+ it('should convert to buffer IPv4 address', function() {
+ var buf = ip.toBuffer('127.0.0.1');
+ assert.equal(buf.toString('hex'), '7f000001');
+ assert.equal(ip.toString(buf), '127.0.0.1');
+ });
+
+ it('should convert to buffer IPv4 address in-place', function() {
+ var buf = Buffer.alloc(128);
+ var offset = 64;
+ ip.toBuffer('127.0.0.1', buf, offset);
+ assert.equal(buf.toString('hex', offset, offset + 4), '7f000001');
+ assert.equal(ip.toString(buf, offset, 4), '127.0.0.1');
+ });
+
+ it('should convert to buffer IPv6 address', function() {
+ var buf = ip.toBuffer('::1');
+ assert(/(00){15,15}01/.test(buf.toString('hex')));
+ assert.equal(ip.toString(buf), '::1');
+ assert.equal(ip.toString(ip.toBuffer('1::')), '1::');
+ assert.equal(ip.toString(ip.toBuffer('abcd::dcba')), 'abcd::dcba');
+ });
+
+ it('should convert to buffer IPv6 address in-place', function() {
+ var buf = Buffer.alloc(128);
+ var offset = 64;
+ ip.toBuffer('::1', buf, offset);
+ assert(/(00){15,15}01/.test(buf.toString('hex', offset, offset + 16)));
+ assert.equal(ip.toString(buf, offset, 16), '::1');
+ assert.equal(ip.toString(ip.toBuffer('1::', buf, offset),
+ offset, 16), '1::');
+ assert.equal(ip.toString(ip.toBuffer('abcd::dcba', buf, offset),
+ offset, 16), 'abcd::dcba');
+ });
+
+ it('should convert to buffer IPv6 mapped IPv4 address', function() {
+ var buf = ip.toBuffer('::ffff:127.0.0.1');
+ assert.equal(buf.toString('hex'), '00000000000000000000ffff7f000001');
+ assert.equal(ip.toString(buf), '::ffff:7f00:1');
+
+ buf = ip.toBuffer('ffff::127.0.0.1');
+ assert.equal(buf.toString('hex'), 'ffff000000000000000000007f000001');
+ assert.equal(ip.toString(buf), 'ffff::7f00:1');
+
+ buf = ip.toBuffer('0:0:0:0:0:ffff:127.0.0.1');
+ assert.equal(buf.toString('hex'), '00000000000000000000ffff7f000001');
+ assert.equal(ip.toString(buf), '::ffff:7f00:1');
+ });
+ });
+
+ describe('fromPrefixLen() method', function() {
+ it('should create IPv4 mask', function() {
+ assert.equal(ip.fromPrefixLen(24), '255.255.255.0');
+ });
+ it('should create IPv6 mask', function() {
+ assert.equal(ip.fromPrefixLen(64), 'ffff:ffff:ffff:ffff::');
+ });
+ it('should create IPv6 mask explicitly', function() {
+ assert.equal(ip.fromPrefixLen(24, 'IPV6'), 'ffff:ff00::');
+ });
+ });
+
+ describe('not() method', function() {
+ it('should reverse bits in address', function() {
+ assert.equal(ip.not('255.255.255.0'), '0.0.0.255');
+ });
+ });
+
+ describe('or() method', function() {
+ it('should or bits in ipv4 addresses', function() {
+ assert.equal(ip.or('0.0.0.255', '192.168.1.10'), '192.168.1.255');
+ });
+ it('should or bits in ipv6 addresses', function() {
+ assert.equal(ip.or('::ff', '::abcd:dcba:abcd:dcba'),
+ '::abcd:dcba:abcd:dcff');
+ });
+ it('should or bits in mixed addresses', function() {
+ assert.equal(ip.or('0.0.0.255', '::abcd:dcba:abcd:dcba'),
+ '::abcd:dcba:abcd:dcff');
+ });
+ });
+
+ describe('mask() method', function() {
+ it('should mask bits in address', function() {
+ assert.equal(ip.mask('192.168.1.134', '255.255.255.0'), '192.168.1.0');
+ assert.equal(ip.mask('192.168.1.134', '::ffff:ff00'), '::ffff:c0a8:100');
+ });
+
+ it('should not leak data', function() {
+ for (var i = 0; i < 10; i++)
+ assert.equal(ip.mask('::1', '0.0.0.0'), '::');
+ });
+ });
+
+ describe('subnet() method', function() {
+ // Test cases calculated with http://www.subnet-calculator.com/
+ var ipv4Subnet = ip.subnet('192.168.1.134', '255.255.255.192');
+
+ it('should compute ipv4 network address', function() {
+ assert.equal(ipv4Subnet.networkAddress, '192.168.1.128');
+ });
+
+ it('should compute ipv4 network\'s first address', function() {
+ assert.equal(ipv4Subnet.firstAddress, '192.168.1.129');
+ });
+
+ it('should compute ipv4 network\'s last address', function() {
+ assert.equal(ipv4Subnet.lastAddress, '192.168.1.190');
+ });
+
+ it('should compute ipv4 broadcast address', function() {
+ assert.equal(ipv4Subnet.broadcastAddress, '192.168.1.191');
+ });
+
+ it('should compute ipv4 subnet number of addresses', function() {
+ assert.equal(ipv4Subnet.length, 64);
+ });
+
+ it('should compute ipv4 subnet number of addressable hosts', function() {
+ assert.equal(ipv4Subnet.numHosts, 62);
+ });
+
+ it('should compute ipv4 subnet mask', function() {
+ assert.equal(ipv4Subnet.subnetMask, '255.255.255.192');
+ });
+
+ it('should compute ipv4 subnet mask\'s length', function() {
+ assert.equal(ipv4Subnet.subnetMaskLength, 26);
+ });
+
+ it('should know whether a subnet contains an address', function() {
+ assert.equal(ipv4Subnet.contains('192.168.1.180'), true);
+ });
+
+ it('should know whether a subnet does not contain an address', function() {
+ assert.equal(ipv4Subnet.contains('192.168.1.195'), false);
+ });
+ });
+
+ describe('subnet() method with mask length 32', function() {
+ // Test cases calculated with http://www.subnet-calculator.com/
+ var ipv4Subnet = ip.subnet('192.168.1.134', '255.255.255.255');
+ it('should compute ipv4 network\'s first address', function() {
+ assert.equal(ipv4Subnet.firstAddress, '192.168.1.134');
+ });
+
+ it('should compute ipv4 network\'s last address', function() {
+ assert.equal(ipv4Subnet.lastAddress, '192.168.1.134');
+ });
+
+ it('should compute ipv4 subnet number of addressable hosts', function() {
+ assert.equal(ipv4Subnet.numHosts, 1);
+ });
+ });
+
+ describe('subnet() method with mask length 31', function() {
+ // Test cases calculated with http://www.subnet-calculator.com/
+ var ipv4Subnet = ip.subnet('192.168.1.134', '255.255.255.254');
+ it('should compute ipv4 network\'s first address', function() {
+ assert.equal(ipv4Subnet.firstAddress, '192.168.1.134');
+ });
+
+ it('should compute ipv4 network\'s last address', function() {
+ assert.equal(ipv4Subnet.lastAddress, '192.168.1.135');
+ });
+
+ it('should compute ipv4 subnet number of addressable hosts', function() {
+ assert.equal(ipv4Subnet.numHosts, 2);
+ });
+ });
+
+ describe('cidrSubnet() method', function() {
+ // Test cases calculated with http://www.subnet-calculator.com/
+ var ipv4Subnet = ip.cidrSubnet('192.168.1.134/26');
+
+ it('should compute an ipv4 network address', function() {
+ assert.equal(ipv4Subnet.networkAddress, '192.168.1.128');
+ });
+
+ it('should compute an ipv4 network\'s first address', function() {
+ assert.equal(ipv4Subnet.firstAddress, '192.168.1.129');
+ });
+
+ it('should compute an ipv4 network\'s last address', function() {
+ assert.equal(ipv4Subnet.lastAddress, '192.168.1.190');
+ });
+
+ it('should compute an ipv4 broadcast address', function() {
+ assert.equal(ipv4Subnet.broadcastAddress, '192.168.1.191');
+ });
+
+ it('should compute an ipv4 subnet number of addresses', function() {
+ assert.equal(ipv4Subnet.length, 64);
+ });
+
+ it('should compute an ipv4 subnet number of addressable hosts', function() {
+ assert.equal(ipv4Subnet.numHosts, 62);
+ });
+
+ it('should compute an ipv4 subnet mask', function() {
+ assert.equal(ipv4Subnet.subnetMask, '255.255.255.192');
+ });
+
+ it('should compute an ipv4 subnet mask\'s length', function() {
+ assert.equal(ipv4Subnet.subnetMaskLength, 26);
+ });
+
+ it('should know whether a subnet contains an address', function() {
+ assert.equal(ipv4Subnet.contains('192.168.1.180'), true);
+ });
+
+    it('should know whether a subnet does not contain an address', function() {
+ assert.equal(ipv4Subnet.contains('192.168.1.195'), false);
+ });
+
+ });
+
+ describe('cidr() method', function() {
+ it('should mask address in CIDR notation', function() {
+ assert.equal(ip.cidr('192.168.1.134/26'), '192.168.1.128');
+ assert.equal(ip.cidr('2607:f0d0:1002:51::4/56'), '2607:f0d0:1002::');
+ });
+ });
+
+ describe('isEqual() method', function() {
+ it('should check if addresses are equal', function() {
+ assert(ip.isEqual('127.0.0.1', '::7f00:1'));
+ assert(!ip.isEqual('127.0.0.1', '::7f00:2'));
+ assert(ip.isEqual('127.0.0.1', '::ffff:7f00:1'));
+ assert(!ip.isEqual('127.0.0.1', '::ffaf:7f00:1'));
+ assert(ip.isEqual('::ffff:127.0.0.1', '::ffff:127.0.0.1'));
+ assert(ip.isEqual('::ffff:127.0.0.1', '127.0.0.1'));
+ });
+ });
+
+
+ describe('isPrivate() method', function() {
+ it('should check if an address is localhost', function() {
+ assert.equal(ip.isPrivate('127.0.0.1'), true);
+ });
+
+ it('should check if an address is from a 192.168.x.x network', function() {
+ assert.equal(ip.isPrivate('192.168.0.123'), true);
+ assert.equal(ip.isPrivate('192.168.122.123'), true);
+ assert.equal(ip.isPrivate('192.162.1.2'), false);
+ });
+
+ it('should check if an address is from a 172.16.x.x network', function() {
+ assert.equal(ip.isPrivate('172.16.0.5'), true);
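+// Return a random integer in the inclusive range [min, max]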
+ assert.equal(ip.isPrivate('172.16.123.254'), true);
+ assert.equal(ip.isPrivate('171.16.0.5'), false);
+ assert.equal(ip.isPrivate('172.25.232.15'), true);
+ assert.equal(ip.isPrivate('172.15.0.5'), false);
+ assert.equal(ip.isPrivate('172.32.0.5'), false);
+ });
+
+ it('should check if an address is from a 169.254.x.x network', function() {
+ assert.equal(ip.isPrivate('169.254.2.3'), true);
+ assert.equal(ip.isPrivate('169.254.221.9'), true);
+ assert.equal(ip.isPrivate('168.254.2.3'), false);
+ });
+
+ it('should check if an address is from a 10.x.x.x network', function() {
+ assert.equal(ip.isPrivate('10.0.2.3'), true);
+ assert.equal(ip.isPrivate('10.1.23.45'), true);
+ assert.equal(ip.isPrivate('12.1.2.3'), false);
+ });
+
+ it('should check if an address is from a private IPv6 network', function() {
+ assert.equal(ip.isPrivate('fd12:3456:789a:1::1'), true);
+ assert.equal(ip.isPrivate('fe80::f2de:f1ff:fe3f:307e'), true);
+ assert.equal(ip.isPrivate('::ffff:10.100.1.42'), true);
+ assert.equal(ip.isPrivate('::FFFF:172.16.200.1'), true);
+ assert.equal(ip.isPrivate('::ffff:192.168.0.1'), true);
+ });
+
+ it('should check if an address is from the internet', function() {
+ assert.equal(ip.isPrivate('165.225.132.33'), false); // joyent.com
+ });
+
+ it('should check if an address is a loopback IPv6 address', function() {
+ assert.equal(ip.isPrivate('::'), true);
+ assert.equal(ip.isPrivate('::1'), true);
+ assert.equal(ip.isPrivate('fe80::1'), true);
+ });
+ });
+
+ describe('loopback() method', function() {
+ describe('undefined', function() {
+ it('should respond with 127.0.0.1', function() {
+ assert.equal(ip.loopback(), '127.0.0.1')
+ });
+ });
+
+ describe('ipv4', function() {
+ it('should respond with 127.0.0.1', function() {
+ assert.equal(ip.loopback('ipv4'), '127.0.0.1')
+ });
+ });
+
+ describe('ipv6', function() {
+ it('should respond with fe80::1', function() {
+ assert.equal(ip.loopback('ipv6'), 'fe80::1')
+ });
+ });
+ });
+
+ describe('isLoopback() method', function() {
+ describe('127.0.0.1', function() {
+ it('should respond with true', function() {
+ assert.ok(ip.isLoopback('127.0.0.1'))
+ });
+ });
+
+ describe('127.8.8.8', function () {
+ it('should respond with true', function () {
+ assert.ok(ip.isLoopback('127.8.8.8'))
+ });
+ });
+
+ describe('8.8.8.8', function () {
+ it('should respond with false', function () {
+ assert.equal(ip.isLoopback('8.8.8.8'), false);
+ });
+ });
+
+ describe('fe80::1', function() {
+ it('should respond with true', function() {
+ assert.ok(ip.isLoopback('fe80::1'))
+ });
+ });
+
+ describe('::1', function() {
+ it('should respond with true', function() {
+ assert.ok(ip.isLoopback('::1'))
+ });
+ });
+
+ describe('::', function() {
+ it('should respond with true', function() {
+ assert.ok(ip.isLoopback('::'))
+ });
+ });
+ });
+
+ describe('address() method', function() {
+ describe('undefined', function() {
+ it('should respond with a private ip', function() {
+ assert.ok(ip.isPrivate(ip.address()));
+ });
+ });
+
+ describe('private', function() {
+ [ undefined, 'ipv4', 'ipv6' ].forEach(function(family) {
+ describe(family, function() {
+ it('should respond with a private ip', function() {
+ assert.ok(ip.isPrivate(ip.address('private', family)));
+ });
+ });
+ });
+ });
+
+ var interfaces = os.networkInterfaces();
+
+ Object.keys(interfaces).forEach(function(nic) {
+ describe(nic, function() {
+ [ undefined, 'ipv4' ].forEach(function(family) {
+ describe(family, function() {
+ it('should respond with an ipv4 address', function() {
+ var addr = ip.address(nic, family);
+ assert.ok(!addr || net.isIPv4(addr));
+ });
+ });
+ });
+
+ describe('ipv6', function() {
+ it('should respond with an ipv6 address', function() {
+ var addr = ip.address(nic, 'ipv6');
+ assert.ok(!addr || net.isIPv6(addr));
+ });
+ })
+ });
+ });
+ });
+
+ describe('toLong() method', function() {
+    it('should respond with an int', function() {
+ assert.equal(ip.toLong('127.0.0.1'), 2130706433);
+ assert.equal(ip.toLong('255.255.255.255'), 4294967295);
+ });
+ });
+
+ describe('fromLong() method', function() {
+    it('should respond with an ipv4 address', function() {
+ assert.equal(ip.fromLong(2130706433), '127.0.0.1');
+ assert.equal(ip.fromLong(4294967295), '255.255.255.255');
+ });
+ })
+});
diff --git a/testing/xpcshell/node-ws/.eslintrc.yaml b/testing/xpcshell/node-ws/.eslintrc.yaml
new file mode 100644
index 0000000000..f3d983b9c8
--- /dev/null
+++ b/testing/xpcshell/node-ws/.eslintrc.yaml
@@ -0,0 +1,19 @@
+env:
+ browser: true
+ es6: true
+ mocha: true
+ node: true
+extends:
+ - eslint:recommended
+ - plugin:prettier/recommended
+parserOptions:
+ ecmaVersion: latest
+ sourceType: module
+rules:
+ no-console: off
+ no-var: error
+ prefer-const: error
+ quotes:
+ - error
+ - single
+ - avoidEscape: true
diff --git a/testing/xpcshell/node-ws/.gitignore b/testing/xpcshell/node-ws/.gitignore
new file mode 100644
index 0000000000..e37ab1e942
--- /dev/null
+++ b/testing/xpcshell/node-ws/.gitignore
@@ -0,0 +1,4 @@
+node_modules/
+.nyc_output/
+coverage/
+.vscode/
diff --git a/testing/xpcshell/node-ws/.npmrc b/testing/xpcshell/node-ws/.npmrc
new file mode 100644
index 0000000000..43c97e719a
--- /dev/null
+++ b/testing/xpcshell/node-ws/.npmrc
@@ -0,0 +1 @@
+package-lock=false
diff --git a/testing/xpcshell/node-ws/.prettierrc.yaml b/testing/xpcshell/node-ws/.prettierrc.yaml
new file mode 100644
index 0000000000..fe2f506e34
--- /dev/null
+++ b/testing/xpcshell/node-ws/.prettierrc.yaml
@@ -0,0 +1,5 @@
+arrowParens: always
+endOfLine: lf
+proseWrap: always
+singleQuote: true
+trailingComma: none
diff --git a/testing/xpcshell/node-ws/LICENSE b/testing/xpcshell/node-ws/LICENSE
new file mode 100644
index 0000000000..65ff176bf6
--- /dev/null
+++ b/testing/xpcshell/node-ws/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011 Einar Otto Stangvik <einaros@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/testing/xpcshell/node-ws/README.md b/testing/xpcshell/node-ws/README.md
new file mode 100644
index 0000000000..4ae71f6d06
--- /dev/null
+++ b/testing/xpcshell/node-ws/README.md
@@ -0,0 +1,495 @@
+# ws: a Node.js WebSocket library
+
+[![Version npm](https://img.shields.io/npm/v/ws.svg?logo=npm)](https://www.npmjs.com/package/ws)
+[![CI](https://img.shields.io/github/workflow/status/websockets/ws/CI/master?label=CI&logo=github)](https://github.com/websockets/ws/actions?query=workflow%3ACI+branch%3Amaster)
+[![Coverage Status](https://img.shields.io/coveralls/websockets/ws/master.svg?logo=coveralls)](https://coveralls.io/github/websockets/ws)
+
+ws is a simple to use, blazing fast, and thoroughly tested WebSocket client and
+server implementation.
+
+Passes the quite extensive Autobahn test suite: [server][server-report],
+[client][client-report].
+
+**Note**: This module does not work in the browser. The client in the docs
+refers to a backend process acting as the client in the WebSocket
+communication. Browser clients must use the native
+[`WebSocket`](https://developer.mozilla.org/en-US/docs/Web/API/WebSocket)
+object. To make the same code work seamlessly on Node.js and the browser, you
+can use one of the many wrappers available on npm, like
+[isomorphic-ws](https://github.com/heineiuo/isomorphic-ws).
+
+## Table of Contents
+
+- [Protocol support](#protocol-support)
+- [Installing](#installing)
+ - [Opt-in for performance](#opt-in-for-performance)
+- [API docs](#api-docs)
+- [WebSocket compression](#websocket-compression)
+- [Usage examples](#usage-examples)
+ - [Sending and receiving text data](#sending-and-receiving-text-data)
+ - [Sending binary data](#sending-binary-data)
+ - [Simple server](#simple-server)
+ - [External HTTP/S server](#external-https-server)
+ - [Multiple servers sharing a single HTTP/S server](#multiple-servers-sharing-a-single-https-server)
+ - [Client authentication](#client-authentication)
+ - [Server broadcast](#server-broadcast)
+ - [Round-trip time](#round-trip-time)
+ - [Use the Node.js streams API](#use-the-nodejs-streams-api)
+ - [Other examples](#other-examples)
+- [FAQ](#faq)
+ - [How to get the IP address of the client?](#how-to-get-the-ip-address-of-the-client)
+ - [How to detect and close broken connections?](#how-to-detect-and-close-broken-connections)
+ - [How to connect via a proxy?](#how-to-connect-via-a-proxy)
+- [Changelog](#changelog)
+- [License](#license)
+
+## Protocol support
+
+- **HyBi drafts 07-12** (Use the option `protocolVersion: 8`; see the sketch
+  below)
+- **HyBi drafts 13-17** (Current default, alternatively option
+ `protocolVersion: 13`)
+
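+For example, a client can request the older framing explicitly through the
+`protocolVersion` option (a short sketch, not taken from the upstream README):
+
+```js
+import WebSocket from 'ws';
+
+// Request the HyBi draft 08 framing; omit the option to get the default
+// draft 13 behaviour.
+const ws = new WebSocket('ws://www.host.com/path', { protocolVersion: 8 });
+```
+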
+## Installing
+
+```
+npm install ws
+```
+
+### Opt-in for performance
+
+There are two optional modules that can be installed alongside the ws module.
+These modules are binary addons that improve certain operations.
+Prebuilt binaries are available for the most popular platforms so you don't
+necessarily need to have a C++ compiler installed on your machine.
+
+- `npm install --save-optional bufferutil`: Enables efficient masking and
+  unmasking of the data payload of the WebSocket frames.
+- `npm install --save-optional utf-8-validate`: Enables efficient checking of
+  whether a message contains valid UTF-8.
+
+To prevent ws from even trying to require and use these modules, set the
+[`WS_NO_BUFFER_UTIL`](./doc/ws.md#ws_no_buffer_util) and
+[`WS_NO_UTF_8_VALIDATE`](./doc/ws.md#ws_no_utf_8_validate) environment
+variables. This can be useful to enhance security in systems where one user can
+put a package in the package search path of another user's application, due to
+how the Node.js module resolution algorithm works.
+
+## API docs
+
+See [`/doc/ws.md`](./doc/ws.md) for Node.js-like documentation of ws classes and
+utility functions.
+
+## WebSocket compression
+
+ws supports the [permessage-deflate extension][permessage-deflate] which enables
+the client and server to negotiate a compression algorithm and its parameters,
+and then selectively apply it to the data payloads of each WebSocket message.
+
+The extension is disabled by default on the server and enabled by default on
+the client. It adds a significant overhead in terms of performance and memory
+consumption, so we suggest enabling it only if it is really needed.
+
+Note that Node.js has a variety of issues with high-performance compression,
+where increased concurrency, especially on Linux, can lead to [catastrophic
+memory fragmentation][node-zlib-bug] and slow performance. If you intend to use
+permessage-deflate in production, it is worthwhile to set up a test
+representative of your workload and ensure Node.js/zlib will handle it with
+acceptable performance and memory usage.
+
+Tuning of permessage-deflate can be done via the options defined below. You can
+also use `zlibDeflateOptions` and `zlibInflateOptions`, which are passed directly
+into the creation of [raw deflate/inflate streams][node-zlib-deflaterawdocs].
+
+See [the docs][ws-server-options] for more options.
+
+```js
+import WebSocket, { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({
+ port: 8080,
+ perMessageDeflate: {
+ zlibDeflateOptions: {
+ // See zlib defaults.
+ chunkSize: 1024,
+ memLevel: 7,
+ level: 3
+ },
+ zlibInflateOptions: {
+ chunkSize: 10 * 1024
+ },
+ // Other options settable:
+ clientNoContextTakeover: true, // Defaults to negotiated value.
+ serverNoContextTakeover: true, // Defaults to negotiated value.
+ serverMaxWindowBits: 10, // Defaults to negotiated value.
+ // Below options specified as default values.
+ concurrencyLimit: 10, // Limits zlib concurrency for perf.
+ threshold: 1024 // Size (in bytes) below which messages
+ // should not be compressed if context takeover is disabled.
+ }
+});
+```
+
+The client will only use the extension if it is supported and enabled on the
+server. To always disable the extension on the client, set the
+`perMessageDeflate` option to `false`.
+
+```js
+import WebSocket from 'ws';
+
+const ws = new WebSocket('ws://www.host.com/path', {
+ perMessageDeflate: false
+});
+```
+
+## Usage examples
+
+### Sending and receiving text data
+
+```js
+import WebSocket from 'ws';
+
+const ws = new WebSocket('ws://www.host.com/path');
+
+ws.on('open', function open() {
+ ws.send('something');
+});
+
+ws.on('message', function message(data) {
+ console.log('received: %s', data);
+});
+```
+
+### Sending binary data
+
+```js
+import WebSocket from 'ws';
+
+const ws = new WebSocket('ws://www.host.com/path');
+
+ws.on('open', function open() {
+ const array = new Float32Array(5);
+
+ for (var i = 0; i < array.length; ++i) {
+ array[i] = i / 2;
+ }
+
+ ws.send(array);
+});
+```
+
+### Simple server
+
+```js
+import { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('connection', function connection(ws) {
+ ws.on('message', function message(data) {
+ console.log('received: %s', data);
+ });
+
+ ws.send('something');
+});
+```
+
+### External HTTP/S server
+
+```js
+import { createServer } from 'https';
+import { readFileSync } from 'fs';
+import { WebSocketServer } from 'ws';
+
+const server = createServer({
+ cert: readFileSync('/path/to/cert.pem'),
+ key: readFileSync('/path/to/key.pem')
+});
+const wss = new WebSocketServer({ server });
+
+wss.on('connection', function connection(ws) {
+ ws.on('message', function message(data) {
+ console.log('received: %s', data);
+ });
+
+ ws.send('something');
+});
+
+server.listen(8080);
+```
+
+### Multiple servers sharing a single HTTP/S server
+
+```js
+import { createServer } from 'http';
+import { parse } from 'url';
+import { WebSocketServer } from 'ws';
+
+const server = createServer();
+const wss1 = new WebSocketServer({ noServer: true });
+const wss2 = new WebSocketServer({ noServer: true });
+
+wss1.on('connection', function connection(ws) {
+ // ...
+});
+
+wss2.on('connection', function connection(ws) {
+ // ...
+});
+
+server.on('upgrade', function upgrade(request, socket, head) {
+ const { pathname } = parse(request.url);
+
+ if (pathname === '/foo') {
+ wss1.handleUpgrade(request, socket, head, function done(ws) {
+ wss1.emit('connection', ws, request);
+ });
+ } else if (pathname === '/bar') {
+ wss2.handleUpgrade(request, socket, head, function done(ws) {
+ wss2.emit('connection', ws, request);
+ });
+ } else {
+ socket.destroy();
+ }
+});
+
+server.listen(8080);
+```
+
+### Client authentication
+
+```js
+import { createServer } from 'http';
+import { WebSocketServer } from 'ws';
+
+const server = createServer();
+const wss = new WebSocketServer({ noServer: true });
+
+wss.on('connection', function connection(ws, request, client) {
+ ws.on('message', function message(data) {
+ console.log(`Received message ${data} from user ${client}`);
+ });
+});
+
+server.on('upgrade', function upgrade(request, socket, head) {
+ // This function is not defined on purpose. Implement it with your own logic.
+ authenticate(request, function next(err, client) {
+ if (err || !client) {
+ socket.write('HTTP/1.1 401 Unauthorized\r\n\r\n');
+ socket.destroy();
+ return;
+ }
+
+ wss.handleUpgrade(request, socket, head, function done(ws) {
+ wss.emit('connection', ws, request, client);
+ });
+ });
+});
+
+server.listen(8080);
+```
+
+Also see the provided [example][session-parse-example] using `express-session`.
+
+### Server broadcast
+
+A client WebSocket broadcasting to all connected WebSocket clients, including
+itself.
+
+```js
+import WebSocket, { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('connection', function connection(ws) {
+ ws.on('message', function message(data, isBinary) {
+ wss.clients.forEach(function each(client) {
+ if (client.readyState === WebSocket.OPEN) {
+ client.send(data, { binary: isBinary });
+ }
+ });
+ });
+});
+```
+
+A client WebSocket broadcasting to every other connected WebSocket client,
+excluding itself.
+
+```js
+import WebSocket, { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('connection', function connection(ws) {
+ ws.on('message', function message(data, isBinary) {
+ wss.clients.forEach(function each(client) {
+ if (client !== ws && client.readyState === WebSocket.OPEN) {
+ client.send(data, { binary: isBinary });
+ }
+ });
+ });
+});
+```
+
+### Round-trip time
+
+```js
+import WebSocket from 'ws';
+
+const ws = new WebSocket('wss://websocket-echo.com/');
+
+ws.on('open', function open() {
+ console.log('connected');
+ ws.send(Date.now());
+});
+
+ws.on('close', function close() {
+ console.log('disconnected');
+});
+
+ws.on('message', function message(data) {
+ console.log(`Round-trip time: ${Date.now() - data} ms`);
+
+ setTimeout(function timeout() {
+ ws.send(Date.now());
+ }, 500);
+});
+```
+
+### Use the Node.js streams API
+
+```js
+import WebSocket, { createWebSocketStream } from 'ws';
+
+const ws = new WebSocket('wss://websocket-echo.com/');
+
+const duplex = createWebSocketStream(ws, { encoding: 'utf8' });
+
+duplex.pipe(process.stdout);
+process.stdin.pipe(duplex);
+```
+
+### Other examples
+
+For a full example with a browser client communicating with a ws server, see the
+examples folder.
+
+Otherwise, see the test cases.
+
+## FAQ
+
+### How to get the IP address of the client?
+
+The remote IP address can be obtained from the raw socket.
+
+```js
+import { WebSocketServer } from 'ws';
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('connection', function connection(ws, req) {
+ const ip = req.socket.remoteAddress;
+});
+```
+
+When the server runs behind a proxy like NGINX, the de-facto standard is to use
+the `X-Forwarded-For` header.
+
+```js
+wss.on('connection', function connection(ws, req) {
+ const ip = req.headers['x-forwarded-for'].split(',')[0].trim();
+});
+```
+
+### How to detect and close broken connections?
+
+Sometimes the link between the server and the client can be interrupted in a way
+that keeps both the server and the client unaware of the broken state of the
+connection (e.g. when pulling the cord).
+
+In these cases ping messages can be used as a means to verify that the remote
+endpoint is still responsive.
+
+```js
+import { WebSocketServer } from 'ws';
+
+function heartbeat() {
+ this.isAlive = true;
+}
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('connection', function connection(ws) {
+ ws.isAlive = true;
+ ws.on('pong', heartbeat);
+});
+
+const interval = setInterval(function ping() {
+ wss.clients.forEach(function each(ws) {
+ if (ws.isAlive === false) return ws.terminate();
+
+ ws.isAlive = false;
+ ws.ping();
+ });
+}, 30000);
+
+wss.on('close', function close() {
+ clearInterval(interval);
+});
+```
+
+Pong messages are automatically sent in response to ping messages as required by
+the spec.
+
+Just like the server example above, your clients may also lose the connection
+without knowing it. You might want to add a ping listener on your clients to
+detect and handle that. A simple implementation would be:
+
+```js
+import WebSocket from 'ws';
+
+function heartbeat() {
+ clearTimeout(this.pingTimeout);
+
+ // Use `WebSocket#terminate()`, which immediately destroys the connection,
+ // instead of `WebSocket#close()`, which waits for the close timer.
+ // Delay should be equal to the interval at which your server
+ // sends out pings plus a conservative assumption of the latency.
+ this.pingTimeout = setTimeout(() => {
+ this.terminate();
+ }, 30000 + 1000);
+}
+
+const client = new WebSocket('wss://websocket-echo.com/');
+
+client.on('open', heartbeat);
+client.on('ping', heartbeat);
+client.on('close', function clear() {
+ clearTimeout(this.pingTimeout);
+});
+```
+
+### How to connect via a proxy?
+
+Use a custom `http.Agent` implementation like [https-proxy-agent][] or
+[socks-proxy-agent][].
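+
+For example, the agent can be passed to the client constructor through the
+`agent` option (a minimal sketch; the exact `https-proxy-agent` import and
+constructor shape depend on its version, so treat them as assumptions):
+
+```js
+import WebSocket from 'ws';
+// Recent https-proxy-agent releases use a named export; older ones export the
+// constructor as the default. Adjust to the installed version.
+import { HttpsProxyAgent } from 'https-proxy-agent';
+
+// Route the WebSocket handshake and traffic through the HTTP proxy.
+const agent = new HttpsProxyAgent('http://proxy.example.com:8080');
+const ws = new WebSocket('wss://websocket-echo.com/', { agent });
+```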
+
+## Changelog
+
+We're using the GitHub [releases][changelog] for changelog entries.
+
+## License
+
+[MIT](LICENSE)
+
+[changelog]: https://github.com/websockets/ws/releases
+[client-report]: http://websockets.github.io/ws/autobahn/clients/
+[https-proxy-agent]: https://github.com/TooTallNate/node-https-proxy-agent
+[node-zlib-bug]: https://github.com/nodejs/node/issues/8871
+[node-zlib-deflaterawdocs]:
+ https://nodejs.org/api/zlib.html#zlib_zlib_createdeflateraw_options
+[permessage-deflate]: https://tools.ietf.org/html/rfc7692
+[server-report]: http://websockets.github.io/ws/autobahn/servers/
+[session-parse-example]: ./examples/express-session-parse
+[socks-proxy-agent]: https://github.com/TooTallNate/node-socks-proxy-agent
+[ws-server-options]: ./doc/ws.md#new-websocketserveroptions-callback
diff --git a/testing/xpcshell/node-ws/SECURITY.md b/testing/xpcshell/node-ws/SECURITY.md
new file mode 100644
index 0000000000..0baf19a63b
--- /dev/null
+++ b/testing/xpcshell/node-ws/SECURITY.md
@@ -0,0 +1,39 @@
+# Security Guidelines
+
+Please contact us directly at **security@3rd-Eden.com** for any bug that might
+impact the security of this project. Please prefix the subject of your email
+with `[security]` in lowercase and square brackets. Our email filters will
+automatically prevent these messages from being moved to our spam box.
+
+You will receive an acknowledgement of your report within **24 hours**.
+
+All emails that do not include security vulnerabilities will be removed and
+blocked instantly.
+
+## Exceptions
+
+If you do not receive an acknowledgement within the said time frame please give
+us the benefit of the doubt as it's possible that we haven't seen it yet. In
+this case please send us a message **without details** using one of the
+following methods:
+
+- Contact the lead developers of this project on their personal e-mails. You can
+ find the e-mails in the git logs, for example using the following command:
+ `git --no-pager show -s --format='%an <%ae>' <gitsha>` where `<gitsha>` is the
+ SHA1 of their latest commit in the project.
+- Create a GitHub issue stating contact details and the severity of the issue.
+
+Once we have acknowledged receipt of your report and confirmed the bug ourselves
+we will work with you to fix the vulnerability and publicly acknowledge your
+responsible disclosure, if you wish. In addition to that we will create and
+publish a security advisory to
+[GitHub Security Advisories](https://github.com/websockets/ws/security/advisories?state=published).
+
+## History
+
+- 04 Jan 2016:
+ [Buffer vulnerability](https://github.com/websockets/ws/releases/tag/1.0.1)
+- 08 Nov 2017:
+ [DoS vulnerability](https://github.com/websockets/ws/releases/tag/3.3.1)
+- 25 May 2021:
+ [ReDoS in `Sec-Websocket-Protocol` header](https://github.com/websockets/ws/releases/tag/7.4.6)
diff --git a/testing/xpcshell/node-ws/bench/parser.benchmark.js b/testing/xpcshell/node-ws/bench/parser.benchmark.js
new file mode 100644
index 0000000000..a6e359d05d
--- /dev/null
+++ b/testing/xpcshell/node-ws/bench/parser.benchmark.js
@@ -0,0 +1,95 @@
+'use strict';
+
+const benchmark = require('benchmark');
+const crypto = require('crypto');
+
+const WebSocket = require('..');
+
+const Receiver = WebSocket.Receiver;
+const Sender = WebSocket.Sender;
+
+const options = {
+ fin: true,
+ rsv1: false,
+ mask: true,
+ readOnly: false
+};
+
+function createBinaryFrame(length) {
+ const list = Sender.frame(crypto.randomBytes(length), {
+ opcode: 0x02,
+ ...options
+ });
+
+ return Buffer.concat(list);
+}
+
+const pingFrame1 = Buffer.concat(
+ Sender.frame(crypto.randomBytes(5), { opcode: 0x09, ...options })
+);
+
+const textFrame = Buffer.from('819461616161' + '61'.repeat(20), 'hex');
+const pingFrame2 = Buffer.from('8980146e915a', 'hex');
+const binaryFrame1 = createBinaryFrame(125);
+const binaryFrame2 = createBinaryFrame(65535);
+const binaryFrame3 = createBinaryFrame(200 * 1024);
+const binaryFrame4 = createBinaryFrame(1024 * 1024);
+
+const suite = new benchmark.Suite();
+const receiver = new Receiver({
+ binaryType: 'nodebuffer',
+ extensions: {},
+ isServer: true,
+ skipUTF8Validation: false
+});
+
+suite.add('ping frame (5 bytes payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(pingFrame1, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('ping frame (no payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(pingFrame2, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('text frame (20 bytes payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(textFrame, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('binary frame (125 bytes payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(binaryFrame1, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('binary frame (65535 bytes payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(binaryFrame2, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('binary frame (200 KiB payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(binaryFrame3, deferred.resolve.bind(deferred));
+ }
+});
+suite.add('binary frame (1 MiB payload)', {
+ defer: true,
+ fn: (deferred) => {
+ receiver.write(binaryFrame4, deferred.resolve.bind(deferred));
+ }
+});
+
+suite.on('cycle', (e) => console.log(e.target.toString()));
+
+if (require.main === module) {
+ suite.run({ async: true });
+} else {
+ module.exports = suite;
+}
diff --git a/testing/xpcshell/node-ws/bench/sender.benchmark.js b/testing/xpcshell/node-ws/bench/sender.benchmark.js
new file mode 100644
index 0000000000..89d3be24b0
--- /dev/null
+++ b/testing/xpcshell/node-ws/bench/sender.benchmark.js
@@ -0,0 +1,48 @@
+'use strict';
+
+const benchmark = require('benchmark');
+const crypto = require('crypto');
+
+const Sender = require('../').Sender;
+
+const data1 = crypto.randomBytes(64);
+const data2 = crypto.randomBytes(16 * 1024);
+const data3 = crypto.randomBytes(64 * 1024);
+const data4 = crypto.randomBytes(200 * 1024);
+const data5 = crypto.randomBytes(1024 * 1024);
+
+const opts1 = {
+ readOnly: false,
+ mask: false,
+ rsv1: false,
+ opcode: 2,
+ fin: true
+};
+const opts2 = {
+ readOnly: true,
+ rsv1: false,
+ mask: true,
+ opcode: 2,
+ fin: true
+};
+
+const suite = new benchmark.Suite();
+
+suite.add('frame, unmasked (64 B)', () => Sender.frame(data1, opts1));
+suite.add('frame, masked (64 B)', () => Sender.frame(data1, opts2));
+suite.add('frame, unmasked (16 KiB)', () => Sender.frame(data2, opts1));
+suite.add('frame, masked (16 KiB)', () => Sender.frame(data2, opts2));
+suite.add('frame, unmasked (64 KiB)', () => Sender.frame(data3, opts1));
+suite.add('frame, masked (64 KiB)', () => Sender.frame(data3, opts2));
+suite.add('frame, unmasked (200 KiB)', () => Sender.frame(data4, opts1));
+suite.add('frame, masked (200 KiB)', () => Sender.frame(data4, opts2));
+suite.add('frame, unmasked (1 MiB)', () => Sender.frame(data5, opts1));
+suite.add('frame, masked (1 MiB)', () => Sender.frame(data5, opts2));
+
+suite.on('cycle', (e) => console.log(e.target.toString()));
+
+if (require.main === module) {
+ suite.run({ async: true });
+} else {
+ module.exports = suite;
+}
diff --git a/testing/xpcshell/node-ws/bench/speed.js b/testing/xpcshell/node-ws/bench/speed.js
new file mode 100644
index 0000000000..bef6a30679
--- /dev/null
+++ b/testing/xpcshell/node-ws/bench/speed.js
@@ -0,0 +1,115 @@
+'use strict';
+
+const cluster = require('cluster');
+const http = require('http');
+
+const WebSocket = require('..');
+
+const port = 8181;
+const path = '';
+// const path = '/tmp/wss.sock';
+
+if (cluster.isMaster) {
+ const server = http.createServer();
+ const wss = new WebSocket.Server({
+ maxPayload: 600 * 1024 * 1024,
+ perMessageDeflate: false,
+ clientTracking: false,
+ server
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (data, isBinary) => {
+ ws.send(data, { binary: isBinary });
+ });
+ });
+
+ server.listen(path ? { path } : { port }, () => cluster.fork());
+
+ cluster.on('exit', () => {
+ wss.close();
+ server.close();
+ });
+} else {
+ const configs = [
+ [true, 10000, 64],
+ [true, 5000, 16 * 1024],
+ [true, 1000, 128 * 1024],
+ [true, 100, 1024 * 1024],
+ [true, 1, 500 * 1024 * 1024],
+ [false, 10000, 64],
+ [false, 5000, 16 * 1024],
+ [false, 1000, 128 * 1024],
+ [false, 100, 1024 * 1024]
+ ];
+
+ const roundPrec = (num, prec) => {
+ const mul = Math.pow(10, prec);
+ return Math.round(num * mul) / mul;
+ };
+
+ const humanSize = (bytes) => {
+ if (bytes >= 1073741824) return roundPrec(bytes / 1073741824, 2) + ' GiB';
+ if (bytes >= 1048576) return roundPrec(bytes / 1048576, 2) + ' MiB';
+ if (bytes >= 1024) return roundPrec(bytes / 1024, 2) + ' KiB';
+ return roundPrec(bytes, 2) + ' B';
+ };
+
+ const largest = configs.reduce(
+ (prev, curr) => (curr[2] > prev ? curr[2] : prev),
+ 0
+ );
+ console.log('Generating %s of test data...', humanSize(largest));
+ const randomBytes = Buffer.allocUnsafe(largest);
+
+ for (let i = 0; i < largest; ++i) {
+ randomBytes[i] = ~~(Math.random() * 127);
+ }
+
+ console.log(`Testing ws on ${path || '[::]:' + port}`);
+
+ const runConfig = (useBinary, roundtrips, size, cb) => {
+ const data = randomBytes.slice(0, size);
+ const url = path ? `ws+unix://${path}` : `ws://localhost:${port}`;
+ const ws = new WebSocket(url, {
+ maxPayload: 600 * 1024 * 1024
+ });
+ let roundtrip = 0;
+ let time;
+
+ ws.on('error', (err) => {
+ console.error(err.stack);
+ cluster.worker.disconnect();
+ });
+ ws.on('open', () => {
+ time = process.hrtime();
+ ws.send(data, { binary: useBinary });
+ });
+ ws.on('message', () => {
+ if (++roundtrip !== roundtrips)
+ return ws.send(data, { binary: useBinary });
+
+ let elapsed = process.hrtime(time);
+ elapsed = elapsed[0] * 1e9 + elapsed[1];
+
+ console.log(
+ '%d roundtrips of %s %s data:\t%ss\t%s',
+ roundtrips,
+ humanSize(size),
+ useBinary ? 'binary' : 'text',
+ roundPrec(elapsed / 1e9, 1),
+ humanSize(((size * 2 * roundtrips) / elapsed) * 1e9) + '/s'
+ );
+
+ ws.close();
+ cb();
+ });
+ };
+
+ (function run() {
+ if (configs.length === 0) return cluster.worker.disconnect();
+ const config = configs.shift();
+ config.push(run);
+ runConfig.apply(null, config);
+ })();
+}
diff --git a/testing/xpcshell/node-ws/browser.js b/testing/xpcshell/node-ws/browser.js
new file mode 100644
index 0000000000..ca4f628ac1
--- /dev/null
+++ b/testing/xpcshell/node-ws/browser.js
@@ -0,0 +1,8 @@
+'use strict';
+
+module.exports = function () {
+ throw new Error(
+ 'ws does not work in the browser. Browser clients must use the native ' +
+ 'WebSocket object'
+ );
+};
diff --git a/testing/xpcshell/node-ws/doc/ws.md b/testing/xpcshell/node-ws/doc/ws.md
new file mode 100644
index 0000000000..4888ee4c6f
--- /dev/null
+++ b/testing/xpcshell/node-ws/doc/ws.md
@@ -0,0 +1,669 @@
+# ws
+
+## Table of Contents
+
+- [Class: WebSocketServer](#class-websocketserver)
+ - [new WebSocketServer(options[, callback])](#new-websocketserveroptions-callback)
+ - [Event: 'close'](#event-close)
+ - [Event: 'connection'](#event-connection)
+ - [Event: 'error'](#event-error)
+ - [Event: 'headers'](#event-headers)
+ - [Event: 'listening'](#event-listening)
+ - [Event: 'wsClientError'](#event-wsclienterror)
+ - [server.address()](#serveraddress)
+ - [server.clients](#serverclients)
+ - [server.close([callback])](#serverclosecallback)
+ - [server.handleUpgrade(request, socket, head, callback)](#serverhandleupgraderequest-socket-head-callback)
+ - [server.shouldHandle(request)](#servershouldhandlerequest)
+- [Class: WebSocket](#class-websocket)
+ - [Ready state constants](#ready-state-constants)
+ - [new WebSocket(address[, protocols][, options])](#new-websocketaddress-protocols-options)
+ - [UNIX Domain Sockets](#unix-domain-sockets)
+ - [Event: 'close'](#event-close-1)
+ - [Event: 'error'](#event-error-1)
+ - [Event: 'message'](#event-message)
+ - [Event: 'open'](#event-open)
+ - [Event: 'ping'](#event-ping)
+ - [Event: 'pong'](#event-pong)
+ - [Event: 'redirect'](#event-redirect)
+ - [Event: 'unexpected-response'](#event-unexpected-response)
+ - [Event: 'upgrade'](#event-upgrade)
+ - [websocket.addEventListener(type, listener[, options])](#websocketaddeventlistenertype-listener-options)
+ - [websocket.binaryType](#websocketbinarytype)
+ - [websocket.bufferedAmount](#websocketbufferedamount)
+ - [websocket.close([code[, reason]])](#websocketclosecode-reason)
+ - [websocket.extensions](#websocketextensions)
+ - [websocket.isPaused](#websocketispaused)
+ - [websocket.onclose](#websocketonclose)
+ - [websocket.onerror](#websocketonerror)
+ - [websocket.onmessage](#websocketonmessage)
+ - [websocket.onopen](#websocketonopen)
+ - [websocket.pause()](#websocketpause)
+ - [websocket.ping([data[, mask]][, callback])](#websocketpingdata-mask-callback)
+ - [websocket.pong([data[, mask]][, callback])](#websocketpongdata-mask-callback)
+ - [websocket.protocol](#websocketprotocol)
+ - [websocket.readyState](#websocketreadystate)
+ - [websocket.removeEventListener(type, listener)](#websocketremoveeventlistenertype-listener)
+ - [websocket.resume()](#websocketresume)
+ - [websocket.send(data[, options][, callback])](#websocketsenddata-options-callback)
+ - [websocket.terminate()](#websocketterminate)
+ - [websocket.url](#websocketurl)
+- [createWebSocketStream(websocket[, options])](#createwebsocketstreamwebsocket-options)
+- [Environment variables](#environment-variables)
+ - [WS_NO_BUFFER_UTIL](#ws_no_buffer_util)
+ - [WS_NO_UTF_8_VALIDATE](#ws_no_utf_8_validate)
+- [Error codes](#error-codes)
+ - [WS_ERR_EXPECTED_FIN](#ws_err_expected_fin)
+ - [WS_ERR_EXPECTED_MASK](#ws_err_expected_mask)
+ - [WS_ERR_INVALID_CLOSE_CODE](#ws_err_invalid_close_code)
+ - [WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH](#ws_err_invalid_control_payload_length)
+ - [WS_ERR_INVALID_OPCODE](#ws_err_invalid_opcode)
+ - [WS_ERR_INVALID_UTF8](#ws_err_invalid_utf8)
+ - [WS_ERR_UNEXPECTED_MASK](#ws_err_unexpected_mask)
+ - [WS_ERR_UNEXPECTED_RSV_1](#ws_err_unexpected_rsv_1)
+ - [WS_ERR_UNEXPECTED_RSV_2_3](#ws_err_unexpected_rsv_2_3)
+ - [WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH](#ws_err_unsupported_data_payload_length)
+ - [WS_ERR_UNSUPPORTED_MESSAGE_LENGTH](#ws_err_unsupported_message_length)
+
+## Class: WebSocketServer
+
+This class represents a WebSocket server. It extends the `EventEmitter`.
+
+### new WebSocketServer(options[, callback])
+
+- `options` {Object}
+ - `backlog` {Number} The maximum length of the queue of pending connections.
+ - `clientTracking` {Boolean} Specifies whether or not to track clients.
+ - `handleProtocols` {Function} A function which can be used to handle the
+ WebSocket subprotocols. See description below.
+  - `host` {String} The hostname to which to bind the server.
+ - `maxPayload` {Number} The maximum allowed message size in bytes. Defaults to
+ 100 MiB (104857600 bytes).
+ - `noServer` {Boolean} Enable no server mode.
+ - `path` {String} Accept only connections matching this path.
+ - `perMessageDeflate` {Boolean|Object} Enable/disable permessage-deflate.
+  - `port` {Number} The port on which to bind the server.
+ - `server` {http.Server|https.Server} A pre-created Node.js HTTP/S server.
+ - `skipUTF8Validation` {Boolean} Specifies whether or not to skip UTF-8
+ validation for text and close messages. Defaults to `false`. Set to `true`
+ only if clients are trusted.
+  - `verifyClient` {Function} A function which can be used to validate incoming
+    connections. See description below. (Usage is discouraged: see
+    [Issue #377](https://github.com/websockets/ws/issues/377#issuecomment-462152231))
+  - `WebSocket` {Function} Specifies the `WebSocket` class to be used. It must
+    extend the original `WebSocket`. Defaults to `WebSocket`.
+- `callback` {Function}
+
+Create a new server instance. One and only one of `port`, `server` or `noServer`
+must be provided or an error is thrown. An HTTP server is automatically created,
+started, and used if `port` is set. To use an external HTTP/S server instead,
+specify only `server` or `noServer`. In this case the HTTP/S server must be
+started manually. The "noServer" mode allows the WebSocket server to be
+completely detached from the HTTP/S server. This makes it possible, for example,
+to share a single HTTP/S server between multiple WebSocket servers.
+
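+For illustration, a minimal sketch of the three mutually exclusive modes; the
+port numbers are arbitrary placeholders:
+
+```js
+const http = require('http');
+const { WebSocketServer } = require('ws');
+
+// `port`: an HTTP server is created and started automatically.
+const wss1 = new WebSocketServer({ port: 8080 });
+
+// `server`: an external HTTP server that must be started manually.
+const server = http.createServer();
+const wss2 = new WebSocketServer({ server });
+server.listen(8081);
+
+// `noServer`: completely detached; see `server.handleUpgrade()` below.
+const wss3 = new WebSocketServer({ noServer: true });
+```
+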
+> **NOTE:** Use of `verifyClient` is discouraged. Instead, handle client
+> authentication in the `'upgrade'` event of the HTTP server. See the examples
+> for more details.
+
+If `verifyClient` is not set then the handshake is automatically accepted. If it
+has a single parameter then `ws` will invoke it with the following argument:
+
+- `info` {Object}
+ - `origin` {String} The value in the Origin header indicated by the client.
+ - `req` {http.IncomingMessage} The client HTTP GET request.
+ - `secure` {Boolean} `true` if `req.socket.authorized` or
+ `req.socket.encrypted` is set.
+
+The return value (`Boolean`) of the function determines whether or not to accept
+the handshake.
+
+If `verifyClient` has two parameters then `ws` will invoke it with the following
+arguments:
+
+- `info` {Object} Same as above.
+- `cb` {Function} A callback that must be called by the user upon inspection of
+ the `info` fields. Arguments in this callback are:
+ - `result` {Boolean} Whether or not to accept the handshake.
+ - `code` {Number} When `result` is `false` this field determines the HTTP
+ error status code to be sent to the client.
+ - `name` {String} When `result` is `false` this field determines the HTTP
+ reason phrase.
+ - `headers` {Object} When `result` is `false` this field determines additional
+ HTTP headers to be sent to the client. For example,
+ `{ 'Retry-After': 120 }`.
+
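+For illustration only (see the note above discouraging `verifyClient`), a
+sketch of the asynchronous two-parameter form; the rejection rule is a made-up
+placeholder:
+
+```js
+const { WebSocketServer } = require('ws');
+
+const wss = new WebSocketServer({
+  port: 8080,
+  verifyClient: (info, cb) => {
+    // Hypothetical rule: accept only TLS connections.
+    if (!info.secure) cb(false, 401, 'Unauthorized', { 'Retry-After': 120 });
+    else cb(true);
+  }
+});
+```
+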
+`handleProtocols` takes two arguments:
+
+- `protocols` {Set} The list of WebSocket subprotocols indicated by the client
+ in the `Sec-WebSocket-Protocol` header.
+- `request` {http.IncomingMessage} The client HTTP GET request.
+
+The returned value sets the value of the `Sec-WebSocket-Protocol` header in the
+HTTP 101 response. If the returned value is `false`, the header is not added to
+the response.
+
+If `handleProtocols` is not set then the first of the client's requested
+subprotocols is used.
+
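+For example, a sketch of a `handleProtocols` function that prefers a
+hypothetical `chat-v2` subprotocol:
+
+```js
+const { WebSocketServer } = require('ws');
+
+const wss = new WebSocketServer({
+  port: 8080,
+  handleProtocols: (protocols, request) => {
+    // `protocols` is a Set of the subprotocols offered by the client.
+    if (protocols.has('chat-v2')) return 'chat-v2';
+    if (protocols.has('chat-v1')) return 'chat-v1';
+    return false; // Do not add a Sec-WebSocket-Protocol header.
+  }
+});
+```
+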
+`perMessageDeflate` can be used to control the behavior of the
+[permessage-deflate extension][permessage-deflate]. The extension is disabled
+when `false` (the default value). If an object is provided, its properties are
+the extension parameters (see the example below):
+
+- `serverNoContextTakeover` {Boolean} Whether to use context takeover or not.
+- `clientNoContextTakeover` {Boolean} Acknowledge disabling of client context
+ takeover.
+- `serverMaxWindowBits` {Number} The value of `windowBits`.
+- `clientMaxWindowBits` {Number} Request a custom client window size.
+- `zlibDeflateOptions` {Object} [Additional options][zlib-options] to pass to
+ zlib on deflate.
+- `zlibInflateOptions` {Object} [Additional options][zlib-options] to pass to
+ zlib on inflate.
+- `threshold` {Number} Payloads smaller than this will not be compressed if
+ context takeover is disabled. Defaults to 1024 bytes.
+- `concurrencyLimit` {Number} The number of concurrent calls to zlib. Calls
+ above this limit will be queued. Default 10. You usually won't need to touch
+ this option. See [this issue][concurrency-limit] for more details.
+
+If a property is empty then either an offered configuration or a default value
+is used. When sending a fragmented message the length of the first fragment is
+compared to the threshold. This determines if compression is used for the entire
+message.
+
+`callback` will be added as a listener for the `listening` event on the HTTP
+server when not operating in "noServer" mode.
+
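+For illustration, a sketch of a server with tuned permessage-deflate
+parameters; the numeric values are placeholders, not recommendations:
+
+```js
+const { WebSocketServer } = require('ws');
+
+const wss = new WebSocketServer({
+  port: 8080,
+  perMessageDeflate: {
+    zlibDeflateOptions: { level: 6, memLevel: 7 },
+    zlibInflateOptions: { chunkSize: 10 * 1024 },
+    clientNoContextTakeover: true,
+    serverNoContextTakeover: true,
+    serverMaxWindowBits: 10,
+    // Skip compression for payloads below 1 KiB.
+    threshold: 1024,
+    // Limit the number of concurrent calls to zlib.
+    concurrencyLimit: 10
+  }
+});
+```
+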
+### Event: 'close'
+
+Emitted when the server closes. This event depends on the `'close'` event of
+the HTTP server only when the HTTP server is created internally. In all other
+cases, the event is emitted independently.
+
+### Event: 'connection'
+
+- `websocket` {WebSocket}
+- `request` {http.IncomingMessage}
+
+Emitted when the handshake is complete. `request` is the HTTP GET request sent
+by the client. It is useful for parsing authority headers, cookie headers, and
+other information.
+
+### Event: 'error'
+
+- `error` {Error}
+
+Emitted when an error occurs on the underlying server.
+
+### Event: 'headers'
+
+- `headers` {Array}
+- `request` {http.IncomingMessage}
+
+Emitted before the response headers are written to the socket as part of the
+handshake. This allows you to inspect/modify the headers before they are sent.
+
+### Event: 'listening'
+
+Emitted when the underlying server has been bound.
+
+### Event: 'wsClientError'
+
+- `error` {Error}
+- `socket` {net.Socket|tls.Socket}
+- `request` {http.IncomingMessage}
+
+Emitted when an error occurs before the WebSocket connection is established.
+`socket` and `request` are respectively the socket and the HTTP request from
+which the error originated. The listener of this event is responsible for
+closing the socket. When the `'wsClientError'` event is emitted there is no
+`http.ServerResponse` object, so any HTTP response, including the response
+headers and body, must be written directly to the `socket`. If there is no
+listener for this event, the socket is closed with a default 4xx response
+containing a descriptive error message.
+
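+A minimal sketch of such a listener, writing a raw HTTP response before closing
+the socket:
+
+```js
+const { WebSocketServer } = require('ws');
+
+const wss = new WebSocketServer({ port: 8080 });
+
+wss.on('wsClientError', (error, socket, request) => {
+  console.error(error);
+  socket.end('HTTP/1.1 400 Bad Request\r\nConnection: close\r\n\r\n');
+});
+```
+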
+### server.address()
+
+Returns an object with `port`, `family`, and `address` properties specifying the
+bound address, the address family name, and port of the server as reported by
+the operating system if listening on an IP socket. If the server is listening on
+a pipe or UNIX domain socket, the name is returned as a string.
+
+### server.clients
+
+- {Set}
+
+A set that stores all connected clients. Please note that this property is only
+added when `clientTracking` is truthy.
+
+### server.close([callback])
+
+Prevent the server from accepting new connections and close the HTTP server if
+created internally. If an external HTTP server is used via the `server` or
+`noServer` constructor options, it must be closed manually. Existing connections
+are not closed automatically. The server emits a `'close'` event when all
+connections are closed unless an external HTTP server is used and client
+tracking is disabled. In this case the `'close'` event is emitted in the next
+tick. The optional callback is called when the `'close'` event occurs and
+receives an `Error` if the server is already closed.
+
+### server.handleUpgrade(request, socket, head, callback)
+
+- `request` {http.IncomingMessage} The client HTTP GET request.
+- `socket` {net.Socket|tls.Socket} The network socket between the server and
+ client.
+- `head` {Buffer} The first packet of the upgraded stream.
+- `callback` {Function}.
+
+Handle an HTTP upgrade request. When the HTTP server is created internally or
+when the HTTP server is passed via the `server` option, this method is called
+automatically. When operating in "noServer" mode, this method must be called
+manually.
+
+If the upgrade is successful, the `callback` is called with two arguments:
+
+- `websocket` {WebSocket} A `WebSocket` object.
+- `request` {http.IncomingMessage} The client HTTP GET request.
+
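+For example, the typical wiring in "noServer" mode looks roughly like this (the
+port number is a placeholder):
+
+```js
+const http = require('http');
+const { WebSocketServer } = require('ws');
+
+const server = http.createServer();
+const wss = new WebSocketServer({ noServer: true });
+
+server.on('upgrade', (request, socket, head) => {
+  wss.handleUpgrade(request, socket, head, (ws) => {
+    wss.emit('connection', ws, request);
+  });
+});
+
+server.listen(8080);
+```
+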
+### server.shouldHandle(request)
+
+- `request` {http.IncomingMessage} The client HTTP GET request.
+
+See if a given request should be handled by this server. By default this method
+validates the pathname of the request, matching it against the `path` option if
+provided. The return value, `true` or `false`, determines whether or not to
+accept the handshake.
+
+This method can be overridden when custom handling logic is required.
+
+## Class: WebSocket
+
+This class represents a WebSocket. It extends the `EventEmitter`.
+
+### Ready state constants
+
+| Constant | Value | Description |
+| ---------- | ----- | ------------------------------------------------ |
+| CONNECTING | 0 | The connection is not yet open. |
+| OPEN | 1 | The connection is open and ready to communicate. |
+| CLOSING | 2 | The connection is in the process of closing. |
+| CLOSED | 3 | The connection is closed. |
+
+### new WebSocket(address[, protocols][, options])
+
+- `address` {String|url.URL} The URL to which to connect.
+- `protocols` {String|Array} The list of subprotocols.
+- `options` {Object}
+ - `followRedirects` {Boolean} Whether or not to follow redirects. Defaults to
+ `false`.
+ - `generateMask` {Function} The function used to generate the masking key. It
+ takes a `Buffer` that must be filled synchronously and is called before a
+ message is sent, for each message. By default the buffer is filled with
+ cryptographically strong random bytes.
+ - `handshakeTimeout` {Number} Timeout in milliseconds for the handshake
+ request. This is reset after every redirection.
+ - `maxPayload` {Number} The maximum allowed message size in bytes. Defaults to
+ 100 MiB (104857600 bytes).
+ - `maxRedirects` {Number} The maximum number of redirects allowed. Defaults
+ to 10.
+ - `origin` {String} Value of the `Origin` or `Sec-WebSocket-Origin` header
+ depending on the `protocolVersion`.
+ - `perMessageDeflate` {Boolean|Object} Enable/disable permessage-deflate.
+ - `protocolVersion` {Number} Value of the `Sec-WebSocket-Version` header.
+ - `skipUTF8Validation` {Boolean} Specifies whether or not to skip UTF-8
+ validation for text and close messages. Defaults to `false`. Set to `true`
+ only if the server is trusted.
+ - Any other option allowed in [`http.request()`][] or [`https.request()`][].
+ Options given do not have any effect if parsed from the URL given with the
+ `address` parameter.
+
+The default value of `perMessageDeflate` is `true`. When using an object, the
+parameters are the same as for the server. The only difference is the direction
+of the requests. For example, `serverNoContextTakeover` can be used to ask the
+server to disable context takeover.
+
+Create a new WebSocket instance.
+
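+For instance, a sketch of a client using a subprotocol and a few of the options
+above; the URL and values are placeholders:
+
+```js
+const WebSocket = require('ws');
+
+const ws = new WebSocket('wss://example.com/path', ['chat-v1'], {
+  handshakeTimeout: 5000,
+  perMessageDeflate: false
+});
+
+ws.on('open', () => ws.send('hello'));
+```
+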
+#### UNIX Domain Sockets
+
+`ws` supports making requests to UNIX domain sockets. To make one, use the
+following URL scheme:
+
+```
+ws+unix:///absolute/path/to/uds_socket:/pathname?search_params
+```
+
+Note that `:` is the separator between the socket path and the URL path. If the
+URL path is omitted
+
+```
+ws+unix:///absolute/path/to/uds_socket
+```
+
+it defaults to `/`.
+
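+For illustration, connecting to a hypothetical socket at `/tmp/app.sock` with
+the request path `/chat`:
+
+```js
+const WebSocket = require('ws');
+
+const ws = new WebSocket('ws+unix:///tmp/app.sock:/chat?user=demo');
+```
+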
+### Event: 'close'
+
+- `code` {Number}
+- `reason` {Buffer}
+
+Emitted when the connection is closed. `code` is a numeric value indicating the
+status code explaining why the connection has been closed. `reason` is a
+`Buffer` containing a human-readable string explaining why the connection has
+been closed.
+
+### Event: 'error'
+
+- `error` {Error}
+
+Emitted when an error occurs. Errors may have a `.code` property, matching one
+of the string values defined below under [Error codes](#error-codes).
+
+### Event: 'message'
+
+- `data` {Buffer|ArrayBuffer|Buffer[]}
+- `isBinary` {Boolean}
+
+Emitted when a message is received. `data` is the message content. `isBinary`
+specifies whether the message is binary or not.
+
+### Event: 'open'
+
+Emitted when the connection is established.
+
+### Event: 'ping'
+
+- `data` {Buffer}
+
+Emitted when a ping is received from the server.
+
+### Event: 'pong'
+
+- `data` {Buffer}
+
+Emitted when a pong is received from the server.
+
+### Event: 'redirect'
+
+- `url` {String}
+- `request` {http.ClientRequest}
+
+Emitted before a redirect is followed. `url` is the redirect URL. `request` is
+the HTTP GET request with the headers queued. This event gives the ability to
+inspect confidential headers and remove them on a per-redirect basis using the
+[`request.getHeader()`][] and [`request.removeHeader()`][] API. The `request`
+object should be used only for this purpose. When there is at least one listener
+for this event, no header is removed by default, even if the redirect is to a
+different domain.
+
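+A sketch of a listener that drops sensitive headers when a redirect leaves the
+original host; the URL and the `initialHost` bookkeeping are application-level
+placeholders, not part of the `ws` API:
+
+```js
+const WebSocket = require('ws');
+
+const initialHost = 'example.com';
+const ws = new WebSocket(`wss://${initialHost}`, { followRedirects: true });
+
+ws.on('redirect', (url, request) => {
+  if (new URL(url).host !== initialHost) {
+    request.removeHeader('authorization');
+    request.removeHeader('cookie');
+  }
+});
+```
+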
+### Event: 'unexpected-response'
+
+- `request` {http.ClientRequest}
+- `response` {http.IncomingMessage}
+
+Emitted when the server response is not the expected one, for example a 401
+response. This event gives the ability to read the response in order to extract
+useful information. If the server sends an invalid response and there isn't a
+listener for this event, an error is emitted.
+
+### Event: 'upgrade'
+
+- `response` {http.IncomingMessage}
+
+Emitted when response headers are received from the server as part of the
+handshake. This allows you to read headers from the server, for example
+'set-cookie' headers.
+
+### websocket.addEventListener(type, listener[, options])
+
+- `type` {String} A string representing the event type to listen for.
+- `listener` {Function} The listener to add.
+- `options` {Object}
+ - `once` {Boolean} A `Boolean` indicating that the listener should be invoked
+ at most once after being added. If `true`, the listener would be
+ automatically removed when invoked.
+
+Register an event listener emulating the `EventTarget` interface. This method
+does nothing if `type` is not one of `'close'`, `'error'`, `'message'`, or
+`'open'`.
+
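+A short sketch using the `once` option; the URL is a placeholder:
+
+```js
+const WebSocket = require('ws');
+
+const ws = new WebSocket('wss://example.com');
+
+ws.addEventListener(
+  'message',
+  (event) => {
+    console.log('first message only:', event.data);
+  },
+  { once: true }
+);
+```
+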
+### websocket.binaryType
+
+- {String}
+
+A string indicating the type of binary data being transmitted by the connection.
+This should be one of "nodebuffer", "arraybuffer" or "fragments". Defaults to
+"nodebuffer". Type "fragments" will emit the array of fragments as received from
+the sender, without copyfull concatenation, which is useful for the performance
+of binary protocols transferring large messages with multiple fragments.
+
+### websocket.bufferedAmount
+
+- {Number}
+
+The number of bytes of data that have been queued using calls to `send()` but
+not yet transmitted to the network. This deviates from the HTML standard in the
+following ways:
+
+1. If the data is immediately sent the value is `0`.
+1. All framing bytes are included.
+
+### websocket.close([code[, reason]])
+
+- `code` {Number} A numeric value indicating the status code explaining why the
+ connection is being closed.
+- `reason` {String|Buffer} The reason why the connection is closing.
+
+Initiate a closing handshake.
+
+### websocket.extensions
+
+- {Object}
+
+An object containing the negotiated extensions.
+
+### websocket.isPaused
+
+- {Boolean}
+
+Indicates whether the websocket is paused.
+
+### websocket.onclose
+
+- {Function}
+
+An event listener to be called when connection is closed. The listener receives
+a `CloseEvent` named "close".
+
+### websocket.onerror
+
+- {Function}
+
+An event listener to be called when an error occurs. The listener receives an
+`ErrorEvent` named "error".
+
+### websocket.onmessage
+
+- {Function}
+
+An event listener to be called when a message is received from the server. The
+listener receives a `MessageEvent` named "message".
+
+### websocket.onopen
+
+- {Function}
+
+An event listener to be called when the connection is established. The listener
+receives an `Event` named "open".
+
+### websocket.pause()
+
+Pause the websocket causing it to stop emitting events. Some events can still be
+emitted after this is called, until all buffered data is consumed. This method
+is a noop if the ready state is `CONNECTING` or `CLOSED`.
+
+### websocket.ping([data[, mask]][, callback])
+
+- `data` {Array|Number|Object|String|ArrayBuffer|Buffer|DataView|TypedArray} The
+ data to send in the ping frame.
+- `mask` {Boolean} Specifies whether `data` should be masked or not. Defaults to
+ `true` when `websocket` is not a server client.
+- `callback` {Function} An optional callback which is invoked when the ping
+ frame is written out. If an error occurs, the callback is called with the
+ error as its first argument.
+
+Send a ping. This method throws an error if the ready state is `CONNECTING`.
+
+### websocket.pong([data[, mask]][, callback])
+
+- `data` {Array|Number|Object|String|ArrayBuffer|Buffer|DataView|TypedArray} The
+ data to send in the pong frame.
+- `mask` {Boolean} Specifies whether `data` should be masked or not. Defaults to
+ `true` when `websocket` is not a server client.
+- `callback` {Function} An optional callback which is invoked when the pong
+ frame is written out. If an error occurs, the callback is called with the
+ error as its first argument.
+
+Send a pong. This method throws an error if the ready state is `CONNECTING`.
+
+### websocket.protocol
+
+- {String}
+
+The subprotocol selected by the server.
+
+### websocket.readyState
+
+- {Number}
+
+The current state of the connection. This is one of the ready state constants.
+
+### websocket.removeEventListener(type, listener)
+
+- `type` {String} A string representing the event type to remove.
+- `listener` {Function} The listener to remove.
+
+Removes an event listener emulating the `EventTarget` interface. This method
+only removes listeners added with
+[`websocket.addEventListener()`](#websocketaddeventlistenertype-listener-options).
+
+### websocket.resume()
+
+Make a paused socket resume emitting events. This method is a noop if the ready
+state is `CONNECTING` or `CLOSED`.
+
+### websocket.send(data[, options][, callback])
+
+- `data` {Array|Number|Object|String|ArrayBuffer|Buffer|DataView|TypedArray} The
+ data to send.
+- `options` {Object}
+  - `binary` {Boolean} Specifies whether `data` should be sent as binary or
+    not. Default is autodetected.
+ - `compress` {Boolean} Specifies whether `data` should be compressed or not.
+ Defaults to `true` when permessage-deflate is enabled.
+ - `fin` {Boolean} Specifies whether `data` is the last fragment of a message
+ or not. Defaults to `true`.
+ - `mask` {Boolean} Specifies whether `data` should be masked or not. Defaults
+ to `true` when `websocket` is not a server client.
+- `callback` {Function} An optional callback which is invoked when `data` is
+ written out. If an error occurs, the callback is called with the error as its
+ first argument.
+
+Send `data` through the connection. This method throws an error if the ready
+state is `CONNECTING`.
+
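+For example, a sketch that waits for the `'open'` event before sending a binary
+payload and logs a possible write error; the URL is a placeholder:
+
+```js
+const WebSocket = require('ws');
+
+const ws = new WebSocket('wss://example.com');
+
+ws.on('open', () => {
+  ws.send(Buffer.from([0x01, 0x02, 0x03]), { binary: true }, (err) => {
+    if (err) console.error('send failed:', err);
+  });
+});
+```
+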
+### websocket.terminate()
+
+Forcibly close the connection. Internally this calls [`socket.destroy()`][].
+
+### websocket.url
+
+- {String}
+
+The URL of the WebSocket server. Server clients don't have this attribute.
+
+## createWebSocketStream(websocket[, options])
+
+- `websocket` {WebSocket} A `WebSocket` object.
+- `options` {Object} [Options][duplex-options] to pass to the `Duplex`
+ constructor.
+
+Returns a `Duplex` stream that allows the Node.js streams API to be used on top
+of a given `WebSocket`.
+
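+For illustration, a sketch that pipes incoming messages to a file; the URL and
+file name are placeholders:
+
+```js
+const fs = require('fs');
+const WebSocket = require('ws');
+
+const ws = new WebSocket('wss://example.com');
+const duplex = WebSocket.createWebSocketStream(ws, { encoding: 'utf8' });
+
+duplex.on('error', console.error);
+duplex.pipe(fs.createWriteStream('messages.log'));
+```
+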
+## Environment variables
+
+### WS_NO_BUFFER_UTIL
+
+When set to a non-empty value, prevents the optional `bufferutil` dependency
+from being required.
+
+### WS_NO_UTF_8_VALIDATE
+
+When set to a non-empty value, prevents the optional `utf-8-validate` dependency
+from being required.
+
+## Error codes
+
+Errors emitted by the websocket may have a `.code` property, describing the
+specific type of error that has occurred:
+
+### WS_ERR_EXPECTED_FIN
+
+A WebSocket frame was received with the FIN bit not set when it was expected.
+
+### WS_ERR_EXPECTED_MASK
+
+An unmasked WebSocket frame was received by a WebSocket server.
+
+### WS_ERR_INVALID_CLOSE_CODE
+
+A WebSocket close frame was received with an invalid close code.
+
+### WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH
+
+A control frame with an invalid payload length was received.
+
+### WS_ERR_INVALID_OPCODE
+
+A WebSocket frame was received with an invalid opcode.
+
+### WS_ERR_INVALID_UTF8
+
+A text or close frame was received containing invalid UTF-8 data.
+
+### WS_ERR_UNEXPECTED_MASK
+
+A masked WebSocket frame was received by a WebSocket client.
+
+### WS_ERR_UNEXPECTED_RSV_1
+
+A WebSocket frame was received with the RSV1 bit set unexpectedly.
+
+### WS_ERR_UNEXPECTED_RSV_2_3
+
+A WebSocket frame was received with the RSV2 or RSV3 bit set unexpectedly.
+
+### WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH
+
+A data frame was received with a length longer than the max supported length
+(2^53 - 1, due to JavaScript language limitations).
+
+### WS_ERR_UNSUPPORTED_MESSAGE_LENGTH
+
+A message was received with a length longer than the maximum supported length,
+as configured by the `maxPayload` option.
+
+[concurrency-limit]: https://github.com/websockets/ws/issues/1202
+[duplex-options]:
+ https://nodejs.org/api/stream.html#stream_new_stream_duplex_options
+[`http.request()`]:
+ https://nodejs.org/api/http.html#http_http_request_options_callback
+[`https.request()`]:
+ https://nodejs.org/api/https.html#https_https_request_options_callback
+[permessage-deflate]:
+ https://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-19
+[`request.getheader()`]: https://nodejs.org/api/http.html#requestgetheadername
+[`request.removeheader()`]:
+ https://nodejs.org/api/http.html#requestremoveheadername
+[`socket.destroy()`]: https://nodejs.org/api/net.html#net_socket_destroy_error
+[zlib-options]: https://nodejs.org/api/zlib.html#zlib_class_options
diff --git a/testing/xpcshell/node-ws/examples/express-session-parse/index.js b/testing/xpcshell/node-ws/examples/express-session-parse/index.js
new file mode 100644
index 0000000000..b62a2e4a5f
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/express-session-parse/index.js
@@ -0,0 +1,101 @@
+'use strict';
+
+const session = require('express-session');
+const express = require('express');
+const http = require('http');
+const uuid = require('uuid');
+
+const { WebSocketServer } = require('../..');
+
+const app = express();
+const map = new Map();
+
+//
+// We need the same instance of the session parser in express and
+// WebSocket server.
+//
+const sessionParser = session({
+ saveUninitialized: false,
+ secret: '$eCuRiTy',
+ resave: false
+});
+
+//
+// Serve static files from the 'public' folder.
+//
+app.use(express.static('public'));
+app.use(sessionParser);
+
+app.post('/login', function (req, res) {
+ //
+ // "Log in" user and set userId to session.
+ //
+ const id = uuid.v4();
+
+ console.log(`Updating session for user ${id}`);
+ req.session.userId = id;
+ res.send({ result: 'OK', message: 'Session updated' });
+});
+
+app.delete('/logout', function (request, response) {
+ const ws = map.get(request.session.userId);
+
+ console.log('Destroying session');
+ request.session.destroy(function () {
+ if (ws) ws.close();
+
+ response.send({ result: 'OK', message: 'Session destroyed' });
+ });
+});
+
+//
+// Create an HTTP server.
+//
+const server = http.createServer(app);
+
+//
+// Create a WebSocket server completely detached from the HTTP server.
+//
+const wss = new WebSocketServer({ clientTracking: false, noServer: true });
+
+server.on('upgrade', function (request, socket, head) {
+ console.log('Parsing session from request...');
+
+ sessionParser(request, {}, () => {
+ if (!request.session.userId) {
+ socket.write('HTTP/1.1 401 Unauthorized\r\n\r\n');
+ socket.destroy();
+ return;
+ }
+
+ console.log('Session is parsed!');
+
+ wss.handleUpgrade(request, socket, head, function (ws) {
+ wss.emit('connection', ws, request);
+ });
+ });
+});
+
+wss.on('connection', function (ws, request) {
+ const userId = request.session.userId;
+
+ map.set(userId, ws);
+
+ ws.on('message', function (message) {
+ //
+ // Here we can now use session parameters.
+ //
+ console.log(`Received message ${message} from user ${userId}`);
+ });
+
+ ws.on('close', function () {
+ map.delete(userId);
+ });
+});
+
+//
+// Start the server.
+//
+server.listen(8080, function () {
+ console.log('Listening on http://localhost:8080');
+});
diff --git a/testing/xpcshell/node-ws/examples/express-session-parse/package.json b/testing/xpcshell/node-ws/examples/express-session-parse/package.json
new file mode 100644
index 0000000000..406706ce8a
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/express-session-parse/package.json
@@ -0,0 +1,11 @@
+{
+ "author": "",
+ "name": "express-session-parse",
+ "version": "0.0.0",
+ "repository": "websockets/ws",
+ "dependencies": {
+ "express": "^4.16.4",
+ "express-session": "^1.16.1",
+ "uuid": "^8.3.2"
+ }
+}
diff --git a/testing/xpcshell/node-ws/examples/express-session-parse/public/app.js b/testing/xpcshell/node-ws/examples/express-session-parse/public/app.js
new file mode 100644
index 0000000000..f70dc21835
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/express-session-parse/public/app.js
@@ -0,0 +1,67 @@
+(function () {
+ const messages = document.querySelector('#messages');
+ const wsButton = document.querySelector('#wsButton');
+ const wsSendButton = document.querySelector('#wsSendButton');
+ const logout = document.querySelector('#logout');
+ const login = document.querySelector('#login');
+
+ function showMessage(message) {
+ messages.textContent += `\n${message}`;
+ messages.scrollTop = messages.scrollHeight;
+ }
+
+ function handleResponse(response) {
+ return response.ok
+ ? response.json().then((data) => JSON.stringify(data, null, 2))
+ : Promise.reject(new Error('Unexpected response'));
+ }
+
+ login.onclick = function () {
+ fetch('/login', { method: 'POST', credentials: 'same-origin' })
+ .then(handleResponse)
+ .then(showMessage)
+ .catch(function (err) {
+ showMessage(err.message);
+ });
+ };
+
+ logout.onclick = function () {
+ fetch('/logout', { method: 'DELETE', credentials: 'same-origin' })
+ .then(handleResponse)
+ .then(showMessage)
+ .catch(function (err) {
+ showMessage(err.message);
+ });
+ };
+
+ let ws;
+
+ wsButton.onclick = function () {
+ if (ws) {
+ ws.onerror = ws.onopen = ws.onclose = null;
+ ws.close();
+ }
+
+ ws = new WebSocket(`ws://${location.host}`);
+ ws.onerror = function () {
+ showMessage('WebSocket error');
+ };
+ ws.onopen = function () {
+ showMessage('WebSocket connection established');
+ };
+ ws.onclose = function () {
+ showMessage('WebSocket connection closed');
+ ws = null;
+ };
+ };
+
+ wsSendButton.onclick = function () {
+ if (!ws) {
+ showMessage('No WebSocket connection');
+ return;
+ }
+
+ ws.send('Hello World!');
+ showMessage('Sent "Hello World!"');
+ };
+})();
diff --git a/testing/xpcshell/node-ws/examples/express-session-parse/public/index.html b/testing/xpcshell/node-ws/examples/express-session-parse/public/index.html
new file mode 100644
index 0000000000..c07aa2e87a
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/express-session-parse/public/index.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>Express session demo</title>
+ </head>
+ <body>
+ <h1>Choose an action.</h1>
+ <button id="login" type="button" title="Simulate login">
+ Simulate login
+ </button>
+ <button id="logout" type="button" title="Simulate logout">
+ Simulate logout
+ </button>
+ <button id="wsButton" type="button" title="Open WebSocket connection">
+ Open WebSocket connection
+ </button>
+ <button id="wsSendButton" type="button" title="Send WebSocket message">
+ Send WebSocket message
+ </button>
+ <pre id="messages" style="height: 400px; overflow: scroll"></pre>
+ <script src="app.js"></script>
+ </body>
+</html>
diff --git a/testing/xpcshell/node-ws/examples/server-stats/index.js b/testing/xpcshell/node-ws/examples/server-stats/index.js
new file mode 100644
index 0000000000..e8754b5b28
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/server-stats/index.js
@@ -0,0 +1,33 @@
+'use strict';
+
+const express = require('express');
+const path = require('path');
+const { createServer } = require('http');
+
+const { WebSocketServer } = require('../..');
+
+const app = express();
+app.use(express.static(path.join(__dirname, '/public')));
+
+const server = createServer(app);
+const wss = new WebSocketServer({ server });
+
+wss.on('connection', function (ws) {
+ const id = setInterval(function () {
+ ws.send(JSON.stringify(process.memoryUsage()), function () {
+ //
+ // Ignore errors.
+ //
+ });
+ }, 100);
+ console.log('started client interval');
+
+ ws.on('close', function () {
+ console.log('stopping client interval');
+ clearInterval(id);
+ });
+});
+
+server.listen(8080, function () {
+ console.log('Listening on http://localhost:8080');
+});
diff --git a/testing/xpcshell/node-ws/examples/server-stats/package.json b/testing/xpcshell/node-ws/examples/server-stats/package.json
new file mode 100644
index 0000000000..20e2029133
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/server-stats/package.json
@@ -0,0 +1,9 @@
+{
+ "author": "",
+ "name": "serverstats",
+ "version": "0.0.0",
+ "repository": "websockets/ws",
+ "dependencies": {
+ "express": "^4.16.4"
+ }
+}
diff --git a/testing/xpcshell/node-ws/examples/server-stats/public/index.html b/testing/xpcshell/node-ws/examples/server-stats/public/index.html
new file mode 100644
index 0000000000..a82815af6f
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/server-stats/public/index.html
@@ -0,0 +1,63 @@
+<!DOCTYPE html>
+<html lang="en">
+ <head>
+ <meta charset="utf-8">
+ <title>Server stats</title>
+ <style>
+ table, td {
+ border: 1px solid #333;
+ }
+
+ thead {
+ background-color: #333;
+ color: #fff;
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Server stats</h1>
+ <table>
+ <thead>
+ <tr>
+ <th colspan="2">Memory usage</th>
+ </tr>
+ </thead>
+ <tbody>
+ <tr>
+ <td>RSS</td>
+ <td id="rss"></td>
+ </tr>
+ <tr>
+ <td>Heap total</td>
+ <td id="heapTotal"></td>
+ </tr>
+ <tr>
+ <td>Heap used</td>
+ <td id="heapUsed"></td>
+ </tr>
+ <tr>
+ <td>External</td>
+ <td id="external"></td>
+ </tr>
+ </tbody>
+ </table>
+ <script>
+ (function() {
+ const rss = document.getElementById('rss');
+ const heapTotal = document.getElementById('heapTotal');
+ const heapUsed = document.getElementById('heapUsed');
+ const external = document.getElementById('external');
+ const ws = new WebSocket(`ws://${location.host}`);
+
+ ws.onmessage = function(event) {
+ const data = JSON.parse(event.data);
+
+ rss.textContent = data.rss;
+ heapTotal.textContent = data.heapTotal;
+ heapUsed.textContent = data.heapUsed;
+ external.textContent = data.external;
+ };
+ })();
+ </script>
+ </body>
+</html>
diff --git a/testing/xpcshell/node-ws/examples/ssl.js b/testing/xpcshell/node-ws/examples/ssl.js
new file mode 100644
index 0000000000..a5e750b799
--- /dev/null
+++ b/testing/xpcshell/node-ws/examples/ssl.js
@@ -0,0 +1,37 @@
+'use strict';
+
+const https = require('https');
+const fs = require('fs');
+
+const { WebSocket, WebSocketServer } = require('..');
+
+const server = https.createServer({
+ cert: fs.readFileSync('../test/fixtures/certificate.pem'),
+ key: fs.readFileSync('../test/fixtures/key.pem')
+});
+
+const wss = new WebSocketServer({ server });
+
+wss.on('connection', function connection(ws) {
+ ws.on('message', function message(msg) {
+ console.log(msg.toString());
+ });
+});
+
+server.listen(function listening() {
+ //
+ // If the `rejectUnauthorized` option is not `false`, the server certificate
+ // is verified against a list of well-known CAs. An 'error' event is emitted
+ // if verification fails.
+ //
+ // The certificate used in this example is self-signed so `rejectUnauthorized`
+ // is set to `false`.
+ //
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', function open() {
+ ws.send('All glory to WebSockets!');
+ });
+});
diff --git a/testing/xpcshell/node-ws/index.js b/testing/xpcshell/node-ws/index.js
new file mode 100644
index 0000000000..41edb3b81b
--- /dev/null
+++ b/testing/xpcshell/node-ws/index.js
@@ -0,0 +1,13 @@
+'use strict';
+
+const WebSocket = require('./lib/websocket');
+
+WebSocket.createWebSocketStream = require('./lib/stream');
+WebSocket.Server = require('./lib/websocket-server');
+WebSocket.Receiver = require('./lib/receiver');
+WebSocket.Sender = require('./lib/sender');
+
+WebSocket.WebSocket = WebSocket;
+WebSocket.WebSocketServer = WebSocket.Server;
+
+module.exports = WebSocket;
diff --git a/testing/xpcshell/node-ws/lib/buffer-util.js b/testing/xpcshell/node-ws/lib/buffer-util.js
new file mode 100644
index 0000000000..df75955467
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/buffer-util.js
@@ -0,0 +1,127 @@
+'use strict';
+
+const { EMPTY_BUFFER } = require('./constants');
+
+/**
+ * Merges an array of buffers into a new buffer.
+ *
+ * @param {Buffer[]} list The array of buffers to concat
+ * @param {Number} totalLength The total length of buffers in the list
+ * @return {Buffer} The resulting buffer
+ * @public
+ */
+function concat(list, totalLength) {
+ if (list.length === 0) return EMPTY_BUFFER;
+ if (list.length === 1) return list[0];
+
+ const target = Buffer.allocUnsafe(totalLength);
+ let offset = 0;
+
+ for (let i = 0; i < list.length; i++) {
+ const buf = list[i];
+ target.set(buf, offset);
+ offset += buf.length;
+ }
+
+ if (offset < totalLength) return target.slice(0, offset);
+
+ return target;
+}
+
+/**
+ * Masks a buffer using the given mask.
+ *
+ * @param {Buffer} source The buffer to mask
+ * @param {Buffer} mask The mask to use
+ * @param {Buffer} output The buffer where to store the result
+ * @param {Number} offset The offset at which to start writing
+ * @param {Number} length The number of bytes to mask.
+ * @public
+ */
+function _mask(source, mask, output, offset, length) {
+ for (let i = 0; i < length; i++) {
+ output[offset + i] = source[i] ^ mask[i & 3];
+ }
+}
+
+/**
+ * Unmasks a buffer using the given mask.
+ *
+ * @param {Buffer} buffer The buffer to unmask
+ * @param {Buffer} mask The mask to use
+ * @public
+ */
+function _unmask(buffer, mask) {
+ for (let i = 0; i < buffer.length; i++) {
+ buffer[i] ^= mask[i & 3];
+ }
+}
+
+/**
+ * Converts a buffer to an `ArrayBuffer`.
+ *
+ * @param {Buffer} buf The buffer to convert
+ * @return {ArrayBuffer} Converted buffer
+ * @public
+ */
+function toArrayBuffer(buf) {
+ if (buf.byteLength === buf.buffer.byteLength) {
+ return buf.buffer;
+ }
+
+ return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
+}
+
+/**
+ * Converts `data` to a `Buffer`.
+ *
+ * @param {*} data The data to convert
+ * @return {Buffer} The buffer
+ * @throws {TypeError}
+ * @public
+ */
+function toBuffer(data) {
+ toBuffer.readOnly = true;
+
+ if (Buffer.isBuffer(data)) return data;
+
+ let buf;
+
+ if (data instanceof ArrayBuffer) {
+ buf = Buffer.from(data);
+ } else if (ArrayBuffer.isView(data)) {
+ buf = Buffer.from(data.buffer, data.byteOffset, data.byteLength);
+ } else {
+ buf = Buffer.from(data);
+ toBuffer.readOnly = false;
+ }
+
+ return buf;
+}
+
+module.exports = {
+ concat,
+ mask: _mask,
+ toArrayBuffer,
+ toBuffer,
+ unmask: _unmask
+};
+
+/* istanbul ignore else */
+if (!process.env.WS_NO_BUFFER_UTIL) {
+ try {
+ const bufferUtil = require('bufferutil');
+
+ module.exports.mask = function (source, mask, output, offset, length) {
+ if (length < 48) _mask(source, mask, output, offset, length);
+ else bufferUtil.mask(source, mask, output, offset, length);
+ };
+
+ module.exports.unmask = function (buffer, mask) {
+ if (buffer.length < 32) _unmask(buffer, mask);
+ else bufferUtil.unmask(buffer, mask);
+ };
+ } catch (e) {
+ // Continue regardless of the error.
+ }
+}
diff --git a/testing/xpcshell/node-ws/lib/constants.js b/testing/xpcshell/node-ws/lib/constants.js
new file mode 100644
index 0000000000..d691b30a17
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/constants.js
@@ -0,0 +1,12 @@
+'use strict';
+
+module.exports = {
+ BINARY_TYPES: ['nodebuffer', 'arraybuffer', 'fragments'],
+ EMPTY_BUFFER: Buffer.alloc(0),
+ GUID: '258EAFA5-E914-47DA-95CA-C5AB0DC85B11',
+ kForOnEventAttribute: Symbol('kIsForOnEventAttribute'),
+ kListener: Symbol('kListener'),
+ kStatusCode: Symbol('status-code'),
+ kWebSocket: Symbol('websocket'),
+ NOOP: () => {}
+};
diff --git a/testing/xpcshell/node-ws/lib/event-target.js b/testing/xpcshell/node-ws/lib/event-target.js
new file mode 100644
index 0000000000..d5abd83a0f
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/event-target.js
@@ -0,0 +1,266 @@
+'use strict';
+
+const { kForOnEventAttribute, kListener } = require('./constants');
+
+const kCode = Symbol('kCode');
+const kData = Symbol('kData');
+const kError = Symbol('kError');
+const kMessage = Symbol('kMessage');
+const kReason = Symbol('kReason');
+const kTarget = Symbol('kTarget');
+const kType = Symbol('kType');
+const kWasClean = Symbol('kWasClean');
+
+/**
+ * Class representing an event.
+ */
+class Event {
+ /**
+ * Create a new `Event`.
+ *
+ * @param {String} type The name of the event
+ * @throws {TypeError} If the `type` argument is not specified
+ */
+ constructor(type) {
+ this[kTarget] = null;
+ this[kType] = type;
+ }
+
+ /**
+ * @type {*}
+ */
+ get target() {
+ return this[kTarget];
+ }
+
+ /**
+ * @type {String}
+ */
+ get type() {
+ return this[kType];
+ }
+}
+
+Object.defineProperty(Event.prototype, 'target', { enumerable: true });
+Object.defineProperty(Event.prototype, 'type', { enumerable: true });
+
+/**
+ * Class representing a close event.
+ *
+ * @extends Event
+ */
+class CloseEvent extends Event {
+ /**
+ * Create a new `CloseEvent`.
+ *
+ * @param {String} type The name of the event
+ * @param {Object} [options] A dictionary object that allows for setting
+ * attributes via object members of the same name
+ * @param {Number} [options.code=0] The status code explaining why the
+ * connection was closed
+ * @param {String} [options.reason=''] A human-readable string explaining why
+ * the connection was closed
+ * @param {Boolean} [options.wasClean=false] Indicates whether or not the
+ * connection was cleanly closed
+ */
+ constructor(type, options = {}) {
+ super(type);
+
+ this[kCode] = options.code === undefined ? 0 : options.code;
+ this[kReason] = options.reason === undefined ? '' : options.reason;
+ this[kWasClean] = options.wasClean === undefined ? false : options.wasClean;
+ }
+
+ /**
+ * @type {Number}
+ */
+ get code() {
+ return this[kCode];
+ }
+
+ /**
+ * @type {String}
+ */
+ get reason() {
+ return this[kReason];
+ }
+
+ /**
+ * @type {Boolean}
+ */
+ get wasClean() {
+ return this[kWasClean];
+ }
+}
+
+Object.defineProperty(CloseEvent.prototype, 'code', { enumerable: true });
+Object.defineProperty(CloseEvent.prototype, 'reason', { enumerable: true });
+Object.defineProperty(CloseEvent.prototype, 'wasClean', { enumerable: true });
+
+/**
+ * Class representing an error event.
+ *
+ * @extends Event
+ */
+class ErrorEvent extends Event {
+ /**
+ * Create a new `ErrorEvent`.
+ *
+ * @param {String} type The name of the event
+ * @param {Object} [options] A dictionary object that allows for setting
+ * attributes via object members of the same name
+ * @param {*} [options.error=null] The error that generated this event
+ * @param {String} [options.message=''] The error message
+ */
+ constructor(type, options = {}) {
+ super(type);
+
+ this[kError] = options.error === undefined ? null : options.error;
+ this[kMessage] = options.message === undefined ? '' : options.message;
+ }
+
+ /**
+ * @type {*}
+ */
+ get error() {
+ return this[kError];
+ }
+
+ /**
+ * @type {String}
+ */
+ get message() {
+ return this[kMessage];
+ }
+}
+
+Object.defineProperty(ErrorEvent.prototype, 'error', { enumerable: true });
+Object.defineProperty(ErrorEvent.prototype, 'message', { enumerable: true });
+
+/**
+ * Class representing a message event.
+ *
+ * @extends Event
+ */
+class MessageEvent extends Event {
+ /**
+ * Create a new `MessageEvent`.
+ *
+ * @param {String} type The name of the event
+ * @param {Object} [options] A dictionary object that allows for setting
+ * attributes via object members of the same name
+ * @param {*} [options.data=null] The message content
+ */
+ constructor(type, options = {}) {
+ super(type);
+
+ this[kData] = options.data === undefined ? null : options.data;
+ }
+
+ /**
+ * @type {*}
+ */
+ get data() {
+ return this[kData];
+ }
+}
+
+Object.defineProperty(MessageEvent.prototype, 'data', { enumerable: true });
+
+/**
+ * This provides methods for emulating the `EventTarget` interface. It's not
+ * meant to be used directly.
+ *
+ * @mixin
+ */
+const EventTarget = {
+ /**
+ * Register an event listener.
+ *
+ * @param {String} type A string representing the event type to listen for
+ * @param {Function} listener The listener to add
+ * @param {Object} [options] An options object specifies characteristics about
+ * the event listener
+ * @param {Boolean} [options.once=false] A `Boolean` indicating that the
+ * listener should be invoked at most once after being added. If `true`,
+ * the listener would be automatically removed when invoked.
+ * @public
+ */
+ addEventListener(type, listener, options = {}) {
+ let wrapper;
+
+ if (type === 'message') {
+ wrapper = function onMessage(data, isBinary) {
+ const event = new MessageEvent('message', {
+ data: isBinary ? data : data.toString()
+ });
+
+ event[kTarget] = this;
+ listener.call(this, event);
+ };
+ } else if (type === 'close') {
+ wrapper = function onClose(code, message) {
+ const event = new CloseEvent('close', {
+ code,
+ reason: message.toString(),
+ wasClean: this._closeFrameReceived && this._closeFrameSent
+ });
+
+ event[kTarget] = this;
+ listener.call(this, event);
+ };
+ } else if (type === 'error') {
+ wrapper = function onError(error) {
+ const event = new ErrorEvent('error', {
+ error,
+ message: error.message
+ });
+
+ event[kTarget] = this;
+ listener.call(this, event);
+ };
+ } else if (type === 'open') {
+ wrapper = function onOpen() {
+ const event = new Event('open');
+
+ event[kTarget] = this;
+ listener.call(this, event);
+ };
+ } else {
+ return;
+ }
+
+ wrapper[kForOnEventAttribute] = !!options[kForOnEventAttribute];
+ wrapper[kListener] = listener;
+
+ if (options.once) {
+ this.once(type, wrapper);
+ } else {
+ this.on(type, wrapper);
+ }
+ },
+
+ /**
+ * Remove an event listener.
+ *
+ * @param {String} type A string representing the event type to remove
+ * @param {Function} handler The listener to remove
+ * @public
+ */
+ removeEventListener(type, handler) {
+ for (const listener of this.listeners(type)) {
+ if (listener[kListener] === handler && !listener[kForOnEventAttribute]) {
+ this.removeListener(type, listener);
+ break;
+ }
+ }
+ }
+};
+
+module.exports = {
+ CloseEvent,
+ ErrorEvent,
+ Event,
+ EventTarget,
+ MessageEvent
+};
diff --git a/testing/xpcshell/node-ws/lib/extension.js b/testing/xpcshell/node-ws/lib/extension.js
new file mode 100644
index 0000000000..3d7895c1b0
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/extension.js
@@ -0,0 +1,203 @@
+'use strict';
+
+const { tokenChars } = require('./validation');
+
+/**
+ * Adds an offer to the map of extension offers or a parameter to the map of
+ * parameters.
+ *
+ * @param {Object} dest The map of extension offers or parameters
+ * @param {String} name The extension or parameter name
+ * @param {(Object|Boolean|String)} elem The extension parameters or the
+ * parameter value
+ * @private
+ */
+function push(dest, name, elem) {
+ if (dest[name] === undefined) dest[name] = [elem];
+ else dest[name].push(elem);
+}
+
+/**
+ * Parses the `Sec-WebSocket-Extensions` header into an object.
+ *
+ * @param {String} header The field value of the header
+ * @return {Object} The parsed object
+ * @public
+ */
+function parse(header) {
+ const offers = Object.create(null);
+ let params = Object.create(null);
+ let mustUnescape = false;
+ let isEscaping = false;
+ let inQuotes = false;
+ let extensionName;
+ let paramName;
+ let start = -1;
+ let code = -1;
+ let end = -1;
+ let i = 0;
+
+ for (; i < header.length; i++) {
+ code = header.charCodeAt(i);
+
+ if (extensionName === undefined) {
+ if (end === -1 && tokenChars[code] === 1) {
+ if (start === -1) start = i;
+ } else if (
+ i !== 0 &&
+ (code === 0x20 /* ' ' */ || code === 0x09) /* '\t' */
+ ) {
+ if (end === -1 && start !== -1) end = i;
+ } else if (code === 0x3b /* ';' */ || code === 0x2c /* ',' */) {
+ if (start === -1) {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+
+ if (end === -1) end = i;
+ const name = header.slice(start, end);
+ if (code === 0x2c) {
+ push(offers, name, params);
+ params = Object.create(null);
+ } else {
+ extensionName = name;
+ }
+
+ start = end = -1;
+ } else {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ } else if (paramName === undefined) {
+ if (end === -1 && tokenChars[code] === 1) {
+ if (start === -1) start = i;
+ } else if (code === 0x20 || code === 0x09) {
+ if (end === -1 && start !== -1) end = i;
+ } else if (code === 0x3b || code === 0x2c) {
+ if (start === -1) {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+
+ if (end === -1) end = i;
+ push(params, header.slice(start, end), true);
+ if (code === 0x2c) {
+ push(offers, extensionName, params);
+ params = Object.create(null);
+ extensionName = undefined;
+ }
+
+ start = end = -1;
+ } else if (code === 0x3d /* '=' */ && start !== -1 && end === -1) {
+ paramName = header.slice(start, i);
+ start = end = -1;
+ } else {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ } else {
+ //
+ // The value of a quoted-string after unescaping must conform to the
+ // token ABNF, so only token characters are valid.
+ // Ref: https://tools.ietf.org/html/rfc6455#section-9.1
+ //
+ if (isEscaping) {
+ if (tokenChars[code] !== 1) {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ if (start === -1) start = i;
+ else if (!mustUnescape) mustUnescape = true;
+ isEscaping = false;
+ } else if (inQuotes) {
+ if (tokenChars[code] === 1) {
+ if (start === -1) start = i;
+ } else if (code === 0x22 /* '"' */ && start !== -1) {
+ inQuotes = false;
+ end = i;
+ } else if (code === 0x5c /* '\' */) {
+ isEscaping = true;
+ } else {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ } else if (code === 0x22 && header.charCodeAt(i - 1) === 0x3d) {
+ inQuotes = true;
+ } else if (end === -1 && tokenChars[code] === 1) {
+ if (start === -1) start = i;
+ } else if (start !== -1 && (code === 0x20 || code === 0x09)) {
+ if (end === -1) end = i;
+ } else if (code === 0x3b || code === 0x2c) {
+ if (start === -1) {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+
+ if (end === -1) end = i;
+ let value = header.slice(start, end);
+ if (mustUnescape) {
+ value = value.replace(/\\/g, '');
+ mustUnescape = false;
+ }
+ push(params, paramName, value);
+ if (code === 0x2c) {
+ push(offers, extensionName, params);
+ params = Object.create(null);
+ extensionName = undefined;
+ }
+
+ paramName = undefined;
+ start = end = -1;
+ } else {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ }
+ }
+
+ if (start === -1 || inQuotes || code === 0x20 || code === 0x09) {
+ throw new SyntaxError('Unexpected end of input');
+ }
+
+ if (end === -1) end = i;
+ const token = header.slice(start, end);
+ if (extensionName === undefined) {
+ push(offers, token, params);
+ } else {
+ if (paramName === undefined) {
+ push(params, token, true);
+ } else if (mustUnescape) {
+ push(params, paramName, token.replace(/\\/g, ''));
+ } else {
+ push(params, paramName, token);
+ }
+ push(offers, extensionName, params);
+ }
+
+ return offers;
+}
+
+/**
+ * Builds the `Sec-WebSocket-Extensions` header field value.
+ *
+ * @param {Object} extensions The map of extensions and parameters to format
+ * @return {String} A string representing the given object
+ * @public
+ */
+function format(extensions) {
+ return Object.keys(extensions)
+ .map((extension) => {
+ let configurations = extensions[extension];
+ if (!Array.isArray(configurations)) configurations = [configurations];
+ return configurations
+ .map((params) => {
+ return [extension]
+ .concat(
+ Object.keys(params).map((k) => {
+ let values = params[k];
+ if (!Array.isArray(values)) values = [values];
+ return values
+ .map((v) => (v === true ? k : `${k}=${v}`))
+ .join('; ');
+ })
+ )
+ .join('; ');
+ })
+ .join(', ');
+ })
+ .join(', ');
+}
+
+module.exports = { format, parse };
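+
+//
+// Illustrative usage sketch (editor's addition, not part of the upstream
+// module). The header value below is an assumed example offer.
+//
+//   const { parse, format } = require('./extension');
+//   const offers = parse('permessage-deflate; client_max_window_bits');
+//   // offers['permessage-deflate'] => [ { client_max_window_bits: [ true ] } ]
+//   format({ 'permessage-deflate': { client_max_window_bits: 10 } });
+//   // => 'permessage-deflate; client_max_window_bits=10'
+//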
diff --git a/testing/xpcshell/node-ws/lib/limiter.js b/testing/xpcshell/node-ws/lib/limiter.js
new file mode 100644
index 0000000000..3fd35784ea
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/limiter.js
@@ -0,0 +1,55 @@
+'use strict';
+
+const kDone = Symbol('kDone');
+const kRun = Symbol('kRun');
+
+/**
+ * A very simple job queue with adjustable concurrency. Adapted from
+ * https://github.com/STRML/async-limiter
+ */
+class Limiter {
+ /**
+ * Creates a new `Limiter`.
+ *
+ * @param {Number} [concurrency=Infinity] The maximum number of jobs allowed
+ * to run concurrently
+ */
+ constructor(concurrency) {
+ this[kDone] = () => {
+ this.pending--;
+ this[kRun]();
+ };
+ this.concurrency = concurrency || Infinity;
+ this.jobs = [];
+ this.pending = 0;
+ }
+
+ /**
+ * Adds a job to the queue.
+ *
+ * @param {Function} job The job to run
+ * @public
+ */
+ add(job) {
+ this.jobs.push(job);
+ this[kRun]();
+ }
+
+ /**
+ * Removes a job from the queue and runs it if possible.
+ *
+ * @private
+ */
+ [kRun]() {
+ if (this.pending === this.concurrency) return;
+
+ if (this.jobs.length) {
+ const job = this.jobs.shift();
+
+ this.pending++;
+ job(this[kDone]);
+ }
+ }
+}
+
+module.exports = Limiter;
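+
+//
+// Illustrative usage sketch (editor's addition): each job receives a `done`
+// callback and must invoke it so the next queued job can start.
+//
+//   const limiter = new Limiter(2);
+//   limiter.add((done) => {
+//     setTimeout(() => {
+//       console.log('job finished');
+//       done();
+//     }, 100);
+//   });
+//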
diff --git a/testing/xpcshell/node-ws/lib/permessage-deflate.js b/testing/xpcshell/node-ws/lib/permessage-deflate.js
new file mode 100644
index 0000000000..94603c98da
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/permessage-deflate.js
@@ -0,0 +1,511 @@
+'use strict';
+
+const zlib = require('zlib');
+
+const bufferUtil = require('./buffer-util');
+const Limiter = require('./limiter');
+const { kStatusCode } = require('./constants');
+
+const TRAILER = Buffer.from([0x00, 0x00, 0xff, 0xff]);
+const kPerMessageDeflate = Symbol('permessage-deflate');
+const kTotalLength = Symbol('total-length');
+const kCallback = Symbol('callback');
+const kBuffers = Symbol('buffers');
+const kError = Symbol('error');
+
+//
+// We limit zlib concurrency, which prevents severe memory fragmentation
+// as documented in https://github.com/nodejs/node/issues/8871#issuecomment-250915913
+// and https://github.com/websockets/ws/issues/1202
+//
+// Intentionally global; it's the global thread pool that's an issue.
+//
+let zlibLimiter;
+
+/**
+ * permessage-deflate implementation.
+ */
+class PerMessageDeflate {
+ /**
+ * Creates a PerMessageDeflate instance.
+ *
+ * @param {Object} [options] Configuration options
+ * @param {(Boolean|Number)} [options.clientMaxWindowBits] Advertise support
+ * for, or request, a custom client window size
+ * @param {Boolean} [options.clientNoContextTakeover=false] Advertise/
+ * acknowledge disabling of client context takeover
+ * @param {Number} [options.concurrencyLimit=10] The number of concurrent
+ * calls to zlib
+ * @param {(Boolean|Number)} [options.serverMaxWindowBits] Request/confirm the
+ * use of a custom server window size
+ * @param {Boolean} [options.serverNoContextTakeover=false] Request/accept
+ * disabling of server context takeover
+ * @param {Number} [options.threshold=1024] Size (in bytes) below which
+ * messages should not be compressed if context takeover is disabled
+ * @param {Object} [options.zlibDeflateOptions] Options to pass to zlib on
+ * deflate
+ * @param {Object} [options.zlibInflateOptions] Options to pass to zlib on
+ * inflate
+ * @param {Boolean} [isServer=false] Create the instance in either server or
+ * client mode
+ * @param {Number} [maxPayload=0] The maximum allowed message length
+ */
+ constructor(options, isServer, maxPayload) {
+ this._maxPayload = maxPayload | 0;
+ this._options = options || {};
+ this._threshold =
+ this._options.threshold !== undefined ? this._options.threshold : 1024;
+ this._isServer = !!isServer;
+ this._deflate = null;
+ this._inflate = null;
+
+ this.params = null;
+
+ if (!zlibLimiter) {
+ const concurrency =
+ this._options.concurrencyLimit !== undefined
+ ? this._options.concurrencyLimit
+ : 10;
+ zlibLimiter = new Limiter(concurrency);
+ }
+ }
+
+ /**
+ * @type {String}
+ */
+ static get extensionName() {
+ return 'permessage-deflate';
+ }
+
+ /**
+ * Create an extension negotiation offer.
+ *
+ * @return {Object} Extension parameters
+ * @public
+ */
+ offer() {
+ const params = {};
+
+ if (this._options.serverNoContextTakeover) {
+ params.server_no_context_takeover = true;
+ }
+ if (this._options.clientNoContextTakeover) {
+ params.client_no_context_takeover = true;
+ }
+ if (this._options.serverMaxWindowBits) {
+ params.server_max_window_bits = this._options.serverMaxWindowBits;
+ }
+ if (this._options.clientMaxWindowBits) {
+ params.client_max_window_bits = this._options.clientMaxWindowBits;
+ } else if (this._options.clientMaxWindowBits == null) {
+ params.client_max_window_bits = true;
+ }
+
+ return params;
+ }
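+
+ //
+ // Illustrative sketch (editor's addition): with default options the offer
+ // only advertises support for a custom client window size, i.e.
+ // `new PerMessageDeflate().offer()` yields `{ client_max_window_bits: true }`.
+ //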
+
+ /**
+ * Accept an extension negotiation offer/response.
+ *
+ * @param {Array} configurations The extension negotiation offers/response
+ * @return {Object} Accepted configuration
+ * @public
+ */
+ accept(configurations) {
+ configurations = this.normalizeParams(configurations);
+
+ this.params = this._isServer
+ ? this.acceptAsServer(configurations)
+ : this.acceptAsClient(configurations);
+
+ return this.params;
+ }
+
+ /**
+ * Releases all resources used by the extension.
+ *
+ * @public
+ */
+ cleanup() {
+ if (this._inflate) {
+ this._inflate.close();
+ this._inflate = null;
+ }
+
+ if (this._deflate) {
+ const callback = this._deflate[kCallback];
+
+ this._deflate.close();
+ this._deflate = null;
+
+ if (callback) {
+ callback(
+ new Error(
+ 'The deflate stream was closed while data was being processed'
+ )
+ );
+ }
+ }
+ }
+
+ /**
+ * Accept an extension negotiation offer.
+ *
+ * @param {Array} offers The extension negotiation offers
+ * @return {Object} Accepted configuration
+ * @private
+ */
+ acceptAsServer(offers) {
+ const opts = this._options;
+ const accepted = offers.find((params) => {
+ if (
+ (opts.serverNoContextTakeover === false &&
+ params.server_no_context_takeover) ||
+ (params.server_max_window_bits &&
+ (opts.serverMaxWindowBits === false ||
+ (typeof opts.serverMaxWindowBits === 'number' &&
+ opts.serverMaxWindowBits > params.server_max_window_bits))) ||
+ (typeof opts.clientMaxWindowBits === 'number' &&
+ !params.client_max_window_bits)
+ ) {
+ return false;
+ }
+
+ return true;
+ });
+
+ if (!accepted) {
+ throw new Error('None of the extension offers can be accepted');
+ }
+
+ if (opts.serverNoContextTakeover) {
+ accepted.server_no_context_takeover = true;
+ }
+ if (opts.clientNoContextTakeover) {
+ accepted.client_no_context_takeover = true;
+ }
+ if (typeof opts.serverMaxWindowBits === 'number') {
+ accepted.server_max_window_bits = opts.serverMaxWindowBits;
+ }
+ if (typeof opts.clientMaxWindowBits === 'number') {
+ accepted.client_max_window_bits = opts.clientMaxWindowBits;
+ } else if (
+ accepted.client_max_window_bits === true ||
+ opts.clientMaxWindowBits === false
+ ) {
+ delete accepted.client_max_window_bits;
+ }
+
+ return accepted;
+ }
+
+ /**
+ * Accept the extension negotiation response.
+ *
+ * @param {Array} response The extension negotiation response
+ * @return {Object} Accepted configuration
+ * @private
+ */
+ acceptAsClient(response) {
+ const params = response[0];
+
+ if (
+ this._options.clientNoContextTakeover === false &&
+ params.client_no_context_takeover
+ ) {
+ throw new Error('Unexpected parameter "client_no_context_takeover"');
+ }
+
+ if (!params.client_max_window_bits) {
+ if (typeof this._options.clientMaxWindowBits === 'number') {
+ params.client_max_window_bits = this._options.clientMaxWindowBits;
+ }
+ } else if (
+ this._options.clientMaxWindowBits === false ||
+ (typeof this._options.clientMaxWindowBits === 'number' &&
+ params.client_max_window_bits > this._options.clientMaxWindowBits)
+ ) {
+ throw new Error(
+ 'Unexpected or invalid parameter "client_max_window_bits"'
+ );
+ }
+
+ return params;
+ }
+
+ /**
+ * Normalize parameters.
+ *
+ * @param {Array} configurations The extension negotiation offers/response
+ * @return {Array} The offers/response with normalized parameters
+ * @private
+ */
+ normalizeParams(configurations) {
+ configurations.forEach((params) => {
+ Object.keys(params).forEach((key) => {
+ let value = params[key];
+
+ if (value.length > 1) {
+ throw new Error(`Parameter "${key}" must have only a single value`);
+ }
+
+ value = value[0];
+
+ if (key === 'client_max_window_bits') {
+ if (value !== true) {
+ const num = +value;
+ if (!Number.isInteger(num) || num < 8 || num > 15) {
+ throw new TypeError(
+ `Invalid value for parameter "${key}": ${value}`
+ );
+ }
+ value = num;
+ } else if (!this._isServer) {
+ throw new TypeError(
+ `Invalid value for parameter "${key}": ${value}`
+ );
+ }
+ } else if (key === 'server_max_window_bits') {
+ const num = +value;
+ if (!Number.isInteger(num) || num < 8 || num > 15) {
+ throw new TypeError(
+ `Invalid value for parameter "${key}": ${value}`
+ );
+ }
+ value = num;
+ } else if (
+ key === 'client_no_context_takeover' ||
+ key === 'server_no_context_takeover'
+ ) {
+ if (value !== true) {
+ throw new TypeError(
+ `Invalid value for parameter "${key}": ${value}`
+ );
+ }
+ } else {
+ throw new Error(`Unknown parameter "${key}"`);
+ }
+
+ params[key] = value;
+ });
+ });
+
+ return configurations;
+ }
+
+ /**
+ * Decompress data. Concurrency limited.
+ *
+ * @param {Buffer} data Compressed data
+ * @param {Boolean} fin Specifies whether or not this is the last fragment
+ * @param {Function} callback Callback
+ * @public
+ */
+ decompress(data, fin, callback) {
+ zlibLimiter.add((done) => {
+ this._decompress(data, fin, (err, result) => {
+ done();
+ callback(err, result);
+ });
+ });
+ }
+
+ /**
+ * Compress data. Concurrency limited.
+ *
+ * @param {(Buffer|String)} data Data to compress
+ * @param {Boolean} fin Specifies whether or not this is the last fragment
+ * @param {Function} callback Callback
+ * @public
+ */
+ compress(data, fin, callback) {
+ zlibLimiter.add((done) => {
+ this._compress(data, fin, (err, result) => {
+ done();
+ callback(err, result);
+ });
+ });
+ }
+
+ /**
+ * Decompress data.
+ *
+ * @param {Buffer} data Compressed data
+ * @param {Boolean} fin Specifies whether or not this is the last fragment
+ * @param {Function} callback Callback
+ * @private
+ */
+ _decompress(data, fin, callback) {
+ const endpoint = this._isServer ? 'client' : 'server';
+
+ if (!this._inflate) {
+ const key = `${endpoint}_max_window_bits`;
+ const windowBits =
+ typeof this.params[key] !== 'number'
+ ? zlib.Z_DEFAULT_WINDOWBITS
+ : this.params[key];
+
+ this._inflate = zlib.createInflateRaw({
+ ...this._options.zlibInflateOptions,
+ windowBits
+ });
+ this._inflate[kPerMessageDeflate] = this;
+ this._inflate[kTotalLength] = 0;
+ this._inflate[kBuffers] = [];
+ this._inflate.on('error', inflateOnError);
+ this._inflate.on('data', inflateOnData);
+ }
+
+ this._inflate[kCallback] = callback;
+
+ this._inflate.write(data);
+ if (fin) this._inflate.write(TRAILER);
+
+ this._inflate.flush(() => {
+ const err = this._inflate[kError];
+
+ if (err) {
+ this._inflate.close();
+ this._inflate = null;
+ callback(err);
+ return;
+ }
+
+ const data = bufferUtil.concat(
+ this._inflate[kBuffers],
+ this._inflate[kTotalLength]
+ );
+
+ if (this._inflate._readableState.endEmitted) {
+ this._inflate.close();
+ this._inflate = null;
+ } else {
+ this._inflate[kTotalLength] = 0;
+ this._inflate[kBuffers] = [];
+
+ if (fin && this.params[`${endpoint}_no_context_takeover`]) {
+ this._inflate.reset();
+ }
+ }
+
+ callback(null, data);
+ });
+ }
+
+ /**
+ * Compress data.
+ *
+ * @param {(Buffer|String)} data Data to compress
+ * @param {Boolean} fin Specifies whether or not this is the last fragment
+ * @param {Function} callback Callback
+ * @private
+ */
+ _compress(data, fin, callback) {
+ const endpoint = this._isServer ? 'server' : 'client';
+
+ if (!this._deflate) {
+ const key = `${endpoint}_max_window_bits`;
+ const windowBits =
+ typeof this.params[key] !== 'number'
+ ? zlib.Z_DEFAULT_WINDOWBITS
+ : this.params[key];
+
+ this._deflate = zlib.createDeflateRaw({
+ ...this._options.zlibDeflateOptions,
+ windowBits
+ });
+
+ this._deflate[kTotalLength] = 0;
+ this._deflate[kBuffers] = [];
+
+ this._deflate.on('data', deflateOnData);
+ }
+
+ this._deflate[kCallback] = callback;
+
+ this._deflate.write(data);
+ this._deflate.flush(zlib.Z_SYNC_FLUSH, () => {
+ if (!this._deflate) {
+ //
+ // The deflate stream was closed while data was being processed.
+ //
+ return;
+ }
+
+ let data = bufferUtil.concat(
+ this._deflate[kBuffers],
+ this._deflate[kTotalLength]
+ );
+
+ if (fin) data = data.slice(0, data.length - 4);
+
+ //
+ // Ensure that the callback will not be called again in
+ // `PerMessageDeflate#cleanup()`.
+ //
+ this._deflate[kCallback] = null;
+
+ this._deflate[kTotalLength] = 0;
+ this._deflate[kBuffers] = [];
+
+ if (fin && this.params[`${endpoint}_no_context_takeover`]) {
+ this._deflate.reset();
+ }
+
+ callback(null, data);
+ });
+ }
+}
+
+module.exports = PerMessageDeflate;
+
+/**
+ * The listener of the `zlib.DeflateRaw` stream `'data'` event.
+ *
+ * @param {Buffer} chunk A chunk of data
+ * @private
+ */
+function deflateOnData(chunk) {
+ this[kBuffers].push(chunk);
+ this[kTotalLength] += chunk.length;
+}
+
+/**
+ * The listener of the `zlib.InflateRaw` stream `'data'` event.
+ *
+ * @param {Buffer} chunk A chunk of data
+ * @private
+ */
+function inflateOnData(chunk) {
+ this[kTotalLength] += chunk.length;
+
+ if (
+ this[kPerMessageDeflate]._maxPayload < 1 ||
+ this[kTotalLength] <= this[kPerMessageDeflate]._maxPayload
+ ) {
+ this[kBuffers].push(chunk);
+ return;
+ }
+
+ this[kError] = new RangeError('Max payload size exceeded');
+ this[kError].code = 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH';
+ this[kError][kStatusCode] = 1009;
+ this.removeListener('data', inflateOnData);
+ this.reset();
+}
+
+/**
+ * The listener of the `zlib.InflateRaw` stream `'error'` event.
+ *
+ * @param {Error} err The emitted error
+ * @private
+ */
+function inflateOnError(err) {
+ //
+ // There is no need to call `Zlib#close()` as the handle is automatically
+ // closed when an error is emitted.
+ //
+ this[kPerMessageDeflate]._inflate = null;
+ err[kStatusCode] = 1007;
+ this[kCallback](err);
+}
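+
+//
+// Illustrative negotiation sketch (editor's addition). The offer object below
+// is an assumed example, in the form produced by `extension.parse()`.
+//
+//   const pmd = new PerMessageDeflate({ serverNoContextTakeover: true }, true);
+//   const params = pmd.accept([{ client_max_window_bits: [true] }]);
+//   // params => { server_no_context_takeover: true }
+//   pmd.compress(Buffer.from('hello'), true, (err, data) => {
+//     // `data` holds the raw deflated bytes to be framed by the sender.
+//   });
+//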
diff --git a/testing/xpcshell/node-ws/lib/receiver.js b/testing/xpcshell/node-ws/lib/receiver.js
new file mode 100644
index 0000000000..2d29d62bb0
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/receiver.js
@@ -0,0 +1,618 @@
+'use strict';
+
+const { Writable } = require('stream');
+
+const PerMessageDeflate = require('./permessage-deflate');
+const {
+ BINARY_TYPES,
+ EMPTY_BUFFER,
+ kStatusCode,
+ kWebSocket
+} = require('./constants');
+const { concat, toArrayBuffer, unmask } = require('./buffer-util');
+const { isValidStatusCode, isValidUTF8 } = require('./validation');
+
+const GET_INFO = 0;
+const GET_PAYLOAD_LENGTH_16 = 1;
+const GET_PAYLOAD_LENGTH_64 = 2;
+const GET_MASK = 3;
+const GET_DATA = 4;
+const INFLATING = 5;
+
+/**
+ * HyBi Receiver implementation.
+ *
+ * @extends Writable
+ */
+class Receiver extends Writable {
+ /**
+ * Creates a Receiver instance.
+ *
+ * @param {Object} [options] Options object
+ * @param {String} [options.binaryType=nodebuffer] The type for binary data
+ * @param {Object} [options.extensions] An object containing the negotiated
+ * extensions
+ * @param {Boolean} [options.isServer=false] Specifies whether to operate in
+ * client or server mode
+ * @param {Number} [options.maxPayload=0] The maximum allowed message length
+ * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or
+ * not to skip UTF-8 validation for text and close messages
+ */
+ constructor(options = {}) {
+ super();
+
+ this._binaryType = options.binaryType || BINARY_TYPES[0];
+ this._extensions = options.extensions || {};
+ this._isServer = !!options.isServer;
+ this._maxPayload = options.maxPayload | 0;
+ this._skipUTF8Validation = !!options.skipUTF8Validation;
+ this[kWebSocket] = undefined;
+
+ this._bufferedBytes = 0;
+ this._buffers = [];
+
+ this._compressed = false;
+ this._payloadLength = 0;
+ this._mask = undefined;
+ this._fragmented = 0;
+ this._masked = false;
+ this._fin = false;
+ this._opcode = 0;
+
+ this._totalPayloadLength = 0;
+ this._messageLength = 0;
+ this._fragments = [];
+
+ this._state = GET_INFO;
+ this._loop = false;
+ }
+
+ /**
+ * Implements `Writable.prototype._write()`.
+ *
+ * @param {Buffer} chunk The chunk of data to write
+ * @param {String} encoding The character encoding of `chunk`
+ * @param {Function} cb Callback
+ * @private
+ */
+ _write(chunk, encoding, cb) {
+ if (this._opcode === 0x08 && this._state == GET_INFO) return cb();
+
+ this._bufferedBytes += chunk.length;
+ this._buffers.push(chunk);
+ this.startLoop(cb);
+ }
+
+ /**
+ * Consumes `n` bytes from the buffered data.
+ *
+ * @param {Number} n The number of bytes to consume
+ * @return {Buffer} The consumed bytes
+ * @private
+ */
+ consume(n) {
+ this._bufferedBytes -= n;
+
+ if (n === this._buffers[0].length) return this._buffers.shift();
+
+ if (n < this._buffers[0].length) {
+ const buf = this._buffers[0];
+ this._buffers[0] = buf.slice(n);
+ return buf.slice(0, n);
+ }
+
+ const dst = Buffer.allocUnsafe(n);
+
+ do {
+ const buf = this._buffers[0];
+ const offset = dst.length - n;
+
+ if (n >= buf.length) {
+ dst.set(this._buffers.shift(), offset);
+ } else {
+ dst.set(new Uint8Array(buf.buffer, buf.byteOffset, n), offset);
+ this._buffers[0] = buf.slice(n);
+ }
+
+ n -= buf.length;
+ } while (n > 0);
+
+ return dst;
+ }
+
+ /**
+ * Starts the parsing loop.
+ *
+ * @param {Function} cb Callback
+ * @private
+ */
+ startLoop(cb) {
+ let err;
+ this._loop = true;
+
+ do {
+ switch (this._state) {
+ case GET_INFO:
+ err = this.getInfo();
+ break;
+ case GET_PAYLOAD_LENGTH_16:
+ err = this.getPayloadLength16();
+ break;
+ case GET_PAYLOAD_LENGTH_64:
+ err = this.getPayloadLength64();
+ break;
+ case GET_MASK:
+ this.getMask();
+ break;
+ case GET_DATA:
+ err = this.getData(cb);
+ break;
+ default:
+ // `INFLATING`
+ this._loop = false;
+ return;
+ }
+ } while (this._loop);
+
+ cb(err);
+ }
+
+ /**
+ * Reads the first two bytes of a frame.
+ *
+ * @return {(RangeError|undefined)} A possible error
+ * @private
+ */
+ getInfo() {
+ if (this._bufferedBytes < 2) {
+ this._loop = false;
+ return;
+ }
+
+ const buf = this.consume(2);
+
+ if ((buf[0] & 0x30) !== 0x00) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'RSV2 and RSV3 must be clear',
+ true,
+ 1002,
+ 'WS_ERR_UNEXPECTED_RSV_2_3'
+ );
+ }
+
+ const compressed = (buf[0] & 0x40) === 0x40;
+
+ if (compressed && !this._extensions[PerMessageDeflate.extensionName]) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'RSV1 must be clear',
+ true,
+ 1002,
+ 'WS_ERR_UNEXPECTED_RSV_1'
+ );
+ }
+
+ this._fin = (buf[0] & 0x80) === 0x80;
+ this._opcode = buf[0] & 0x0f;
+ this._payloadLength = buf[1] & 0x7f;
+
+ if (this._opcode === 0x00) {
+ if (compressed) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'RSV1 must be clear',
+ true,
+ 1002,
+ 'WS_ERR_UNEXPECTED_RSV_1'
+ );
+ }
+
+ if (!this._fragmented) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'invalid opcode 0',
+ true,
+ 1002,
+ 'WS_ERR_INVALID_OPCODE'
+ );
+ }
+
+ this._opcode = this._fragmented;
+ } else if (this._opcode === 0x01 || this._opcode === 0x02) {
+ if (this._fragmented) {
+ this._loop = false;
+ return error(
+ RangeError,
+ `invalid opcode ${this._opcode}`,
+ true,
+ 1002,
+ 'WS_ERR_INVALID_OPCODE'
+ );
+ }
+
+ this._compressed = compressed;
+ } else if (this._opcode > 0x07 && this._opcode < 0x0b) {
+ if (!this._fin) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'FIN must be set',
+ true,
+ 1002,
+ 'WS_ERR_EXPECTED_FIN'
+ );
+ }
+
+ if (compressed) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'RSV1 must be clear',
+ true,
+ 1002,
+ 'WS_ERR_UNEXPECTED_RSV_1'
+ );
+ }
+
+ if (this._payloadLength > 0x7d) {
+ this._loop = false;
+ return error(
+ RangeError,
+ `invalid payload length ${this._payloadLength}`,
+ true,
+ 1002,
+ 'WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH'
+ );
+ }
+ } else {
+ this._loop = false;
+ return error(
+ RangeError,
+ `invalid opcode ${this._opcode}`,
+ true,
+ 1002,
+ 'WS_ERR_INVALID_OPCODE'
+ );
+ }
+
+ if (!this._fin && !this._fragmented) this._fragmented = this._opcode;
+ this._masked = (buf[1] & 0x80) === 0x80;
+
+ if (this._isServer) {
+ if (!this._masked) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'MASK must be set',
+ true,
+ 1002,
+ 'WS_ERR_EXPECTED_MASK'
+ );
+ }
+ } else if (this._masked) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'MASK must be clear',
+ true,
+ 1002,
+ 'WS_ERR_UNEXPECTED_MASK'
+ );
+ }
+
+ if (this._payloadLength === 126) this._state = GET_PAYLOAD_LENGTH_16;
+ else if (this._payloadLength === 127) this._state = GET_PAYLOAD_LENGTH_64;
+ else return this.haveLength();
+ }
+
+ /**
+ * Gets extended payload length (7+16).
+ *
+ * @return {(RangeError|undefined)} A possible error
+ * @private
+ */
+ getPayloadLength16() {
+ if (this._bufferedBytes < 2) {
+ this._loop = false;
+ return;
+ }
+
+ this._payloadLength = this.consume(2).readUInt16BE(0);
+ return this.haveLength();
+ }
+
+ /**
+ * Gets extended payload length (7+64).
+ *
+ * @return {(RangeError|undefined)} A possible error
+ * @private
+ */
+ getPayloadLength64() {
+ if (this._bufferedBytes < 8) {
+ this._loop = false;
+ return;
+ }
+
+ const buf = this.consume(8);
+ const num = buf.readUInt32BE(0);
+
+ //
+ // The maximum safe integer in JavaScript is 2^53 - 1. An error is returned
+ // if payload length is greater than this number.
+ //
+ if (num > Math.pow(2, 53 - 32) - 1) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'Unsupported WebSocket frame: payload length > 2^53 - 1',
+ false,
+ 1009,
+ 'WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH'
+ );
+ }
+
+ this._payloadLength = num * Math.pow(2, 32) + buf.readUInt32BE(4);
+ return this.haveLength();
+ }
+
+ /**
+ * Payload length has been read.
+ *
+ * @return {(RangeError|undefined)} A possible error
+ * @private
+ */
+ haveLength() {
+ if (this._payloadLength && this._opcode < 0x08) {
+ this._totalPayloadLength += this._payloadLength;
+ if (this._totalPayloadLength > this._maxPayload && this._maxPayload > 0) {
+ this._loop = false;
+ return error(
+ RangeError,
+ 'Max payload size exceeded',
+ false,
+ 1009,
+ 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH'
+ );
+ }
+ }
+
+ if (this._masked) this._state = GET_MASK;
+ else this._state = GET_DATA;
+ }
+
+ /**
+ * Reads mask bytes.
+ *
+ * @private
+ */
+ getMask() {
+ if (this._bufferedBytes < 4) {
+ this._loop = false;
+ return;
+ }
+
+ this._mask = this.consume(4);
+ this._state = GET_DATA;
+ }
+
+ /**
+ * Reads data bytes.
+ *
+ * @param {Function} cb Callback
+ * @return {(Error|RangeError|undefined)} A possible error
+ * @private
+ */
+ getData(cb) {
+ let data = EMPTY_BUFFER;
+
+ if (this._payloadLength) {
+ if (this._bufferedBytes < this._payloadLength) {
+ this._loop = false;
+ return;
+ }
+
+ data = this.consume(this._payloadLength);
+
+ if (
+ this._masked &&
+ (this._mask[0] | this._mask[1] | this._mask[2] | this._mask[3]) !== 0
+ ) {
+ unmask(data, this._mask);
+ }
+ }
+
+ if (this._opcode > 0x07) return this.controlMessage(data);
+
+ if (this._compressed) {
+ this._state = INFLATING;
+ this.decompress(data, cb);
+ return;
+ }
+
+ if (data.length) {
+ //
+ // This message is not compressed so its length is the sum of the payload
+ // length of all fragments.
+ //
+ this._messageLength = this._totalPayloadLength;
+ this._fragments.push(data);
+ }
+
+ return this.dataMessage();
+ }
+
+ /**
+ * Decompresses data.
+ *
+ * @param {Buffer} data Compressed data
+ * @param {Function} cb Callback
+ * @private
+ */
+ decompress(data, cb) {
+ const perMessageDeflate = this._extensions[PerMessageDeflate.extensionName];
+
+ perMessageDeflate.decompress(data, this._fin, (err, buf) => {
+ if (err) return cb(err);
+
+ if (buf.length) {
+ this._messageLength += buf.length;
+ if (this._messageLength > this._maxPayload && this._maxPayload > 0) {
+ return cb(
+ error(
+ RangeError,
+ 'Max payload size exceeded',
+ false,
+ 1009,
+ 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH'
+ )
+ );
+ }
+
+ this._fragments.push(buf);
+ }
+
+ const er = this.dataMessage();
+ if (er) return cb(er);
+
+ this.startLoop(cb);
+ });
+ }
+
+ /**
+ * Handles a data message.
+ *
+ * @return {(Error|undefined)} A possible error
+ * @private
+ */
+ dataMessage() {
+ if (this._fin) {
+ const messageLength = this._messageLength;
+ const fragments = this._fragments;
+
+ this._totalPayloadLength = 0;
+ this._messageLength = 0;
+ this._fragmented = 0;
+ this._fragments = [];
+
+ if (this._opcode === 2) {
+ let data;
+
+ if (this._binaryType === 'nodebuffer') {
+ data = concat(fragments, messageLength);
+ } else if (this._binaryType === 'arraybuffer') {
+ data = toArrayBuffer(concat(fragments, messageLength));
+ } else {
+ data = fragments;
+ }
+
+ this.emit('message', data, true);
+ } else {
+ const buf = concat(fragments, messageLength);
+
+ if (!this._skipUTF8Validation && !isValidUTF8(buf)) {
+ this._loop = false;
+ return error(
+ Error,
+ 'invalid UTF-8 sequence',
+ true,
+ 1007,
+ 'WS_ERR_INVALID_UTF8'
+ );
+ }
+
+ this.emit('message', buf, false);
+ }
+ }
+
+ this._state = GET_INFO;
+ }
+
+ /**
+ * Handles a control message.
+ *
+ * @param {Buffer} data Data to handle
+ * @return {(Error|RangeError|undefined)} A possible error
+ * @private
+ */
+ controlMessage(data) {
+ if (this._opcode === 0x08) {
+ this._loop = false;
+
+ if (data.length === 0) {
+ this.emit('conclude', 1005, EMPTY_BUFFER);
+ this.end();
+ } else if (data.length === 1) {
+ return error(
+ RangeError,
+ 'invalid payload length 1',
+ true,
+ 1002,
+ 'WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH'
+ );
+ } else {
+ const code = data.readUInt16BE(0);
+
+ if (!isValidStatusCode(code)) {
+ return error(
+ RangeError,
+ `invalid status code ${code}`,
+ true,
+ 1002,
+ 'WS_ERR_INVALID_CLOSE_CODE'
+ );
+ }
+
+ const buf = data.slice(2);
+
+ if (!this._skipUTF8Validation && !isValidUTF8(buf)) {
+ return error(
+ Error,
+ 'invalid UTF-8 sequence',
+ true,
+ 1007,
+ 'WS_ERR_INVALID_UTF8'
+ );
+ }
+
+ this.emit('conclude', code, buf);
+ this.end();
+ }
+ } else if (this._opcode === 0x09) {
+ this.emit('ping', data);
+ } else {
+ this.emit('pong', data);
+ }
+
+ this._state = GET_INFO;
+ }
+}
+
+module.exports = Receiver;
+
+/**
+ * Builds an error object.
+ *
+ * @param {function(new:Error|RangeError)} ErrorCtor The error constructor
+ * @param {String} message The error message
+ * @param {Boolean} prefix Specifies whether or not to add a default prefix to
+ * `message`
+ * @param {Number} statusCode The status code
+ * @param {String} errorCode The exposed error code
+ * @return {(Error|RangeError)} The error
+ * @private
+ */
+function error(ErrorCtor, message, prefix, statusCode, errorCode) {
+ const err = new ErrorCtor(
+ prefix ? `Invalid WebSocket frame: ${message}` : message
+ );
+
+ Error.captureStackTrace(err, error);
+ err.code = errorCode;
+ err[kStatusCode] = statusCode;
+ return err;
+}
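+
+//
+// Illustrative usage sketch (editor's addition). The receiver is a `Writable`
+// stream; the bytes below are an assumed, unmasked single-frame text message
+// ("Hi"), as parsed in client mode (the default).
+//
+//   const receiver = new Receiver();
+//   receiver.on('message', (data, isBinary) => {
+//     console.log(isBinary, data.toString()); // false 'Hi'
+//   });
+//   receiver.write(Buffer.from([0x81, 0x02, 0x48, 0x69]));
+//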
diff --git a/testing/xpcshell/node-ws/lib/sender.js b/testing/xpcshell/node-ws/lib/sender.js
new file mode 100644
index 0000000000..c848853629
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/sender.js
@@ -0,0 +1,478 @@
+/* eslint no-unused-vars: ["error", { "varsIgnorePattern": "^net|tls$" }] */
+
+'use strict';
+
+const net = require('net');
+const tls = require('tls');
+const { randomFillSync } = require('crypto');
+
+const PerMessageDeflate = require('./permessage-deflate');
+const { EMPTY_BUFFER } = require('./constants');
+const { isValidStatusCode } = require('./validation');
+const { mask: applyMask, toBuffer } = require('./buffer-util');
+
+const kByteLength = Symbol('kByteLength');
+const maskBuffer = Buffer.alloc(4);
+
+/**
+ * HyBi Sender implementation.
+ */
+class Sender {
+ /**
+ * Creates a Sender instance.
+ *
+ * @param {(net.Socket|tls.Socket)} socket The connection socket
+ * @param {Object} [extensions] An object containing the negotiated extensions
+ * @param {Function} [generateMask] The function used to generate the masking
+ * key
+ */
+ constructor(socket, extensions, generateMask) {
+ this._extensions = extensions || {};
+
+ if (generateMask) {
+ this._generateMask = generateMask;
+ this._maskBuffer = Buffer.alloc(4);
+ }
+
+ this._socket = socket;
+
+ this._firstFragment = true;
+ this._compress = false;
+
+ this._bufferedBytes = 0;
+ this._deflating = false;
+ this._queue = [];
+ }
+
+ /**
+ * Frames a piece of data according to the HyBi WebSocket protocol.
+ *
+ * @param {(Buffer|String)} data The data to frame
+ * @param {Object} options Options object
+ * @param {Boolean} [options.fin=false] Specifies whether or not to set the
+ * FIN bit
+ * @param {Function} [options.generateMask] The function used to generate the
+ * masking key
+ * @param {Boolean} [options.mask=false] Specifies whether or not to mask
+ * `data`
+ * @param {Buffer} [options.maskBuffer] The buffer used to store the masking
+ * key
+ * @param {Number} options.opcode The opcode
+ * @param {Boolean} [options.readOnly=false] Specifies whether `data` can be
+ * modified
+ * @param {Boolean} [options.rsv1=false] Specifies whether or not to set the
+ * RSV1 bit
+ * @return {(Buffer|String)[]} The framed data
+ * @public
+ */
+ static frame(data, options) {
+ let mask;
+ let merge = false;
+ let offset = 2;
+ let skipMasking = false;
+
+ if (options.mask) {
+ mask = options.maskBuffer || maskBuffer;
+
+ if (options.generateMask) {
+ options.generateMask(mask);
+ } else {
+ randomFillSync(mask, 0, 4);
+ }
+
+ skipMasking = (mask[0] | mask[1] | mask[2] | mask[3]) === 0;
+ offset = 6;
+ }
+
+ let dataLength;
+
+ if (typeof data === 'string') {
+ if (
+ (!options.mask || skipMasking) &&
+ options[kByteLength] !== undefined
+ ) {
+ dataLength = options[kByteLength];
+ } else {
+ data = Buffer.from(data);
+ dataLength = data.length;
+ }
+ } else {
+ dataLength = data.length;
+ merge = options.mask && options.readOnly && !skipMasking;
+ }
+
+ let payloadLength = dataLength;
+
+ if (dataLength >= 65536) {
+ offset += 8;
+ payloadLength = 127;
+ } else if (dataLength > 125) {
+ offset += 2;
+ payloadLength = 126;
+ }
+
+ const target = Buffer.allocUnsafe(merge ? dataLength + offset : offset);
+
+ target[0] = options.fin ? options.opcode | 0x80 : options.opcode;
+ if (options.rsv1) target[0] |= 0x40;
+
+ target[1] = payloadLength;
+
+ if (payloadLength === 126) {
+ target.writeUInt16BE(dataLength, 2);
+ } else if (payloadLength === 127) {
+ target[2] = target[3] = 0;
+ target.writeUIntBE(dataLength, 4, 6);
+ }
+
+ if (!options.mask) return [target, data];
+
+ target[1] |= 0x80;
+ target[offset - 4] = mask[0];
+ target[offset - 3] = mask[1];
+ target[offset - 2] = mask[2];
+ target[offset - 1] = mask[3];
+
+ if (skipMasking) return [target, data];
+
+ if (merge) {
+ applyMask(data, mask, target, offset, dataLength);
+ return [target];
+ }
+
+ applyMask(data, mask, data, 0, dataLength);
+ return [target, data];
+ }
+
+ /**
+ * Sends a close message to the other peer.
+ *
+ * @param {Number} [code] The status code component of the body
+ * @param {(String|Buffer)} [data] The message component of the body
+ * @param {Boolean} [mask=false] Specifies whether or not to mask the message
+ * @param {Function} [cb] Callback
+ * @public
+ */
+ close(code, data, mask, cb) {
+ let buf;
+
+ if (code === undefined) {
+ buf = EMPTY_BUFFER;
+ } else if (typeof code !== 'number' || !isValidStatusCode(code)) {
+ throw new TypeError('First argument must be a valid error code number');
+ } else if (data === undefined || !data.length) {
+ buf = Buffer.allocUnsafe(2);
+ buf.writeUInt16BE(code, 0);
+ } else {
+ const length = Buffer.byteLength(data);
+
+ if (length > 123) {
+ throw new RangeError('The message must not be greater than 123 bytes');
+ }
+
+ buf = Buffer.allocUnsafe(2 + length);
+ buf.writeUInt16BE(code, 0);
+
+ if (typeof data === 'string') {
+ buf.write(data, 2);
+ } else {
+ buf.set(data, 2);
+ }
+ }
+
+ const options = {
+ [kByteLength]: buf.length,
+ fin: true,
+ generateMask: this._generateMask,
+ mask,
+ maskBuffer: this._maskBuffer,
+ opcode: 0x08,
+ readOnly: false,
+ rsv1: false
+ };
+
+ if (this._deflating) {
+ this.enqueue([this.dispatch, buf, false, options, cb]);
+ } else {
+ this.sendFrame(Sender.frame(buf, options), cb);
+ }
+ }
+
+ /**
+ * Sends a ping message to the other peer.
+ *
+ * @param {*} data The message to send
+ * @param {Boolean} [mask=false] Specifies whether or not to mask `data`
+ * @param {Function} [cb] Callback
+ * @public
+ */
+ ping(data, mask, cb) {
+ let byteLength;
+ let readOnly;
+
+ if (typeof data === 'string') {
+ byteLength = Buffer.byteLength(data);
+ readOnly = false;
+ } else {
+ data = toBuffer(data);
+ byteLength = data.length;
+ readOnly = toBuffer.readOnly;
+ }
+
+ if (byteLength > 125) {
+ throw new RangeError('The data size must not be greater than 125 bytes');
+ }
+
+ const options = {
+ [kByteLength]: byteLength,
+ fin: true,
+ generateMask: this._generateMask,
+ mask,
+ maskBuffer: this._maskBuffer,
+ opcode: 0x09,
+ readOnly,
+ rsv1: false
+ };
+
+ if (this._deflating) {
+ this.enqueue([this.dispatch, data, false, options, cb]);
+ } else {
+ this.sendFrame(Sender.frame(data, options), cb);
+ }
+ }
+
+ /**
+ * Sends a pong message to the other peer.
+ *
+ * @param {*} data The message to send
+ * @param {Boolean} [mask=false] Specifies whether or not to mask `data`
+ * @param {Function} [cb] Callback
+ * @public
+ */
+ pong(data, mask, cb) {
+ let byteLength;
+ let readOnly;
+
+ if (typeof data === 'string') {
+ byteLength = Buffer.byteLength(data);
+ readOnly = false;
+ } else {
+ data = toBuffer(data);
+ byteLength = data.length;
+ readOnly = toBuffer.readOnly;
+ }
+
+ if (byteLength > 125) {
+ throw new RangeError('The data size must not be greater than 125 bytes');
+ }
+
+ const options = {
+ [kByteLength]: byteLength,
+ fin: true,
+ generateMask: this._generateMask,
+ mask,
+ maskBuffer: this._maskBuffer,
+ opcode: 0x0a,
+ readOnly,
+ rsv1: false
+ };
+
+ if (this._deflating) {
+ this.enqueue([this.dispatch, data, false, options, cb]);
+ } else {
+ this.sendFrame(Sender.frame(data, options), cb);
+ }
+ }
+
+ /**
+ * Sends a data message to the other peer.
+ *
+ * @param {*} data The message to send
+ * @param {Object} options Options object
+ * @param {Boolean} [options.binary=false] Specifies whether `data` is binary
+ * or text
+ * @param {Boolean} [options.compress=false] Specifies whether or not to
+ * compress `data`
+ * @param {Boolean} [options.fin=false] Specifies whether the fragment is the
+ * last one
+ * @param {Boolean} [options.mask=false] Specifies whether or not to mask
+ * `data`
+ * @param {Function} [cb] Callback
+ * @public
+ */
+ send(data, options, cb) {
+ const perMessageDeflate = this._extensions[PerMessageDeflate.extensionName];
+ let opcode = options.binary ? 2 : 1;
+ let rsv1 = options.compress;
+
+ let byteLength;
+ let readOnly;
+
+ if (typeof data === 'string') {
+ byteLength = Buffer.byteLength(data);
+ readOnly = false;
+ } else {
+ data = toBuffer(data);
+ byteLength = data.length;
+ readOnly = toBuffer.readOnly;
+ }
+
+ if (this._firstFragment) {
+ this._firstFragment = false;
+ if (
+ rsv1 &&
+ perMessageDeflate &&
+ perMessageDeflate.params[
+ perMessageDeflate._isServer
+ ? 'server_no_context_takeover'
+ : 'client_no_context_takeover'
+ ]
+ ) {
+ rsv1 = byteLength >= perMessageDeflate._threshold;
+ }
+ this._compress = rsv1;
+ } else {
+ rsv1 = false;
+ opcode = 0;
+ }
+
+ if (options.fin) this._firstFragment = true;
+
+ if (perMessageDeflate) {
+ const opts = {
+ [kByteLength]: byteLength,
+ fin: options.fin,
+ generateMask: this._generateMask,
+ mask: options.mask,
+ maskBuffer: this._maskBuffer,
+ opcode,
+ readOnly,
+ rsv1
+ };
+
+ if (this._deflating) {
+ this.enqueue([this.dispatch, data, this._compress, opts, cb]);
+ } else {
+ this.dispatch(data, this._compress, opts, cb);
+ }
+ } else {
+ this.sendFrame(
+ Sender.frame(data, {
+ [kByteLength]: byteLength,
+ fin: options.fin,
+ generateMask: this._generateMask,
+ mask: options.mask,
+ maskBuffer: this._maskBuffer,
+ opcode,
+ readOnly,
+ rsv1: false
+ }),
+ cb
+ );
+ }
+ }
+
+ /**
+ * Dispatches a message.
+ *
+ * @param {(Buffer|String)} data The message to send
+ * @param {Boolean} [compress=false] Specifies whether or not to compress
+ * `data`
+ * @param {Object} options Options object
+ * @param {Boolean} [options.fin=false] Specifies whether or not to set the
+ * FIN bit
+ * @param {Function} [options.generateMask] The function used to generate the
+ * masking key
+ * @param {Boolean} [options.mask=false] Specifies whether or not to mask
+ * `data`
+ * @param {Buffer} [options.maskBuffer] The buffer used to store the masking
+ * key
+ * @param {Number} options.opcode The opcode
+ * @param {Boolean} [options.readOnly=false] Specifies whether `data` can be
+ * modified
+ * @param {Boolean} [options.rsv1=false] Specifies whether or not to set the
+ * RSV1 bit
+ * @param {Function} [cb] Callback
+ * @private
+ */
+ dispatch(data, compress, options, cb) {
+ if (!compress) {
+ this.sendFrame(Sender.frame(data, options), cb);
+ return;
+ }
+
+ const perMessageDeflate = this._extensions[PerMessageDeflate.extensionName];
+
+ this._bufferedBytes += options[kByteLength];
+ this._deflating = true;
+ perMessageDeflate.compress(data, options.fin, (_, buf) => {
+ if (this._socket.destroyed) {
+ const err = new Error(
+ 'The socket was closed while data was being compressed'
+ );
+
+ if (typeof cb === 'function') cb(err);
+
+ for (let i = 0; i < this._queue.length; i++) {
+ const params = this._queue[i];
+ const callback = params[params.length - 1];
+
+ if (typeof callback === 'function') callback(err);
+ }
+
+ return;
+ }
+
+ this._bufferedBytes -= options[kByteLength];
+ this._deflating = false;
+ options.readOnly = false;
+ this.sendFrame(Sender.frame(buf, options), cb);
+ this.dequeue();
+ });
+ }
+
+ /**
+ * Executes queued send operations.
+ *
+ * @private
+ */
+ dequeue() {
+ while (!this._deflating && this._queue.length) {
+ const params = this._queue.shift();
+
+ this._bufferedBytes -= params[3][kByteLength];
+ Reflect.apply(params[0], this, params.slice(1));
+ }
+ }
+
+ /**
+ * Enqueues a send operation.
+ *
+ * @param {Array} params Send operation parameters.
+ * @private
+ */
+ enqueue(params) {
+ this._bufferedBytes += params[3][kByteLength];
+ this._queue.push(params);
+ }
+
+ /**
+ * Sends a frame.
+ *
+ * @param {Buffer[]} list The frame to send
+ * @param {Function} [cb] Callback
+ * @private
+ */
+ sendFrame(list, cb) {
+ if (list.length === 2) {
+ this._socket.cork();
+ this._socket.write(list[0]);
+ this._socket.write(list[1], cb);
+ this._socket.uncork();
+ } else {
+ this._socket.write(list[0], cb);
+ }
+ }
+}
+
+module.exports = Sender;
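+
+//
+// Illustrative sketch (editor's addition). `Sender.frame()` returns the frame
+// header and payload to write; for a short unmasked text frame the header is
+// just two bytes.
+//
+//   const list = Sender.frame(Buffer.from('Hi'), {
+//     fin: true,
+//     mask: false,
+//     opcode: 0x01,
+//     readOnly: false,
+//     rsv1: false
+//   });
+//   // list[0] => <Buffer 81 02>, list[1] => <Buffer 48 69>
+//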
diff --git a/testing/xpcshell/node-ws/lib/stream.js b/testing/xpcshell/node-ws/lib/stream.js
new file mode 100644
index 0000000000..230734b79a
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/stream.js
@@ -0,0 +1,159 @@
+'use strict';
+
+const { Duplex } = require('stream');
+
+/**
+ * Emits the `'close'` event on a stream.
+ *
+ * @param {Duplex} stream The stream.
+ * @private
+ */
+function emitClose(stream) {
+ stream.emit('close');
+}
+
+/**
+ * The listener of the `'end'` event.
+ *
+ * @private
+ */
+function duplexOnEnd() {
+ if (!this.destroyed && this._writableState.finished) {
+ this.destroy();
+ }
+}
+
+/**
+ * The listener of the `'error'` event.
+ *
+ * @param {Error} err The error
+ * @private
+ */
+function duplexOnError(err) {
+ this.removeListener('error', duplexOnError);
+ this.destroy();
+ if (this.listenerCount('error') === 0) {
+ // Do not suppress the throwing behavior.
+ this.emit('error', err);
+ }
+}
+
+/**
+ * Wraps a `WebSocket` in a duplex stream.
+ *
+ * @param {WebSocket} ws The `WebSocket` to wrap
+ * @param {Object} [options] The options for the `Duplex` constructor
+ * @return {Duplex} The duplex stream
+ * @public
+ */
+function createWebSocketStream(ws, options) {
+ let terminateOnDestroy = true;
+
+ const duplex = new Duplex({
+ ...options,
+ autoDestroy: false,
+ emitClose: false,
+ objectMode: false,
+ writableObjectMode: false
+ });
+
+ ws.on('message', function message(msg, isBinary) {
+ const data =
+ !isBinary && duplex._readableState.objectMode ? msg.toString() : msg;
+
+ if (!duplex.push(data)) ws.pause();
+ });
+
+ ws.once('error', function error(err) {
+ if (duplex.destroyed) return;
+
+ // Prevent `ws.terminate()` from being called by `duplex._destroy()`.
+ //
+ // - If the `'error'` event is emitted before the `'open'` event, then
+ // `ws.terminate()` is a noop as no socket is assigned.
+ // - Otherwise, the error is re-emitted by the listener of the `'error'`
+ // event of the `Receiver` object. The listener already closes the
+ // connection by calling `ws.close()`. This allows a close frame to be
+ // sent to the other peer. If `ws.terminate()` is called right after this,
+ // then the close frame might not be sent.
+ terminateOnDestroy = false;
+ duplex.destroy(err);
+ });
+
+ ws.once('close', function close() {
+ if (duplex.destroyed) return;
+
+ duplex.push(null);
+ });
+
+ duplex._destroy = function (err, callback) {
+ if (ws.readyState === ws.CLOSED) {
+ callback(err);
+ process.nextTick(emitClose, duplex);
+ return;
+ }
+
+ let called = false;
+
+ ws.once('error', function error(err) {
+ called = true;
+ callback(err);
+ });
+
+ ws.once('close', function close() {
+ if (!called) callback(err);
+ process.nextTick(emitClose, duplex);
+ });
+
+ if (terminateOnDestroy) ws.terminate();
+ };
+
+ duplex._final = function (callback) {
+ if (ws.readyState === ws.CONNECTING) {
+ ws.once('open', function open() {
+ duplex._final(callback);
+ });
+ return;
+ }
+
+ // If the value of the `_socket` property is `null` it means that `ws` is a
+ // client websocket and the handshake failed. In fact, when this happens, a
+ // socket is never assigned to the websocket. Wait for the `'error'` event
+ // that will be emitted by the websocket.
+ if (ws._socket === null) return;
+
+ if (ws._socket._writableState.finished) {
+ callback();
+ if (duplex._readableState.endEmitted) duplex.destroy();
+ } else {
+ ws._socket.once('finish', function finish() {
+ // `duplex` is not destroyed here because the `'end'` event will be
+ // emitted on `duplex` after this `'finish'` event. The EOF signaling
+ // `null` chunk is, in fact, pushed when the websocket emits `'close'`.
+ callback();
+ });
+ ws.close();
+ }
+ };
+
+ duplex._read = function () {
+ if (ws.isPaused) ws.resume();
+ };
+
+ duplex._write = function (chunk, encoding, callback) {
+ if (ws.readyState === ws.CONNECTING) {
+ ws.once('open', function open() {
+ duplex._write(chunk, encoding, callback);
+ });
+ return;
+ }
+
+ ws.send(chunk, callback);
+ };
+
+ duplex.on('end', duplexOnEnd);
+ duplex.on('error', duplexOnError);
+ return duplex;
+}
+
+module.exports = createWebSocketStream;
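+
+//
+// Illustrative usage sketch (editor's addition); the URL is an assumed example
+// endpoint.
+//
+//   const WebSocket = require('./websocket');
+//   const duplex = createWebSocketStream(new WebSocket('ws://example.test'), {
+//     encoding: 'utf8'
+//   });
+//   duplex.on('data', (chunk) => console.log(chunk));
+//   duplex.write('hello');
+//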
diff --git a/testing/xpcshell/node-ws/lib/subprotocol.js b/testing/xpcshell/node-ws/lib/subprotocol.js
new file mode 100644
index 0000000000..d4381e8864
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/subprotocol.js
@@ -0,0 +1,62 @@
+'use strict';
+
+const { tokenChars } = require('./validation');
+
+/**
+ * Parses the `Sec-WebSocket-Protocol` header into a set of subprotocol names.
+ *
+ * @param {String} header The field value of the header
+ * @return {Set} The subprotocol names
+ * @public
+ */
+function parse(header) {
+ const protocols = new Set();
+ let start = -1;
+ let end = -1;
+ let i = 0;
+
+ for (i; i < header.length; i++) {
+ const code = header.charCodeAt(i);
+
+ if (end === -1 && tokenChars[code] === 1) {
+ if (start === -1) start = i;
+ } else if (
+ i !== 0 &&
+ (code === 0x20 /* ' ' */ || code === 0x09) /* '\t' */
+ ) {
+ if (end === -1 && start !== -1) end = i;
+ } else if (code === 0x2c /* ',' */) {
+ if (start === -1) {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+
+ if (end === -1) end = i;
+
+ const protocol = header.slice(start, end);
+
+ if (protocols.has(protocol)) {
+ throw new SyntaxError(`The "${protocol}" subprotocol is duplicated`);
+ }
+
+ protocols.add(protocol);
+ start = end = -1;
+ } else {
+ throw new SyntaxError(`Unexpected character at index ${i}`);
+ }
+ }
+
+ if (start === -1 || end !== -1) {
+ throw new SyntaxError('Unexpected end of input');
+ }
+
+ const protocol = header.slice(start, i);
+
+ if (protocols.has(protocol)) {
+ throw new SyntaxError(`The "${protocol}" subprotocol is duplicated`);
+ }
+
+ protocols.add(protocol);
+ return protocols;
+}
+
+module.exports = { parse };
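+
+//
+// Illustrative usage sketch (editor's addition):
+//
+//   const { parse } = require('./subprotocol');
+//   parse('chat, superchat'); // => Set(2) { 'chat', 'superchat' }
+//   parse('chat, chat');      // throws SyntaxError (duplicated subprotocol)
+//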
diff --git a/testing/xpcshell/node-ws/lib/validation.js b/testing/xpcshell/node-ws/lib/validation.js
new file mode 100644
index 0000000000..44fc202906
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/validation.js
@@ -0,0 +1,125 @@
+'use strict';
+
+//
+// Allowed token characters:
+//
+// '!', '#', '$', '%', '&', ''', '*', '+', '-',
+// '.', 0-9, A-Z, '^', '_', '`', a-z, '|', '~'
+//
+// tokenChars[32] === 0 // ' '
+// tokenChars[33] === 1 // '!'
+// tokenChars[34] === 0 // '"'
+// ...
+//
+// prettier-ignore
+const tokenChars = [
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 0 - 15
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 16 - 31
+ 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, // 32 - 47
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, // 48 - 63
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 64 - 79
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, // 80 - 95
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 96 - 111
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0 // 112 - 127
+];
+
+/**
+ * Checks if a status code is allowed in a close frame.
+ *
+ * @param {Number} code The status code
+ * @return {Boolean} `true` if the status code is valid, else `false`
+ * @public
+ */
+function isValidStatusCode(code) {
+ return (
+ (code >= 1000 &&
+ code <= 1014 &&
+ code !== 1004 &&
+ code !== 1005 &&
+ code !== 1006) ||
+ (code >= 3000 && code <= 4999)
+ );
+}
+
+/**
+ * Checks if a given buffer contains only correct UTF-8.
+ * Ported from https://www.cl.cam.ac.uk/%7Emgk25/ucs/utf8_check.c by
+ * Markus Kuhn.
+ *
+ * @param {Buffer} buf The buffer to check
+ * @return {Boolean} `true` if `buf` contains only correct UTF-8, else `false`
+ * @public
+ */
+function _isValidUTF8(buf) {
+ const len = buf.length;
+ let i = 0;
+
+ while (i < len) {
+ if ((buf[i] & 0x80) === 0) {
+ // 0xxxxxxx
+ i++;
+ } else if ((buf[i] & 0xe0) === 0xc0) {
+ // 110xxxxx 10xxxxxx
+ if (
+ i + 1 === len ||
+ (buf[i + 1] & 0xc0) !== 0x80 ||
+ (buf[i] & 0xfe) === 0xc0 // Overlong
+ ) {
+ return false;
+ }
+
+ i += 2;
+ } else if ((buf[i] & 0xf0) === 0xe0) {
+ // 1110xxxx 10xxxxxx 10xxxxxx
+ if (
+ i + 2 >= len ||
+ (buf[i + 1] & 0xc0) !== 0x80 ||
+ (buf[i + 2] & 0xc0) !== 0x80 ||
+ (buf[i] === 0xe0 && (buf[i + 1] & 0xe0) === 0x80) || // Overlong
+ (buf[i] === 0xed && (buf[i + 1] & 0xe0) === 0xa0) // Surrogate (U+D800 - U+DFFF)
+ ) {
+ return false;
+ }
+
+ i += 3;
+ } else if ((buf[i] & 0xf8) === 0xf0) {
+ // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ if (
+ i + 3 >= len ||
+ (buf[i + 1] & 0xc0) !== 0x80 ||
+ (buf[i + 2] & 0xc0) !== 0x80 ||
+ (buf[i + 3] & 0xc0) !== 0x80 ||
+ (buf[i] === 0xf0 && (buf[i + 1] & 0xf0) === 0x80) || // Overlong
+ (buf[i] === 0xf4 && buf[i + 1] > 0x8f) ||
+ buf[i] > 0xf4 // > U+10FFFF
+ ) {
+ return false;
+ }
+
+ i += 4;
+ } else {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+module.exports = {
+ isValidStatusCode,
+ isValidUTF8: _isValidUTF8,
+ tokenChars
+};
+
+/* istanbul ignore else */
+if (!process.env.WS_NO_UTF_8_VALIDATE) {
+ try {
+ const isValidUTF8 = require('utf-8-validate');
+
+ module.exports.isValidUTF8 = function (buf) {
+ return buf.length < 150 ? _isValidUTF8(buf) : isValidUTF8(buf);
+ };
+ } catch (e) {
+ // Continue regardless of the error.
+ }
+}
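+
+//
+// Illustrative usage sketch (editor's addition):
+//
+//   const { isValidStatusCode, isValidUTF8 } = require('./validation');
+//   isValidStatusCode(1000);                // => true
+//   isValidStatusCode(1005);                // => false (reserved code)
+//   isValidUTF8(Buffer.from('héllo'));      // => true
+//   isValidUTF8(Buffer.from([0xc0, 0xaf])); // => false (overlong encoding)
+//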
diff --git a/testing/xpcshell/node-ws/lib/websocket-server.js b/testing/xpcshell/node-ws/lib/websocket-server.js
new file mode 100644
index 0000000000..bac30eb330
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/websocket-server.js
@@ -0,0 +1,535 @@
+/* eslint no-unused-vars: ["error", { "varsIgnorePattern": "^net|tls|https$" }] */
+
+'use strict';
+
+const EventEmitter = require('events');
+const http = require('http');
+const https = require('https');
+const net = require('net');
+const tls = require('tls');
+const { createHash } = require('crypto');
+
+const extension = require('./extension');
+const PerMessageDeflate = require('./permessage-deflate');
+const subprotocol = require('./subprotocol');
+const WebSocket = require('./websocket');
+const { GUID, kWebSocket } = require('./constants');
+
+const keyRegex = /^[+/0-9A-Za-z]{22}==$/;
+
+const RUNNING = 0;
+const CLOSING = 1;
+const CLOSED = 2;
+
+/**
+ * Class representing a WebSocket server.
+ *
+ * @extends EventEmitter
+ */
+class WebSocketServer extends EventEmitter {
+ /**
+ * Create a `WebSocketServer` instance.
+ *
+ * @param {Object} options Configuration options
+ * @param {Number} [options.backlog=511] The maximum length of the queue of
+ * pending connections
+ * @param {Boolean} [options.clientTracking=true] Specifies whether or not to
+ * track clients
+ * @param {Function} [options.handleProtocols] A hook to handle protocols
+ * @param {String} [options.host] The hostname where to bind the server
+ * @param {Number} [options.maxPayload=104857600] The maximum allowed message
+ * size
+ * @param {Boolean} [options.noServer=false] Enable no server mode
+ * @param {String} [options.path] Accept only connections matching this path
+ * @param {(Boolean|Object)} [options.perMessageDeflate=false] Enable/disable
+ * permessage-deflate
+ * @param {Number} [options.port] The port where to bind the server
+ * @param {(http.Server|https.Server)} [options.server] A pre-created HTTP/S
+ * server to use
+ * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or
+ * not to skip UTF-8 validation for text and close messages
+ * @param {Function} [options.verifyClient] A hook to reject connections
+ * @param {Function} [options.WebSocket=WebSocket] Specifies the `WebSocket`
+ * class to use. It must be the `WebSocket` class or a class that extends it
+ * @param {Function} [callback] A listener for the `listening` event
+ */
+ constructor(options, callback) {
+ super();
+
+ options = {
+ maxPayload: 100 * 1024 * 1024,
+ skipUTF8Validation: false,
+ perMessageDeflate: false,
+ handleProtocols: null,
+ clientTracking: true,
+ verifyClient: null,
+ noServer: false,
+ backlog: null, // use default (511 as implemented in net.js)
+ server: null,
+ host: null,
+ path: null,
+ port: null,
+ WebSocket,
+ ...options
+ };
+
+ if (
+ (options.port == null && !options.server && !options.noServer) ||
+ (options.port != null && (options.server || options.noServer)) ||
+ (options.server && options.noServer)
+ ) {
+ throw new TypeError(
+ 'One and only one of the "port", "server", or "noServer" options ' +
+ 'must be specified'
+ );
+ }
+
+ if (options.port != null) {
+ this._server = http.createServer((req, res) => {
+ const body = http.STATUS_CODES[426];
+
+ res.writeHead(426, {
+ 'Content-Length': body.length,
+ 'Content-Type': 'text/plain'
+ });
+ res.end(body);
+ });
+ this._server.listen(
+ options.port,
+ options.host,
+ options.backlog,
+ callback
+ );
+ } else if (options.server) {
+ this._server = options.server;
+ }
+
+ if (this._server) {
+ const emitConnection = this.emit.bind(this, 'connection');
+
+ this._removeListeners = addListeners(this._server, {
+ listening: this.emit.bind(this, 'listening'),
+ error: this.emit.bind(this, 'error'),
+ upgrade: (req, socket, head) => {
+ this.handleUpgrade(req, socket, head, emitConnection);
+ }
+ });
+ }
+
+ if (options.perMessageDeflate === true) options.perMessageDeflate = {};
+ if (options.clientTracking) {
+ this.clients = new Set();
+ this._shouldEmitClose = false;
+ }
+
+ this.options = options;
+ this._state = RUNNING;
+ }
+
+ /**
+ * Returns the bound address, the address family name, and port of the server
+ * as reported by the operating system if listening on an IP socket.
+ * If the server is listening on a pipe or UNIX domain socket, the name is
+ * returned as a string.
+ *
+ * @return {(Object|String|null)} The address of the server
+ * @public
+ */
+ address() {
+ if (this.options.noServer) {
+ throw new Error('The server is operating in "noServer" mode');
+ }
+
+ if (!this._server) return null;
+ return this._server.address();
+ }
+
+ /**
+ * Stop the server from accepting new connections and emit the `'close'` event
+ * when all existing connections are closed.
+ *
+ * @param {Function} [cb] A one-time listener for the `'close'` event
+ * @public
+ */
+ close(cb) {
+ if (this._state === CLOSED) {
+ if (cb) {
+ this.once('close', () => {
+ cb(new Error('The server is not running'));
+ });
+ }
+
+ process.nextTick(emitClose, this);
+ return;
+ }
+
+ if (cb) this.once('close', cb);
+
+ if (this._state === CLOSING) return;
+ this._state = CLOSING;
+
+ if (this.options.noServer || this.options.server) {
+ if (this._server) {
+ this._removeListeners();
+ this._removeListeners = this._server = null;
+ }
+
+ if (this.clients) {
+ if (!this.clients.size) {
+ process.nextTick(emitClose, this);
+ } else {
+ this._shouldEmitClose = true;
+ }
+ } else {
+ process.nextTick(emitClose, this);
+ }
+ } else {
+ const server = this._server;
+
+ this._removeListeners();
+ this._removeListeners = this._server = null;
+
+ //
+ // The HTTP/S server was created internally. Close it, and rely on its
+ // `'close'` event.
+ //
+ server.close(() => {
+ emitClose(this);
+ });
+ }
+ }
+
+ /**
+ * See if a given request should be handled by this server instance.
+ *
+ * @param {http.IncomingMessage} req Request object to inspect
+ * @return {Boolean} `true` if the request is valid, else `false`
+ * @public
+ */
+ shouldHandle(req) {
+ if (this.options.path) {
+ const index = req.url.indexOf('?');
+ const pathname = index !== -1 ? req.url.slice(0, index) : req.url;
+
+ if (pathname !== this.options.path) return false;
+ }
+
+ return true;
+ }
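+
+ //
+ // Illustrative usage sketch (editor's addition), assuming a standalone
+ // server on an example port:
+ //
+ //   const wss = new WebSocketServer({ port: 8080 });
+ //   wss.on('connection', (ws) => ws.send('hello'));
+ //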
+
+ /**
+ * Handle an HTTP Upgrade request.
+ *
+ * @param {http.IncomingMessage} req The request object
+ * @param {(net.Socket|tls.Socket)} socket The network socket between the
+ * server and client
+ * @param {Buffer} head The first packet of the upgraded stream
+ * @param {Function} cb Callback
+ * @public
+ */
+ handleUpgrade(req, socket, head, cb) {
+ socket.on('error', socketOnError);
+
+ const key = req.headers['sec-websocket-key'];
+ const version = +req.headers['sec-websocket-version'];
+
+ if (req.method !== 'GET') {
+ const message = 'Invalid HTTP method';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 405, message);
+ return;
+ }
+
+ if (req.headers.upgrade.toLowerCase() !== 'websocket') {
+ const message = 'Invalid Upgrade header';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 400, message);
+ return;
+ }
+
+ if (!key || !keyRegex.test(key)) {
+ const message = 'Missing or invalid Sec-WebSocket-Key header';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 400, message);
+ return;
+ }
+
+ if (version !== 8 && version !== 13) {
+ const message = 'Missing or invalid Sec-WebSocket-Version header';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 400, message);
+ return;
+ }
+
+ if (!this.shouldHandle(req)) {
+ abortHandshake(socket, 400);
+ return;
+ }
+
+ const secWebSocketProtocol = req.headers['sec-websocket-protocol'];
+ let protocols = new Set();
+
+ if (secWebSocketProtocol !== undefined) {
+ try {
+ protocols = subprotocol.parse(secWebSocketProtocol);
+ } catch (err) {
+ const message = 'Invalid Sec-WebSocket-Protocol header';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 400, message);
+ return;
+ }
+ }
+
+ const secWebSocketExtensions = req.headers['sec-websocket-extensions'];
+ const extensions = {};
+
+ if (
+ this.options.perMessageDeflate &&
+ secWebSocketExtensions !== undefined
+ ) {
+ const perMessageDeflate = new PerMessageDeflate(
+ this.options.perMessageDeflate,
+ true,
+ this.options.maxPayload
+ );
+
+ try {
+ const offers = extension.parse(secWebSocketExtensions);
+
+ if (offers[PerMessageDeflate.extensionName]) {
+ perMessageDeflate.accept(offers[PerMessageDeflate.extensionName]);
+ extensions[PerMessageDeflate.extensionName] = perMessageDeflate;
+ }
+ } catch (err) {
+ const message =
+ 'Invalid or unacceptable Sec-WebSocket-Extensions header';
+ abortHandshakeOrEmitwsClientError(this, req, socket, 400, message);
+ return;
+ }
+ }
+
+ //
+ // Optionally call external client verification handler.
+ //
+ if (this.options.verifyClient) {
+ const info = {
+ origin:
+ req.headers[`${version === 8 ? 'sec-websocket-origin' : 'origin'}`],
+ secure: !!(req.socket.authorized || req.socket.encrypted),
+ req
+ };
+
+ if (this.options.verifyClient.length === 2) {
+ this.options.verifyClient(info, (verified, code, message, headers) => {
+ if (!verified) {
+ return abortHandshake(socket, code || 401, message, headers);
+ }
+
+ this.completeUpgrade(
+ extensions,
+ key,
+ protocols,
+ req,
+ socket,
+ head,
+ cb
+ );
+ });
+ return;
+ }
+
+ if (!this.options.verifyClient(info)) return abortHandshake(socket, 401);
+ }
+
+ this.completeUpgrade(extensions, key, protocols, req, socket, head, cb);
+ }
+
+ /**
+ * Upgrade the connection to WebSocket.
+ *
+ * @param {Object} extensions The accepted extensions
+ * @param {String} key The value of the `Sec-WebSocket-Key` header
+ * @param {Set} protocols The subprotocols
+ * @param {http.IncomingMessage} req The request object
+ * @param {(net.Socket|tls.Socket)} socket The network socket between the
+ * server and client
+ * @param {Buffer} head The first packet of the upgraded stream
+ * @param {Function} cb Callback
+ * @throws {Error} If called more than once with the same socket
+ * @private
+ */
+ completeUpgrade(extensions, key, protocols, req, socket, head, cb) {
+ //
+ // Destroy the socket if the client has already sent a FIN packet.
+ //
+ if (!socket.readable || !socket.writable) return socket.destroy();
+
+ if (socket[kWebSocket]) {
+ throw new Error(
+ 'server.handleUpgrade() was called more than once with the same ' +
+ 'socket, possibly due to a misconfiguration'
+ );
+ }
+
+ if (this._state > RUNNING) return abortHandshake(socket, 503);
+
+ const digest = createHash('sha1')
+ .update(key + GUID)
+ .digest('base64');
+
+ const headers = [
+ 'HTTP/1.1 101 Switching Protocols',
+ 'Upgrade: websocket',
+ 'Connection: Upgrade',
+ `Sec-WebSocket-Accept: ${digest}`
+ ];
+
+ const ws = new this.options.WebSocket(null);
+
+ if (protocols.size) {
+ //
+ // Optionally call external protocol selection handler.
+ //
+ const protocol = this.options.handleProtocols
+ ? this.options.handleProtocols(protocols, req)
+ : protocols.values().next().value;
+
+ if (protocol) {
+ headers.push(`Sec-WebSocket-Protocol: ${protocol}`);
+ ws._protocol = protocol;
+ }
+ }
+
+ if (extensions[PerMessageDeflate.extensionName]) {
+ const params = extensions[PerMessageDeflate.extensionName].params;
+ const value = extension.format({
+ [PerMessageDeflate.extensionName]: [params]
+ });
+ headers.push(`Sec-WebSocket-Extensions: ${value}`);
+ ws._extensions = extensions;
+ }
+
+ //
+ // Allow external modification/inspection of handshake headers.
+ //
+ this.emit('headers', headers, req);
+
+ socket.write(headers.concat('\r\n').join('\r\n'));
+ socket.removeListener('error', socketOnError);
+
+ ws.setSocket(socket, head, {
+ maxPayload: this.options.maxPayload,
+ skipUTF8Validation: this.options.skipUTF8Validation
+ });
+
+ if (this.clients) {
+ this.clients.add(ws);
+ ws.on('close', () => {
+ this.clients.delete(ws);
+
+ if (this._shouldEmitClose && !this.clients.size) {
+ process.nextTick(emitClose, this);
+ }
+ });
+ }
+
+ cb(ws, req);
+ }
+}
+
+module.exports = WebSocketServer;
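+
+// Usage sketch (illustrative; `httpServer` is assumed to be an existing
+// `http.Server`): a self-contained server, and a `noServer` server driven
+// from an external `'upgrade'` listener via `handleUpgrade()`.
+//
+//   const wss = new WebSocketServer({ port: 8080, path: '/ws' });
+//   wss.on('connection', (ws, req) => {
+//     ws.on('message', (data) => ws.send(data));
+//   });
+//
+//   const wss2 = new WebSocketServer({ noServer: true });
+//   httpServer.on('upgrade', (req, socket, head) => {
+//     wss2.handleUpgrade(req, socket, head, (ws) => {
+//       wss2.emit('connection', ws, req);
+//     });
+//   });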
+
+/**
+ * Add event listeners on an `EventEmitter` using a map of <event, listener>
+ * pairs.
+ *
+ * @param {EventEmitter} server The event emitter
+ * @param {Object.<String, Function>} map The listeners to add
+ * @return {Function} A function that will remove the added listeners when
+ * called
+ * @private
+ */
+function addListeners(server, map) {
+ for (const event of Object.keys(map)) server.on(event, map[event]);
+
+ return function removeListeners() {
+ for (const event of Object.keys(map)) {
+ server.removeListener(event, map[event]);
+ }
+ };
+}
+
+/**
+ * Emit a `'close'` event on an `EventEmitter`.
+ *
+ * @param {EventEmitter} server The event emitter
+ * @private
+ */
+function emitClose(server) {
+ server._state = CLOSED;
+ server.emit('close');
+}
+
+/**
+ * Handle socket errors.
+ *
+ * @private
+ */
+function socketOnError() {
+ this.destroy();
+}
+
+/**
+ * Close the connection when preconditions are not fulfilled.
+ *
+ * @param {(net.Socket|tls.Socket)} socket The socket of the upgrade request
+ * @param {Number} code The HTTP response status code
+ * @param {String} [message] The HTTP response body
+ * @param {Object} [headers] Additional HTTP response headers
+ * @private
+ */
+function abortHandshake(socket, code, message, headers) {
+ //
+ // The socket is writable unless the user destroyed or ended it before calling
+ // `server.handleUpgrade()` or in the `verifyClient` function, which is a user
+ // error. Handling this does not make much sense as the worst that can happen
+ // is that some of the data written by the user might be discarded due to the
+ // call to `socket.end()` below, which triggers an `'error'` event that in
+ // turn causes the socket to be destroyed.
+ //
+ message = message || http.STATUS_CODES[code];
+ headers = {
+ Connection: 'close',
+ 'Content-Type': 'text/html',
+ 'Content-Length': Buffer.byteLength(message),
+ ...headers
+ };
+
+ socket.once('finish', socket.destroy);
+
+ socket.end(
+ `HTTP/1.1 ${code} ${http.STATUS_CODES[code]}\r\n` +
+ Object.keys(headers)
+ .map((h) => `${h}: ${headers[h]}`)
+ .join('\r\n') +
+ '\r\n\r\n' +
+ message
+ );
+}
+
+/**
+ * Emit a `'wsClientError'` event on a `WebSocketServer` if there is at least
+ * one listener for it, otherwise call `abortHandshake()`.
+ *
+ * @param {WebSocketServer} server The WebSocket server
+ * @param {http.IncomingMessage} req The request object
+ * @param {(net.Socket|tls.Socket)} socket The socket of the upgrade request
+ * @param {Number} code The HTTP response status code
+ * @param {String} message The HTTP response body
+ * @private
+ */
+function abortHandshakeOrEmitwsClientError(server, req, socket, code, message) {
+ if (server.listenerCount('wsClientError')) {
+ const err = new Error(message);
+ Error.captureStackTrace(err, abortHandshakeOrEmitwsClientError);
+
+ server.emit('wsClientError', err, socket, req);
+ } else {
+ abortHandshake(socket, code, message);
+ }
+}
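+
+// Sketch of a `'wsClientError'` listener (illustrative). When a listener is
+// attached, the handshake is not aborted automatically, so the listener is
+// responsible for closing the socket.
+//
+//   wss.on('wsClientError', (err, socket, req) => {
+//     console.error(err);
+//     socket.end('HTTP/1.1 400 Bad Request\r\n\r\n');
+//   });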
diff --git a/testing/xpcshell/node-ws/lib/websocket.js b/testing/xpcshell/node-ws/lib/websocket.js
new file mode 100644
index 0000000000..3132cc1500
--- /dev/null
+++ b/testing/xpcshell/node-ws/lib/websocket.js
@@ -0,0 +1,1305 @@
+/* eslint no-unused-vars: ["error", { "varsIgnorePattern": "^Readable$" }] */
+
+'use strict';
+
+const EventEmitter = require('events');
+const https = require('https');
+const http = require('http');
+const net = require('net');
+const tls = require('tls');
+const { randomBytes, createHash } = require('crypto');
+const { Readable } = require('stream');
+const { URL } = require('url');
+
+const PerMessageDeflate = require('./permessage-deflate');
+const Receiver = require('./receiver');
+const Sender = require('./sender');
+const {
+ BINARY_TYPES,
+ EMPTY_BUFFER,
+ GUID,
+ kForOnEventAttribute,
+ kListener,
+ kStatusCode,
+ kWebSocket,
+ NOOP
+} = require('./constants');
+const {
+ EventTarget: { addEventListener, removeEventListener }
+} = require('./event-target');
+const { format, parse } = require('./extension');
+const { toBuffer } = require('./buffer-util');
+
+const closeTimeout = 30 * 1000;
+const kAborted = Symbol('kAborted');
+const protocolVersions = [8, 13];
+const readyStates = ['CONNECTING', 'OPEN', 'CLOSING', 'CLOSED'];
+const subprotocolRegex = /^[!#$%&'*+\-.0-9A-Z^_`|a-z~]+$/;
+
+/**
+ * Class representing a WebSocket.
+ *
+ * @extends EventEmitter
+ */
+class WebSocket extends EventEmitter {
+ /**
+ * Create a new `WebSocket`.
+ *
+ * @param {(String|URL)} address The URL to which to connect
+ * @param {(String|String[])} [protocols] The subprotocols
+ * @param {Object} [options] Connection options
+ */
+ constructor(address, protocols, options) {
+ super();
+
+ this._binaryType = BINARY_TYPES[0];
+ this._closeCode = 1006;
+ this._closeFrameReceived = false;
+ this._closeFrameSent = false;
+ this._closeMessage = EMPTY_BUFFER;
+ this._closeTimer = null;
+ this._extensions = {};
+ this._paused = false;
+ this._protocol = '';
+ this._readyState = WebSocket.CONNECTING;
+ this._receiver = null;
+ this._sender = null;
+ this._socket = null;
+
+ if (address !== null) {
+ this._bufferedAmount = 0;
+ this._isServer = false;
+ this._redirects = 0;
+
+ if (protocols === undefined) {
+ protocols = [];
+ } else if (!Array.isArray(protocols)) {
+ if (typeof protocols === 'object' && protocols !== null) {
+ options = protocols;
+ protocols = [];
+ } else {
+ protocols = [protocols];
+ }
+ }
+
+ initAsClient(this, address, protocols, options);
+ } else {
+ this._isServer = true;
+ }
+ }
+
+ /**
+ * This deviates from the WHATWG interface since ws doesn't support the
+ * required default "blob" type (instead we define a custom "nodebuffer"
+ * type).
+ *
+ * @type {String}
+ */
+ get binaryType() {
+ return this._binaryType;
+ }
+
+ set binaryType(type) {
+ if (!BINARY_TYPES.includes(type)) return;
+
+ this._binaryType = type;
+
+ //
+    // Allow changing `binaryType` on the fly.
+ //
+ if (this._receiver) this._receiver._binaryType = type;
+ }
+
+ /**
+ * @type {Number}
+ */
+ get bufferedAmount() {
+ if (!this._socket) return this._bufferedAmount;
+
+ return this._socket._writableState.length + this._sender._bufferedBytes;
+ }
+
+ /**
+ * @type {String}
+ */
+ get extensions() {
+ return Object.keys(this._extensions).join();
+ }
+
+ /**
+ * @type {Boolean}
+ */
+ get isPaused() {
+ return this._paused;
+ }
+
+ /**
+ * @type {Function}
+ */
+ /* istanbul ignore next */
+ get onclose() {
+ return null;
+ }
+
+ /**
+ * @type {Function}
+ */
+ /* istanbul ignore next */
+ get onerror() {
+ return null;
+ }
+
+ /**
+ * @type {Function}
+ */
+ /* istanbul ignore next */
+ get onopen() {
+ return null;
+ }
+
+ /**
+ * @type {Function}
+ */
+ /* istanbul ignore next */
+ get onmessage() {
+ return null;
+ }
+
+ /**
+ * @type {String}
+ */
+ get protocol() {
+ return this._protocol;
+ }
+
+ /**
+ * @type {Number}
+ */
+ get readyState() {
+ return this._readyState;
+ }
+
+ /**
+ * @type {String}
+ */
+ get url() {
+ return this._url;
+ }
+
+ /**
+ * Set up the socket and the internal resources.
+ *
+ * @param {(net.Socket|tls.Socket)} socket The network socket between the
+ * server and client
+ * @param {Buffer} head The first packet of the upgraded stream
+ * @param {Object} options Options object
+ * @param {Function} [options.generateMask] The function used to generate the
+ * masking key
+ * @param {Number} [options.maxPayload=0] The maximum allowed message size
+ * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or
+ * not to skip UTF-8 validation for text and close messages
+ * @private
+ */
+ setSocket(socket, head, options) {
+ const receiver = new Receiver({
+ binaryType: this.binaryType,
+ extensions: this._extensions,
+ isServer: this._isServer,
+ maxPayload: options.maxPayload,
+ skipUTF8Validation: options.skipUTF8Validation
+ });
+
+ this._sender = new Sender(socket, this._extensions, options.generateMask);
+ this._receiver = receiver;
+ this._socket = socket;
+
+ receiver[kWebSocket] = this;
+ socket[kWebSocket] = this;
+
+ receiver.on('conclude', receiverOnConclude);
+ receiver.on('drain', receiverOnDrain);
+ receiver.on('error', receiverOnError);
+ receiver.on('message', receiverOnMessage);
+ receiver.on('ping', receiverOnPing);
+ receiver.on('pong', receiverOnPong);
+
+ socket.setTimeout(0);
+ socket.setNoDelay();
+
+ if (head.length > 0) socket.unshift(head);
+
+ socket.on('close', socketOnClose);
+ socket.on('data', socketOnData);
+ socket.on('end', socketOnEnd);
+ socket.on('error', socketOnError);
+
+ this._readyState = WebSocket.OPEN;
+ this.emit('open');
+ }
+
+ /**
+ * Emit the `'close'` event.
+ *
+ * @private
+ */
+ emitClose() {
+ if (!this._socket) {
+ this._readyState = WebSocket.CLOSED;
+ this.emit('close', this._closeCode, this._closeMessage);
+ return;
+ }
+
+ if (this._extensions[PerMessageDeflate.extensionName]) {
+ this._extensions[PerMessageDeflate.extensionName].cleanup();
+ }
+
+ this._receiver.removeAllListeners();
+ this._readyState = WebSocket.CLOSED;
+ this.emit('close', this._closeCode, this._closeMessage);
+ }
+
+ /**
+ * Start a closing handshake.
+ *
+ * +----------+ +-----------+ +----------+
+ * - - -|ws.close()|-->|close frame|-->|ws.close()|- - -
+ * | +----------+ +-----------+ +----------+ |
+ * +----------+ +-----------+ |
+ * CLOSING |ws.close()|<--|close frame|<--+-----+ CLOSING
+ * +----------+ +-----------+ |
+ * | | | +---+ |
+ * +------------------------+-->|fin| - - - -
+ * | +---+ | +---+
+ * - - - - -|fin|<---------------------+
+ * +---+
+ *
+ * @param {Number} [code] Status code explaining why the connection is closing
+ * @param {(String|Buffer)} [data] The reason why the connection is
+ * closing
+ * @public
+ */
+ close(code, data) {
+ if (this.readyState === WebSocket.CLOSED) return;
+ if (this.readyState === WebSocket.CONNECTING) {
+ const msg = 'WebSocket was closed before the connection was established';
+ return abortHandshake(this, this._req, msg);
+ }
+
+ if (this.readyState === WebSocket.CLOSING) {
+ if (
+ this._closeFrameSent &&
+ (this._closeFrameReceived || this._receiver._writableState.errorEmitted)
+ ) {
+ this._socket.end();
+ }
+
+ return;
+ }
+
+ this._readyState = WebSocket.CLOSING;
+ this._sender.close(code, data, !this._isServer, (err) => {
+ //
+ // This error is handled by the `'error'` listener on the socket. We only
+ // want to know if the close frame has been sent here.
+ //
+ if (err) return;
+
+ this._closeFrameSent = true;
+
+ if (
+ this._closeFrameReceived ||
+ this._receiver._writableState.errorEmitted
+ ) {
+ this._socket.end();
+ }
+ });
+
+ //
+ // Specify a timeout for the closing handshake to complete.
+ //
+ this._closeTimer = setTimeout(
+ this._socket.destroy.bind(this._socket),
+ closeTimeout
+ );
+ }
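+
+  //
+  // Typical calls (illustrative): both arguments are optional, and the
+  // connection is torn down once the closing handshake completes or the
+  // timeout above fires.
+  //
+  //   ws.close();                  // close without an explicit status code
+  //   ws.close(1000, 'work done'); // normal closure with a reason
+  //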
+
+ /**
+ * Pause the socket.
+ *
+ * @public
+ */
+ pause() {
+ if (
+ this.readyState === WebSocket.CONNECTING ||
+ this.readyState === WebSocket.CLOSED
+ ) {
+ return;
+ }
+
+ this._paused = true;
+ this._socket.pause();
+ }
+
+ /**
+ * Send a ping.
+ *
+ * @param {*} [data] The data to send
+ * @param {Boolean} [mask] Indicates whether or not to mask `data`
+ * @param {Function} [cb] Callback which is executed when the ping is sent
+ * @public
+ */
+ ping(data, mask, cb) {
+ if (this.readyState === WebSocket.CONNECTING) {
+ throw new Error('WebSocket is not open: readyState 0 (CONNECTING)');
+ }
+
+ if (typeof data === 'function') {
+ cb = data;
+ data = mask = undefined;
+ } else if (typeof mask === 'function') {
+ cb = mask;
+ mask = undefined;
+ }
+
+ if (typeof data === 'number') data = data.toString();
+
+ if (this.readyState !== WebSocket.OPEN) {
+ sendAfterClose(this, data, cb);
+ return;
+ }
+
+ if (mask === undefined) mask = !this._isServer;
+ this._sender.ping(data || EMPTY_BUFFER, mask, cb);
+ }
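+
+  //
+  // Example (illustrative): a keep-alive ping with a completion callback.
+  // `pong()` below accepts the same arguments.
+  //
+  //   ws.ping('keep-alive', undefined, (err) => {
+  //     if (err) console.error('ping failed', err);
+  //   });
+  //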
+
+ /**
+ * Send a pong.
+ *
+ * @param {*} [data] The data to send
+ * @param {Boolean} [mask] Indicates whether or not to mask `data`
+ * @param {Function} [cb] Callback which is executed when the pong is sent
+ * @public
+ */
+ pong(data, mask, cb) {
+ if (this.readyState === WebSocket.CONNECTING) {
+ throw new Error('WebSocket is not open: readyState 0 (CONNECTING)');
+ }
+
+ if (typeof data === 'function') {
+ cb = data;
+ data = mask = undefined;
+ } else if (typeof mask === 'function') {
+ cb = mask;
+ mask = undefined;
+ }
+
+ if (typeof data === 'number') data = data.toString();
+
+ if (this.readyState !== WebSocket.OPEN) {
+ sendAfterClose(this, data, cb);
+ return;
+ }
+
+ if (mask === undefined) mask = !this._isServer;
+ this._sender.pong(data || EMPTY_BUFFER, mask, cb);
+ }
+
+ /**
+ * Resume the socket.
+ *
+ * @public
+ */
+ resume() {
+ if (
+ this.readyState === WebSocket.CONNECTING ||
+ this.readyState === WebSocket.CLOSED
+ ) {
+ return;
+ }
+
+ this._paused = false;
+ if (!this._receiver._writableState.needDrain) this._socket.resume();
+ }
+
+ /**
+ * Send a data message.
+ *
+ * @param {*} data The message to send
+ * @param {Object} [options] Options object
+ * @param {Boolean} [options.binary] Specifies whether `data` is binary or
+ * text
+ * @param {Boolean} [options.compress] Specifies whether or not to compress
+ * `data`
+ * @param {Boolean} [options.fin=true] Specifies whether the fragment is the
+ * last one
+ * @param {Boolean} [options.mask] Specifies whether or not to mask `data`
+ * @param {Function} [cb] Callback which is executed when data is written out
+ * @public
+ */
+ send(data, options, cb) {
+ if (this.readyState === WebSocket.CONNECTING) {
+ throw new Error('WebSocket is not open: readyState 0 (CONNECTING)');
+ }
+
+ if (typeof options === 'function') {
+ cb = options;
+ options = {};
+ }
+
+ if (typeof data === 'number') data = data.toString();
+
+ if (this.readyState !== WebSocket.OPEN) {
+ sendAfterClose(this, data, cb);
+ return;
+ }
+
+ const opts = {
+ binary: typeof data !== 'string',
+ mask: !this._isServer,
+ compress: true,
+ fin: true,
+ ...options
+ };
+
+ if (!this._extensions[PerMessageDeflate.extensionName]) {
+ opts.compress = false;
+ }
+
+ this._sender.send(data || EMPTY_BUFFER, opts, cb);
+ }
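+
+  //
+  // Example (illustrative; `buf` is assumed to be a `Buffer`): the options
+  // object is optional, and `compress` only has an effect when
+  // permessage-deflate was negotiated.
+  //
+  //   ws.send('hello');                                // text frame
+  //   ws.send(buf, { binary: true, compress: false }); // uncompressed binary
+  //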
+
+ /**
+ * Forcibly close the connection.
+ *
+ * @public
+ */
+ terminate() {
+ if (this.readyState === WebSocket.CLOSED) return;
+ if (this.readyState === WebSocket.CONNECTING) {
+ const msg = 'WebSocket was closed before the connection was established';
+ return abortHandshake(this, this._req, msg);
+ }
+
+ if (this._socket) {
+ this._readyState = WebSocket.CLOSING;
+ this._socket.destroy();
+ }
+ }
+}
+
+/**
+ * @constant {Number} CONNECTING
+ * @memberof WebSocket
+ */
+Object.defineProperty(WebSocket, 'CONNECTING', {
+ enumerable: true,
+ value: readyStates.indexOf('CONNECTING')
+});
+
+/**
+ * @constant {Number} CONNECTING
+ * @memberof WebSocket.prototype
+ */
+Object.defineProperty(WebSocket.prototype, 'CONNECTING', {
+ enumerable: true,
+ value: readyStates.indexOf('CONNECTING')
+});
+
+/**
+ * @constant {Number} OPEN
+ * @memberof WebSocket
+ */
+Object.defineProperty(WebSocket, 'OPEN', {
+ enumerable: true,
+ value: readyStates.indexOf('OPEN')
+});
+
+/**
+ * @constant {Number} OPEN
+ * @memberof WebSocket.prototype
+ */
+Object.defineProperty(WebSocket.prototype, 'OPEN', {
+ enumerable: true,
+ value: readyStates.indexOf('OPEN')
+});
+
+/**
+ * @constant {Number} CLOSING
+ * @memberof WebSocket
+ */
+Object.defineProperty(WebSocket, 'CLOSING', {
+ enumerable: true,
+ value: readyStates.indexOf('CLOSING')
+});
+
+/**
+ * @constant {Number} CLOSING
+ * @memberof WebSocket.prototype
+ */
+Object.defineProperty(WebSocket.prototype, 'CLOSING', {
+ enumerable: true,
+ value: readyStates.indexOf('CLOSING')
+});
+
+/**
+ * @constant {Number} CLOSED
+ * @memberof WebSocket
+ */
+Object.defineProperty(WebSocket, 'CLOSED', {
+ enumerable: true,
+ value: readyStates.indexOf('CLOSED')
+});
+
+/**
+ * @constant {Number} CLOSED
+ * @memberof WebSocket.prototype
+ */
+Object.defineProperty(WebSocket.prototype, 'CLOSED', {
+ enumerable: true,
+ value: readyStates.indexOf('CLOSED')
+});
+
+[
+ 'binaryType',
+ 'bufferedAmount',
+ 'extensions',
+ 'isPaused',
+ 'protocol',
+ 'readyState',
+ 'url'
+].forEach((property) => {
+ Object.defineProperty(WebSocket.prototype, property, { enumerable: true });
+});
+
+//
+// Add the `onopen`, `onerror`, `onclose`, and `onmessage` attributes.
+// See https://html.spec.whatwg.org/multipage/comms.html#the-websocket-interface
+//
+['open', 'error', 'close', 'message'].forEach((method) => {
+ Object.defineProperty(WebSocket.prototype, `on${method}`, {
+ enumerable: true,
+ get() {
+ for (const listener of this.listeners(method)) {
+ if (listener[kForOnEventAttribute]) return listener[kListener];
+ }
+
+ return null;
+ },
+ set(handler) {
+ for (const listener of this.listeners(method)) {
+ if (listener[kForOnEventAttribute]) {
+ this.removeListener(method, listener);
+ break;
+ }
+ }
+
+ if (typeof handler !== 'function') return;
+
+ this.addEventListener(method, handler, {
+ [kForOnEventAttribute]: true
+ });
+ }
+ });
+});
+
+WebSocket.prototype.addEventListener = addEventListener;
+WebSocket.prototype.removeEventListener = removeEventListener;
+
+module.exports = WebSocket;
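+
+// Client usage sketch (illustrative; the URL and subprotocol are placeholders):
+//
+//   const ws = new WebSocket('ws://localhost:8080/ws', ['chat'], {
+//     handshakeTimeout: 5000
+//   });
+//
+//   ws.on('open', () => ws.send('hello'));
+//   ws.on('message', (data, isBinary) => {
+//     console.log(isBinary ? data : data.toString());
+//   });
+//   ws.on('close', (code, reason) => console.log('closed', code, reason.toString()));
+//   ws.on('error', console.error);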
+
+/**
+ * Initialize a WebSocket client.
+ *
+ * @param {WebSocket} websocket The client to initialize
+ * @param {(String|URL)} address The URL to which to connect
+ * @param {Array} protocols The subprotocols
+ * @param {Object} [options] Connection options
+ * @param {Boolean} [options.followRedirects=false] Whether or not to follow
+ * redirects
+ * @param {Function} [options.generateMask] The function used to generate the
+ * masking key
+ * @param {Number} [options.handshakeTimeout] Timeout in milliseconds for the
+ * handshake request
+ * @param {Number} [options.maxPayload=104857600] The maximum allowed message
+ * size
+ * @param {Number} [options.maxRedirects=10] The maximum number of redirects
+ * allowed
+ * @param {String} [options.origin] Value of the `Origin` or
+ * `Sec-WebSocket-Origin` header
+ * @param {(Boolean|Object)} [options.perMessageDeflate=true] Enable/disable
+ * permessage-deflate
+ * @param {Number} [options.protocolVersion=13] Value of the
+ * `Sec-WebSocket-Version` header
+ * @param {Boolean} [options.skipUTF8Validation=false] Specifies whether or
+ * not to skip UTF-8 validation for text and close messages
+ * @private
+ */
+function initAsClient(websocket, address, protocols, options) {
+ const opts = {
+ protocolVersion: protocolVersions[1],
+ maxPayload: 100 * 1024 * 1024,
+ skipUTF8Validation: false,
+ perMessageDeflate: true,
+ followRedirects: false,
+ maxRedirects: 10,
+ ...options,
+ createConnection: undefined,
+ socketPath: undefined,
+ hostname: undefined,
+ protocol: undefined,
+ timeout: undefined,
+ method: 'GET',
+ host: undefined,
+ path: undefined,
+ port: undefined
+ };
+
+ if (!protocolVersions.includes(opts.protocolVersion)) {
+ throw new RangeError(
+ `Unsupported protocol version: ${opts.protocolVersion} ` +
+ `(supported versions: ${protocolVersions.join(', ')})`
+ );
+ }
+
+ let parsedUrl;
+
+ if (address instanceof URL) {
+ parsedUrl = address;
+ websocket._url = address.href;
+ } else {
+ try {
+ parsedUrl = new URL(address);
+ } catch (e) {
+ throw new SyntaxError(`Invalid URL: ${address}`);
+ }
+
+ websocket._url = address;
+ }
+
+ const isSecure = parsedUrl.protocol === 'wss:';
+ const isUnixSocket = parsedUrl.protocol === 'ws+unix:';
+ let invalidURLMessage;
+
+ if (parsedUrl.protocol !== 'ws:' && !isSecure && !isUnixSocket) {
+ invalidURLMessage =
+ 'The URL\'s protocol must be one of "ws:", "wss:", or "ws+unix:"';
+ } else if (isUnixSocket && !parsedUrl.pathname) {
+ invalidURLMessage = "The URL's pathname is empty";
+ } else if (parsedUrl.hash) {
+ invalidURLMessage = 'The URL contains a fragment identifier';
+ }
+
+ if (invalidURLMessage) {
+ const err = new SyntaxError(invalidURLMessage);
+
+ if (websocket._redirects === 0) {
+ throw err;
+ } else {
+ emitErrorAndClose(websocket, err);
+ return;
+ }
+ }
+
+ const defaultPort = isSecure ? 443 : 80;
+ const key = randomBytes(16).toString('base64');
+ const request = isSecure ? https.request : http.request;
+ const protocolSet = new Set();
+ let perMessageDeflate;
+
+ opts.createConnection = isSecure ? tlsConnect : netConnect;
+ opts.defaultPort = opts.defaultPort || defaultPort;
+ opts.port = parsedUrl.port || defaultPort;
+ opts.host = parsedUrl.hostname.startsWith('[')
+ ? parsedUrl.hostname.slice(1, -1)
+ : parsedUrl.hostname;
+ opts.headers = {
+ ...opts.headers,
+ 'Sec-WebSocket-Version': opts.protocolVersion,
+ 'Sec-WebSocket-Key': key,
+ Connection: 'Upgrade',
+ Upgrade: 'websocket'
+ };
+ opts.path = parsedUrl.pathname + parsedUrl.search;
+ opts.timeout = opts.handshakeTimeout;
+
+ if (opts.perMessageDeflate) {
+ perMessageDeflate = new PerMessageDeflate(
+ opts.perMessageDeflate !== true ? opts.perMessageDeflate : {},
+ false,
+ opts.maxPayload
+ );
+ opts.headers['Sec-WebSocket-Extensions'] = format({
+ [PerMessageDeflate.extensionName]: perMessageDeflate.offer()
+ });
+ }
+ if (protocols.length) {
+ for (const protocol of protocols) {
+ if (
+ typeof protocol !== 'string' ||
+ !subprotocolRegex.test(protocol) ||
+ protocolSet.has(protocol)
+ ) {
+ throw new SyntaxError(
+ 'An invalid or duplicated subprotocol was specified'
+ );
+ }
+
+ protocolSet.add(protocol);
+ }
+
+ opts.headers['Sec-WebSocket-Protocol'] = protocols.join(',');
+ }
+ if (opts.origin) {
+ if (opts.protocolVersion < 13) {
+ opts.headers['Sec-WebSocket-Origin'] = opts.origin;
+ } else {
+ opts.headers.Origin = opts.origin;
+ }
+ }
+ if (parsedUrl.username || parsedUrl.password) {
+ opts.auth = `${parsedUrl.username}:${parsedUrl.password}`;
+ }
+
+ if (isUnixSocket) {
+ const parts = opts.path.split(':');
+
+ opts.socketPath = parts[0];
+ opts.path = parts[1];
+ }
+
+ let req;
+
+ if (opts.followRedirects) {
+ if (websocket._redirects === 0) {
+ websocket._originalUnixSocket = isUnixSocket;
+ websocket._originalSecure = isSecure;
+ websocket._originalHostOrSocketPath = isUnixSocket
+ ? opts.socketPath
+ : parsedUrl.host;
+
+ const headers = options && options.headers;
+
+ //
+ // Shallow copy the user provided options so that headers can be changed
+ // without mutating the original object.
+ //
+ options = { ...options, headers: {} };
+
+ if (headers) {
+ for (const [key, value] of Object.entries(headers)) {
+ options.headers[key.toLowerCase()] = value;
+ }
+ }
+ } else if (websocket.listenerCount('redirect') === 0) {
+ const isSameHost = isUnixSocket
+ ? websocket._originalUnixSocket
+ ? opts.socketPath === websocket._originalHostOrSocketPath
+ : false
+ : websocket._originalUnixSocket
+ ? false
+ : parsedUrl.host === websocket._originalHostOrSocketPath;
+
+ if (!isSameHost || (websocket._originalSecure && !isSecure)) {
+ //
+ // Match curl 7.77.0 behavior and drop the following headers. These
+ // headers are also dropped when following a redirect to a subdomain.
+ //
+ delete opts.headers.authorization;
+ delete opts.headers.cookie;
+
+ if (!isSameHost) delete opts.headers.host;
+
+ opts.auth = undefined;
+ }
+ }
+
+ //
+ // Match curl 7.77.0 behavior and make the first `Authorization` header win.
+ // If the `Authorization` header is set, then there is nothing to do as it
+ // will take precedence.
+ //
+ if (opts.auth && !options.headers.authorization) {
+ options.headers.authorization =
+ 'Basic ' + Buffer.from(opts.auth).toString('base64');
+ }
+
+ req = websocket._req = request(opts);
+
+ if (websocket._redirects) {
+ //
+ // Unlike what is done for the `'upgrade'` event, no early exit is
+ // triggered here if the user calls `websocket.close()` or
+ // `websocket.terminate()` from a listener of the `'redirect'` event. This
+ // is because the user can also call `request.destroy()` with an error
+ // before calling `websocket.close()` or `websocket.terminate()` and this
+ // would result in an error being emitted on the `request` object with no
+ // `'error'` event listeners attached.
+ //
+ websocket.emit('redirect', websocket.url, req);
+ }
+ } else {
+ req = websocket._req = request(opts);
+ }
+
+ if (opts.timeout) {
+ req.on('timeout', () => {
+ abortHandshake(websocket, req, 'Opening handshake has timed out');
+ });
+ }
+
+ req.on('error', (err) => {
+ if (req === null || req[kAborted]) return;
+
+ req = websocket._req = null;
+ emitErrorAndClose(websocket, err);
+ });
+
+ req.on('response', (res) => {
+ const location = res.headers.location;
+ const statusCode = res.statusCode;
+
+ if (
+ location &&
+ opts.followRedirects &&
+ statusCode >= 300 &&
+ statusCode < 400
+ ) {
+ if (++websocket._redirects > opts.maxRedirects) {
+ abortHandshake(websocket, req, 'Maximum redirects exceeded');
+ return;
+ }
+
+ req.abort();
+
+ let addr;
+
+ try {
+ addr = new URL(location, address);
+ } catch (e) {
+ const err = new SyntaxError(`Invalid URL: ${location}`);
+ emitErrorAndClose(websocket, err);
+ return;
+ }
+
+ initAsClient(websocket, addr, protocols, options);
+ } else if (!websocket.emit('unexpected-response', req, res)) {
+ abortHandshake(
+ websocket,
+ req,
+ `Unexpected server response: ${res.statusCode}`
+ );
+ }
+ });
+
+ req.on('upgrade', (res, socket, head) => {
+ websocket.emit('upgrade', res);
+
+ //
+ // The user may have closed the connection from a listener of the
+ // `'upgrade'` event.
+ //
+ if (websocket.readyState !== WebSocket.CONNECTING) return;
+
+ req = websocket._req = null;
+
+ if (res.headers.upgrade.toLowerCase() !== 'websocket') {
+ abortHandshake(websocket, socket, 'Invalid Upgrade header');
+ return;
+ }
+
+ const digest = createHash('sha1')
+ .update(key + GUID)
+ .digest('base64');
+
+ if (res.headers['sec-websocket-accept'] !== digest) {
+ abortHandshake(websocket, socket, 'Invalid Sec-WebSocket-Accept header');
+ return;
+ }
+
+ const serverProt = res.headers['sec-websocket-protocol'];
+ let protError;
+
+ if (serverProt !== undefined) {
+ if (!protocolSet.size) {
+ protError = 'Server sent a subprotocol but none was requested';
+ } else if (!protocolSet.has(serverProt)) {
+ protError = 'Server sent an invalid subprotocol';
+ }
+ } else if (protocolSet.size) {
+ protError = 'Server sent no subprotocol';
+ }
+
+ if (protError) {
+ abortHandshake(websocket, socket, protError);
+ return;
+ }
+
+ if (serverProt) websocket._protocol = serverProt;
+
+ const secWebSocketExtensions = res.headers['sec-websocket-extensions'];
+
+ if (secWebSocketExtensions !== undefined) {
+ if (!perMessageDeflate) {
+ const message =
+ 'Server sent a Sec-WebSocket-Extensions header but no extension ' +
+ 'was requested';
+ abortHandshake(websocket, socket, message);
+ return;
+ }
+
+ let extensions;
+
+ try {
+ extensions = parse(secWebSocketExtensions);
+ } catch (err) {
+ const message = 'Invalid Sec-WebSocket-Extensions header';
+ abortHandshake(websocket, socket, message);
+ return;
+ }
+
+ const extensionNames = Object.keys(extensions);
+
+ if (
+ extensionNames.length !== 1 ||
+ extensionNames[0] !== PerMessageDeflate.extensionName
+ ) {
+ const message = 'Server indicated an extension that was not requested';
+ abortHandshake(websocket, socket, message);
+ return;
+ }
+
+ try {
+ perMessageDeflate.accept(extensions[PerMessageDeflate.extensionName]);
+ } catch (err) {
+ const message = 'Invalid Sec-WebSocket-Extensions header';
+ abortHandshake(websocket, socket, message);
+ return;
+ }
+
+ websocket._extensions[PerMessageDeflate.extensionName] =
+ perMessageDeflate;
+ }
+
+ websocket.setSocket(socket, head, {
+ generateMask: opts.generateMask,
+ maxPayload: opts.maxPayload,
+ skipUTF8Validation: opts.skipUTF8Validation
+ });
+ });
+
+ req.end();
+}
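+
+// The options documented above are forwarded untouched from the `WebSocket`
+// constructor, e.g. (illustrative values):
+//
+//   new WebSocket('wss://example.com/socket', {
+//     followRedirects: true,
+//     maxRedirects: 5,
+//     perMessageDeflate: false,
+//     origin: 'https://example.com'
+//   });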
+
+/**
+ * Emit the `'error'` and `'close'` events.
+ *
+ * @param {WebSocket} websocket The WebSocket instance
+ * @param {Error} err The error to emit
+ * @private
+ */
+function emitErrorAndClose(websocket, err) {
+ websocket._readyState = WebSocket.CLOSING;
+ websocket.emit('error', err);
+ websocket.emitClose();
+}
+
+/**
+ * Create a `net.Socket` and initiate a connection.
+ *
+ * @param {Object} options Connection options
+ * @return {net.Socket} The newly created socket used to start the connection
+ * @private
+ */
+function netConnect(options) {
+ options.path = options.socketPath;
+ return net.connect(options);
+}
+
+/**
+ * Create a `tls.TLSSocket` and initiate a connection.
+ *
+ * @param {Object} options Connection options
+ * @return {tls.TLSSocket} The newly created socket used to start the connection
+ * @private
+ */
+function tlsConnect(options) {
+ options.path = undefined;
+
+ if (!options.servername && options.servername !== '') {
+ options.servername = net.isIP(options.host) ? '' : options.host;
+ }
+
+ return tls.connect(options);
+}
+
+/**
+ * Abort the handshake and emit an error.
+ *
+ * @param {WebSocket} websocket The WebSocket instance
+ * @param {(http.ClientRequest|net.Socket|tls.Socket)} stream The request to
+ * abort or the socket to destroy
+ * @param {String} message The error message
+ * @private
+ */
+function abortHandshake(websocket, stream, message) {
+ websocket._readyState = WebSocket.CLOSING;
+
+ const err = new Error(message);
+ Error.captureStackTrace(err, abortHandshake);
+
+ if (stream.setHeader) {
+ stream[kAborted] = true;
+ stream.abort();
+
+ if (stream.socket && !stream.socket.destroyed) {
+ //
+ // On Node.js >= 14.3.0 `request.abort()` does not destroy the socket if
+ // called after the request completed. See
+ // https://github.com/websockets/ws/issues/1869.
+ //
+ stream.socket.destroy();
+ }
+
+ process.nextTick(emitErrorAndClose, websocket, err);
+ } else {
+ stream.destroy(err);
+ stream.once('error', websocket.emit.bind(websocket, 'error'));
+ stream.once('close', websocket.emitClose.bind(websocket));
+ }
+}
+
+/**
+ * Handle cases where the `ping()`, `pong()`, or `send()` methods are called
+ * when the `readyState` attribute is `CLOSING` or `CLOSED`.
+ *
+ * @param {WebSocket} websocket The WebSocket instance
+ * @param {*} [data] The data to send
+ * @param {Function} [cb] Callback
+ * @private
+ */
+function sendAfterClose(websocket, data, cb) {
+ if (data) {
+ const length = toBuffer(data).length;
+
+ //
+ // The `_bufferedAmount` property is used only when the peer is a client and
+ // the opening handshake fails. Under these circumstances, in fact, the
+ // `setSocket()` method is not called, so the `_socket` and `_sender`
+ // properties are set to `null`.
+ //
+ if (websocket._socket) websocket._sender._bufferedBytes += length;
+ else websocket._bufferedAmount += length;
+ }
+
+ if (cb) {
+ const err = new Error(
+ `WebSocket is not open: readyState ${websocket.readyState} ` +
+ `(${readyStates[websocket.readyState]})`
+ );
+ cb(err);
+ }
+}
+
+/**
+ * The listener of the `Receiver` `'conclude'` event.
+ *
+ * @param {Number} code The status code
+ * @param {Buffer} reason The reason for closing
+ * @private
+ */
+function receiverOnConclude(code, reason) {
+ const websocket = this[kWebSocket];
+
+ websocket._closeFrameReceived = true;
+ websocket._closeMessage = reason;
+ websocket._closeCode = code;
+
+ if (websocket._socket[kWebSocket] === undefined) return;
+
+ websocket._socket.removeListener('data', socketOnData);
+ process.nextTick(resume, websocket._socket);
+
+ if (code === 1005) websocket.close();
+ else websocket.close(code, reason);
+}
+
+/**
+ * The listener of the `Receiver` `'drain'` event.
+ *
+ * @private
+ */
+function receiverOnDrain() {
+ const websocket = this[kWebSocket];
+
+ if (!websocket.isPaused) websocket._socket.resume();
+}
+
+/**
+ * The listener of the `Receiver` `'error'` event.
+ *
+ * @param {(RangeError|Error)} err The emitted error
+ * @private
+ */
+function receiverOnError(err) {
+ const websocket = this[kWebSocket];
+
+ if (websocket._socket[kWebSocket] !== undefined) {
+ websocket._socket.removeListener('data', socketOnData);
+
+ //
+ // On Node.js < 14.0.0 the `'error'` event is emitted synchronously. See
+ // https://github.com/websockets/ws/issues/1940.
+ //
+ process.nextTick(resume, websocket._socket);
+
+ websocket.close(err[kStatusCode]);
+ }
+
+ websocket.emit('error', err);
+}
+
+/**
+ * The listener of the `Receiver` `'finish'` event.
+ *
+ * @private
+ */
+function receiverOnFinish() {
+ this[kWebSocket].emitClose();
+}
+
+/**
+ * The listener of the `Receiver` `'message'` event.
+ *
+ * @param {(Buffer|ArrayBuffer|Buffer[])} data The message
+ * @param {Boolean} isBinary Specifies whether the message is binary or not
+ * @private
+ */
+function receiverOnMessage(data, isBinary) {
+ this[kWebSocket].emit('message', data, isBinary);
+}
+
+/**
+ * The listener of the `Receiver` `'ping'` event.
+ *
+ * @param {Buffer} data The data included in the ping frame
+ * @private
+ */
+function receiverOnPing(data) {
+ const websocket = this[kWebSocket];
+
+ websocket.pong(data, !websocket._isServer, NOOP);
+ websocket.emit('ping', data);
+}
+
+/**
+ * The listener of the `Receiver` `'pong'` event.
+ *
+ * @param {Buffer} data The data included in the pong frame
+ * @private
+ */
+function receiverOnPong(data) {
+ this[kWebSocket].emit('pong', data);
+}
+
+/**
+ * Resume a readable stream.
+ *
+ * @param {Readable} stream The readable stream
+ * @private
+ */
+function resume(stream) {
+ stream.resume();
+}
+
+/**
+ * The listener of the `net.Socket` `'close'` event.
+ *
+ * @private
+ */
+function socketOnClose() {
+ const websocket = this[kWebSocket];
+
+ this.removeListener('close', socketOnClose);
+ this.removeListener('data', socketOnData);
+ this.removeListener('end', socketOnEnd);
+
+ websocket._readyState = WebSocket.CLOSING;
+
+ let chunk;
+
+ //
+ // The close frame might not have been received or the `'end'` event emitted,
+ // for example, if the socket was destroyed due to an error. Ensure that the
+ // `receiver` stream is closed after writing any remaining buffered data to
+ // it. If the readable side of the socket is in flowing mode then there is no
+ // buffered data as everything has been already written and `readable.read()`
+ // will return `null`. If instead, the socket is paused, any possible buffered
+ // data will be read as a single chunk.
+ //
+ if (
+ !this._readableState.endEmitted &&
+ !websocket._closeFrameReceived &&
+ !websocket._receiver._writableState.errorEmitted &&
+ (chunk = websocket._socket.read()) !== null
+ ) {
+ websocket._receiver.write(chunk);
+ }
+
+ websocket._receiver.end();
+
+ this[kWebSocket] = undefined;
+
+ clearTimeout(websocket._closeTimer);
+
+ if (
+ websocket._receiver._writableState.finished ||
+ websocket._receiver._writableState.errorEmitted
+ ) {
+ websocket.emitClose();
+ } else {
+ websocket._receiver.on('error', receiverOnFinish);
+ websocket._receiver.on('finish', receiverOnFinish);
+ }
+}
+
+/**
+ * The listener of the `net.Socket` `'data'` event.
+ *
+ * @param {Buffer} chunk A chunk of data
+ * @private
+ */
+function socketOnData(chunk) {
+ if (!this[kWebSocket]._receiver.write(chunk)) {
+ this.pause();
+ }
+}
+
+/**
+ * The listener of the `net.Socket` `'end'` event.
+ *
+ * @private
+ */
+function socketOnEnd() {
+ const websocket = this[kWebSocket];
+
+ websocket._readyState = WebSocket.CLOSING;
+ websocket._receiver.end();
+ this.end();
+}
+
+/**
+ * The listener of the `net.Socket` `'error'` event.
+ *
+ * @private
+ */
+function socketOnError() {
+ const websocket = this[kWebSocket];
+
+ this.removeListener('error', socketOnError);
+ this.on('error', NOOP);
+
+ if (websocket) {
+ websocket._readyState = WebSocket.CLOSING;
+ this.destroy();
+ }
+}
diff --git a/testing/xpcshell/node-ws/package.json b/testing/xpcshell/node-ws/package.json
new file mode 100644
index 0000000000..27b9244a46
--- /dev/null
+++ b/testing/xpcshell/node-ws/package.json
@@ -0,0 +1,61 @@
+{
+ "name": "ws",
+ "version": "8.8.1",
+ "description": "Simple to use, blazing fast and thoroughly tested websocket client and server for Node.js",
+ "keywords": [
+ "HyBi",
+ "Push",
+ "RFC-6455",
+ "WebSocket",
+ "WebSockets",
+ "real-time"
+ ],
+ "homepage": "https://github.com/websockets/ws",
+ "bugs": "https://github.com/websockets/ws/issues",
+ "repository": "websockets/ws",
+ "author": "Einar Otto Stangvik <einaros@gmail.com> (http://2x.io)",
+ "license": "MIT",
+ "main": "index.js",
+ "exports": {
+ "import": "./wrapper.mjs",
+ "require": "./index.js"
+ },
+ "browser": "browser.js",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "files": [
+ "browser.js",
+ "index.js",
+ "lib/*.js",
+ "wrapper.mjs"
+ ],
+ "scripts": {
+ "test": "nyc --reporter=lcov --reporter=text mocha --throw-deprecation test/*.test.js",
+ "integration": "mocha --throw-deprecation test/*.integration.js",
+ "lint": "eslint --ignore-path .gitignore . && prettier --check --ignore-path .gitignore \"**/*.{json,md,yaml,yml}\""
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": "^5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ },
+ "devDependencies": {
+ "benchmark": "^2.1.4",
+ "bufferutil": "^4.0.1",
+ "eslint": "^8.0.0",
+ "eslint-config-prettier": "^8.1.0",
+ "eslint-plugin-prettier": "^4.0.0",
+ "mocha": "^8.4.0",
+ "nyc": "^15.0.0",
+ "prettier": "^2.0.5",
+ "utf-8-validate": "^5.0.2"
+ }
+}
diff --git a/testing/xpcshell/node-ws/test/autobahn-server.js b/testing/xpcshell/node-ws/test/autobahn-server.js
new file mode 100644
index 0000000000..24ade11497
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/autobahn-server.js
@@ -0,0 +1,17 @@
+'use strict';
+
+const WebSocket = require('../');
+
+const port = process.argv.length > 2 ? parseInt(process.argv[2]) : 9001;
+const wss = new WebSocket.Server({ port }, () => {
+ console.log(
+    `Listening to port ${port}. Use an extra argument to define the port`
+ );
+});
+
+wss.on('connection', (ws) => {
+ ws.on('message', (data, isBinary) => {
+ ws.send(data, { binary: isBinary });
+ });
+ ws.on('error', (e) => console.error(e));
+});
diff --git a/testing/xpcshell/node-ws/test/autobahn.js b/testing/xpcshell/node-ws/test/autobahn.js
new file mode 100644
index 0000000000..51532fc52e
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/autobahn.js
@@ -0,0 +1,39 @@
+'use strict';
+
+const WebSocket = require('../');
+
+let currentTest = 1;
+let testCount;
+
+function nextTest() {
+ let ws;
+
+ if (currentTest > testCount) {
+ ws = new WebSocket('ws://localhost:9001/updateReports?agent=ws');
+ return;
+ }
+
+ console.log(`Running test case ${currentTest}/${testCount}`);
+
+ ws = new WebSocket(
+ `ws://localhost:9001/runCase?case=${currentTest}&agent=ws`
+ );
+ ws.on('message', (data, isBinary) => {
+ ws.send(data, { binary: isBinary });
+ });
+ ws.on('close', () => {
+ currentTest++;
+ process.nextTick(nextTest);
+ });
+ ws.on('error', (e) => console.error(e));
+}
+
+const ws = new WebSocket('ws://localhost:9001/getCaseCount');
+ws.on('message', (data) => {
+ testCount = parseInt(data);
+});
+ws.on('close', () => {
+ if (testCount > 0) {
+ nextTest();
+ }
+});
diff --git a/testing/xpcshell/node-ws/test/buffer-util.test.js b/testing/xpcshell/node-ws/test/buffer-util.test.js
new file mode 100644
index 0000000000..a6b84c94b1
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/buffer-util.test.js
@@ -0,0 +1,15 @@
+'use strict';
+
+const assert = require('assert');
+
+const { concat } = require('../lib/buffer-util');
+
+describe('bufferUtil', () => {
+ describe('concat', () => {
+ it('never returns uninitialized data', () => {
+ const buf = concat([Buffer.from([1, 2]), Buffer.from([3, 4])], 6);
+
+ assert.ok(buf.equals(Buffer.from([1, 2, 3, 4])));
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/create-websocket-stream.test.js b/testing/xpcshell/node-ws/test/create-websocket-stream.test.js
new file mode 100644
index 0000000000..4d51958cd9
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/create-websocket-stream.test.js
@@ -0,0 +1,598 @@
+'use strict';
+
+const assert = require('assert');
+const EventEmitter = require('events');
+const { createServer } = require('http');
+const { Duplex } = require('stream');
+const { randomBytes } = require('crypto');
+
+const createWebSocketStream = require('../lib/stream');
+const Sender = require('../lib/sender');
+const WebSocket = require('..');
+const { EMPTY_BUFFER } = require('../lib/constants');
+
+describe('createWebSocketStream', () => {
+ it('is exposed as a property of the `WebSocket` class', () => {
+ assert.strictEqual(WebSocket.createWebSocketStream, createWebSocketStream);
+ });
+
+ it('returns a `Duplex` stream', () => {
+ const duplex = createWebSocketStream(new EventEmitter());
+
+ assert.ok(duplex instanceof Duplex);
+ });
+
+ it('passes the options object to the `Duplex` constructor', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws, {
+ allowHalfOpen: false,
+ encoding: 'utf8'
+ });
+
+ duplex.on('data', (chunk) => {
+ assert.strictEqual(chunk, 'hi');
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send(Buffer.from('hi'));
+ ws.close();
+ });
+ });
+
+ describe('The returned stream', () => {
+ it('buffers writes if `readyState` is `CONNECTING`', (done) => {
+ const chunk = randomBytes(1024);
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.readyState, WebSocket.CONNECTING);
+
+ const duplex = createWebSocketStream(ws);
+
+ duplex.write(chunk);
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ ws.on('close', (code, reason) => {
+ assert.deepStrictEqual(message, chunk);
+ assert.ok(isBinary);
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ });
+
+ ws.close();
+ });
+ });
+
+ it('errors if a write occurs when `readyState` is `CLOSING`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('error', (err) => {
+ assert.ok(duplex.destroyed);
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 2 (CLOSING)'
+ );
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+ });
+
+ ws.on('open', () => {
+ ws._receiver.on('conclude', () => {
+ duplex.write('hi');
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('errors if a write occurs when `readyState` is `CLOSED`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('error', (err) => {
+ assert.ok(duplex.destroyed);
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 3 (CLOSED)'
+ );
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+ });
+
+ ws.on('close', () => {
+ duplex.write('hi');
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('does not error if `_final()` is called while connecting', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.readyState, WebSocket.CONNECTING);
+
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+
+ duplex.resume();
+ duplex.end();
+ });
+ });
+
+ it('makes `_final()` a noop if no socket is assigned', (done) => {
+ const server = createServer();
+
+ server.on('upgrade', (request, socket) => {
+ socket.on('end', socket.end);
+
+ const headers = [
+ 'HTTP/1.1 101 Switching Protocols',
+ 'Upgrade: websocket',
+ 'Connection: Upgrade',
+ 'Sec-WebSocket-Accept: foo'
+ ];
+
+ socket.write(headers.concat('\r\n').join('\r\n'));
+ });
+
+ server.listen(() => {
+ const called = [];
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+ const duplex = WebSocket.createWebSocketStream(ws);
+ const final = duplex._final;
+
+ duplex._final = (callback) => {
+ called.push('final');
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.strictEqual(ws._socket, null);
+
+ final(callback);
+ };
+
+ duplex.on('error', (err) => {
+ called.push('error');
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Invalid Sec-WebSocket-Accept header'
+ );
+ });
+
+ duplex.on('finish', () => {
+ called.push('finish');
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(called, ['final', 'error']);
+ server.close(done);
+ });
+
+ ws.on('upgrade', () => {
+ process.nextTick(() => {
+ duplex.end();
+ });
+ });
+ });
+ });
+
+ it('reemits errors', (done) => {
+ let duplexCloseEventEmitted = false;
+ let serverClientCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ duplex.on('close', () => {
+ duplexCloseEventEmitted = true;
+ if (serverClientCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws._socket.write(Buffer.from([0x85, 0x00]));
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1002);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+
+ serverClientCloseEventEmitted = true;
+ if (duplexCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+
+ it('does not swallow errors that may occur while destroying', (done) => {
+ const frame = Buffer.concat(
+ Sender.frame(Buffer.from([0x22, 0xfa, 0xec, 0x78]), {
+ fin: true,
+ rsv1: true,
+ opcode: 0x02,
+ mask: false,
+ readOnly: false
+ })
+ );
+
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'Z_DATA_ERROR');
+ assert.strictEqual(err.errno, -3);
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+ });
+
+ let bytesRead = 0;
+
+ ws.on('open', () => {
+ ws._socket.on('data', (chunk) => {
+ bytesRead += chunk.length;
+ if (bytesRead === frame.length) duplex.destroy();
+ });
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws._socket.write(frame);
+ });
+ });
+
+ it("does not suppress the throwing behavior of 'error' events", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ createWebSocketStream(ws);
+ });
+
+ wss.on('connection', (ws) => {
+ ws._socket.write(Buffer.from([0x85, 0x00]));
+ });
+
+ assert.strictEqual(process.listenerCount('uncaughtException'), 1);
+
+ const [listener] = process.listeners('uncaughtException');
+
+ process.removeAllListeners('uncaughtException');
+ process.once('uncaughtException', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ process.on('uncaughtException', listener);
+ wss.close(done);
+ });
+ });
+
+ it("is destroyed after 'end' and 'finish' are emitted (1/2)", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const events = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('end', () => {
+ events.push('end');
+ assert.ok(duplex.destroyed);
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(events, ['finish', 'end']);
+ wss.close(done);
+ });
+
+ duplex.on('finish', () => {
+ events.push('finish');
+ assert.ok(!duplex.destroyed);
+ assert.ok(duplex.readable);
+
+ duplex.resume();
+ });
+
+ ws.on('close', () => {
+ duplex.end();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send('foo');
+ ws.close();
+ });
+ });
+
+ it("is destroyed after 'end' and 'finish' are emitted (2/2)", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const events = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('end', () => {
+ events.push('end');
+ assert.ok(!duplex.destroyed);
+ assert.ok(duplex.writable);
+
+ duplex.end();
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(events, ['end', 'finish']);
+ wss.close(done);
+ });
+
+ duplex.on('finish', () => {
+ events.push('finish');
+ });
+
+ duplex.resume();
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('handles backpressure (1/3)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ // eslint-disable-next-line no-unused-vars
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ const duplex = createWebSocketStream(ws);
+
+ duplex.resume();
+
+ duplex.on('drain', () => {
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+
+ duplex.end();
+ });
+
+ const chunk = randomBytes(1024);
+ let ret;
+
+ do {
+ ret = duplex.write(chunk);
+ } while (ret !== false);
+ });
+ });
+
+ it('handles backpressure (2/3)', (done) => {
+ const wss = new WebSocket.Server(
+ { port: 0, perMessageDeflate: true },
+ () => {
+ const called = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+ const read = duplex._read;
+
+ duplex._read = () => {
+ duplex._read = read;
+ called.push('read');
+ assert.ok(ws._receiver._writableState.needDrain);
+ read();
+ assert.ok(ws._socket.isPaused());
+ };
+
+ ws.on('open', () => {
+ ws._socket.on('pause', () => {
+ duplex.resume();
+ });
+
+ ws._receiver.on('drain', () => {
+ called.push('drain');
+ assert.ok(!ws._socket.isPaused());
+ duplex.end();
+ });
+
+ const opts = {
+ fin: true,
+ opcode: 0x02,
+ mask: false,
+ readOnly: false
+ };
+
+ const list = [
+ ...Sender.frame(randomBytes(16 * 1024), { rsv1: false, ...opts }),
+ ...Sender.frame(Buffer.alloc(1), { rsv1: true, ...opts })
+ ];
+
+ // This hack is used because there is no guarantee that more than
+ // 16 KiB will be sent as a single TCP packet.
+ ws._socket.push(Buffer.concat(list));
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(called, ['read', 'drain']);
+ wss.close(done);
+ });
+ }
+ );
+ });
+
+ it('handles backpressure (3/3)', (done) => {
+ const wss = new WebSocket.Server(
+ { port: 0, perMessageDeflate: true },
+ () => {
+ const called = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+ const read = duplex._read;
+
+ duplex._read = () => {
+ called.push('read');
+ assert.ok(!ws._receiver._writableState.needDrain);
+ read();
+ assert.ok(!ws._socket.isPaused());
+ duplex.end();
+ };
+
+ ws.on('open', () => {
+ ws._receiver.on('drain', () => {
+ called.push('drain');
+ assert.ok(ws._socket.isPaused());
+ duplex.resume();
+ });
+
+ const opts = {
+ fin: true,
+ opcode: 0x02,
+ mask: false,
+ readOnly: false
+ };
+
+ const list = [
+ ...Sender.frame(randomBytes(16 * 1024), { rsv1: false, ...opts }),
+ ...Sender.frame(Buffer.alloc(1), { rsv1: true, ...opts })
+ ];
+
+ ws._socket.push(Buffer.concat(list));
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(called, ['drain', 'read']);
+ wss.close(done);
+ });
+ }
+ );
+ });
+
+ it('can be destroyed (1/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const error = new Error('Oops');
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('error', (err) => {
+ assert.strictEqual(err, error);
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+ });
+
+ ws.on('open', () => {
+ duplex.destroy(error);
+ });
+ });
+ });
+
+ it('can be destroyed (2/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+
+ ws.on('open', () => {
+ duplex.destroy();
+ });
+ });
+ });
+
+ it('converts text messages to strings in readable object mode', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const events = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws, { readableObjectMode: true });
+
+ duplex.on('data', (data) => {
+ events.push('data');
+ assert.strictEqual(data, 'foo');
+ });
+
+ duplex.on('end', () => {
+ events.push('end');
+ duplex.end();
+ });
+
+ duplex.on('close', () => {
+ assert.deepStrictEqual(events, ['data', 'end']);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send('foo');
+ ws.close();
+ });
+ });
+
+ it('resumes the socket if `readyState` is `CLOSING`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const duplex = createWebSocketStream(ws);
+
+ ws.on('message', () => {
+ assert.ok(ws._socket.isPaused());
+
+ duplex.on('close', () => {
+ wss.close(done);
+ });
+
+ duplex.end();
+
+ process.nextTick(() => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ duplex.resume();
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send(randomBytes(16 * 1024));
+ });
+ });
+ });
+});
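
For orientation, the tests above exercise the Duplex wrapper returned by createWebSocketStream(ws, options). A minimal usage sketch, assuming the module resolves from '../lib/stream' as in the suite; the endpoint URL is hypothetical:

'use strict';

// Minimal sketch (not part of the vendored suite): wrap a WebSocket in a
// Duplex stream so ordinary stream code can read and write it.
const WebSocket = require('../lib/websocket'); // assumed path
const createWebSocketStream = require('../lib/stream'); // assumed path

const ws = new WebSocket('ws://localhost:8080'); // hypothetical endpoint
const duplex = createWebSocketStream(ws, { encoding: 'utf8' });

duplex.on('error', console.error); // socket errors surface on the stream

process.stdin.pipe(duplex); // written data becomes WebSocket messages
duplex.pipe(process.stdout); // incoming messages become readable data
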
diff --git a/testing/xpcshell/node-ws/test/event-target.test.js b/testing/xpcshell/node-ws/test/event-target.test.js
new file mode 100644
index 0000000000..5caaa5c273
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/event-target.test.js
@@ -0,0 +1,253 @@
+'use strict';
+
+const assert = require('assert');
+
+const {
+ CloseEvent,
+ ErrorEvent,
+ Event,
+ MessageEvent
+} = require('../lib/event-target');
+
+describe('Event', () => {
+ describe('#ctor', () => {
+ it('takes a `type` argument', () => {
+ const event = new Event('foo');
+
+ assert.strictEqual(event.type, 'foo');
+ });
+ });
+
+ describe('Properties', () => {
+ describe('`target`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ Event.prototype,
+ 'target'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to `null`', () => {
+ const event = new Event('foo');
+
+ assert.strictEqual(event.target, null);
+ });
+ });
+
+ describe('`type`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ Event.prototype,
+ 'type'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+ });
+ });
+});
+
+describe('CloseEvent', () => {
+ it('inherits from `Event`', () => {
+ assert.ok(CloseEvent.prototype instanceof Event);
+ });
+
+ describe('#ctor', () => {
+ it('takes a `type` argument', () => {
+ const event = new CloseEvent('foo');
+
+ assert.strictEqual(event.type, 'foo');
+ });
+
+ it('takes an optional `options` argument', () => {
+ const event = new CloseEvent('close', {
+ code: 1000,
+ reason: 'foo',
+ wasClean: true
+ });
+
+ assert.strictEqual(event.type, 'close');
+ assert.strictEqual(event.code, 1000);
+ assert.strictEqual(event.reason, 'foo');
+ assert.strictEqual(event.wasClean, true);
+ });
+ });
+
+ describe('Properties', () => {
+ describe('`code`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ CloseEvent.prototype,
+ 'code'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to 0', () => {
+ const event = new CloseEvent('close');
+
+ assert.strictEqual(event.code, 0);
+ });
+ });
+
+ describe('`reason`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ CloseEvent.prototype,
+ 'reason'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to an empty string', () => {
+ const event = new CloseEvent('close');
+
+ assert.strictEqual(event.reason, '');
+ });
+ });
+
+ describe('`wasClean`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ CloseEvent.prototype,
+ 'wasClean'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to false', () => {
+ const event = new CloseEvent('close');
+
+ assert.strictEqual(event.wasClean, false);
+ });
+ });
+ });
+});
+
+describe('ErrorEvent', () => {
+ it('inherits from `Event`', () => {
+ assert.ok(ErrorEvent.prototype instanceof Event);
+ });
+
+ describe('#ctor', () => {
+ it('takes a `type` argument', () => {
+ const event = new ErrorEvent('foo');
+
+ assert.strictEqual(event.type, 'foo');
+ });
+
+ it('takes an optional `options` argument', () => {
+ const error = new Error('Oops');
+ const event = new ErrorEvent('error', { error, message: error.message });
+
+ assert.strictEqual(event.type, 'error');
+ assert.strictEqual(event.error, error);
+ assert.strictEqual(event.message, error.message);
+ });
+ });
+
+ describe('Properties', () => {
+ describe('`error`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ ErrorEvent.prototype,
+ 'error'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to `null`', () => {
+ const event = new ErrorEvent('error');
+
+ assert.strictEqual(event.error, null);
+ });
+ });
+
+ describe('`message`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ ErrorEvent.prototype,
+ 'message'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to an empty string', () => {
+ const event = new ErrorEvent('error');
+
+ assert.strictEqual(event.message, '');
+ });
+ });
+ });
+});
+
+describe('MessageEvent', () => {
+ it('inherits from `Event`', () => {
+ assert.ok(MessageEvent.prototype instanceof Event);
+ });
+
+ describe('#ctor', () => {
+ it('takes a `type` argument', () => {
+ const event = new MessageEvent('foo');
+
+ assert.strictEqual(event.type, 'foo');
+ });
+
+ it('takes an optional `options` argument', () => {
+ const event = new MessageEvent('message', { data: 'bar' });
+
+ assert.strictEqual(event.type, 'message');
+ assert.strictEqual(event.data, 'bar');
+ });
+ });
+
+ describe('Properties', () => {
+ describe('`data`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ MessageEvent.prototype,
+ 'data'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to `null`', () => {
+ const event = new MessageEvent('message');
+
+ assert.strictEqual(event.data, null);
+ });
+ });
+ });
+});
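
The suite above pins down the shape of the event wrapper classes: each constructor takes a type plus an optional options bag, and the extra fields are exposed through read-only getters. A small sketch using only the constructors and fields asserted above:

'use strict';

// Sketch: construct the event wrappers whose shape is asserted above.
const {
  CloseEvent,
  ErrorEvent,
  MessageEvent
} = require('../lib/event-target');

const close = new CloseEvent('close', { code: 1000, reason: 'bye', wasClean: true });
const failure = new ErrorEvent('error', { error: new Error('Oops'), message: 'Oops' });
const message = new MessageEvent('message', { data: 'hello' });

console.log(close.code, close.reason, close.wasClean); // 1000 'bye' true
console.log(failure.message); // 'Oops'
console.log(message.data); // 'hello'
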
diff --git a/testing/xpcshell/node-ws/test/extension.test.js b/testing/xpcshell/node-ws/test/extension.test.js
new file mode 100644
index 0000000000..a4b3e749d0
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/extension.test.js
@@ -0,0 +1,190 @@
+'use strict';
+
+const assert = require('assert');
+
+const { format, parse } = require('../lib/extension');
+
+describe('extension', () => {
+ describe('parse', () => {
+ it('parses a single extension', () => {
+ assert.deepStrictEqual(parse('foo'), {
+ foo: [{ __proto__: null }],
+ __proto__: null
+ });
+ });
+
+ it('parses params', () => {
+ assert.deepStrictEqual(parse('foo;bar;baz=1;bar=2'), {
+ foo: [{ bar: [true, '2'], baz: ['1'], __proto__: null }],
+ __proto__: null
+ });
+ });
+
+ it('parses multiple extensions', () => {
+ assert.deepStrictEqual(parse('foo,bar;baz,foo;baz'), {
+ foo: [{ __proto__: null }, { baz: [true], __proto__: null }],
+ bar: [{ baz: [true], __proto__: null }],
+ __proto__: null
+ });
+ });
+
+ it('parses quoted params', () => {
+ assert.deepStrictEqual(parse('foo;bar="hi"'), {
+ foo: [{ bar: ['hi'], __proto__: null }],
+ __proto__: null
+ });
+ assert.deepStrictEqual(parse('foo;bar="\\0"'), {
+ foo: [{ bar: ['0'], __proto__: null }],
+ __proto__: null
+ });
+ assert.deepStrictEqual(parse('foo;bar="b\\a\\z"'), {
+ foo: [{ bar: ['baz'], __proto__: null }],
+ __proto__: null
+ });
+ assert.deepStrictEqual(parse('foo;bar="b\\az";bar'), {
+ foo: [{ bar: ['baz', true], __proto__: null }],
+ __proto__: null
+ });
+ assert.throws(
+ () => parse('foo;bar="baz"qux'),
+ /^SyntaxError: Unexpected character at index 13$/
+ );
+ assert.throws(
+ () => parse('foo;bar="baz" qux'),
+ /^SyntaxError: Unexpected character at index 14$/
+ );
+ });
+
+ it('works with names that match `Object.prototype` property names', () => {
+ assert.deepStrictEqual(parse('hasOwnProperty, toString'), {
+ hasOwnProperty: [{ __proto__: null }],
+ toString: [{ __proto__: null }],
+ __proto__: null
+ });
+ assert.deepStrictEqual(parse('foo;constructor'), {
+ foo: [{ constructor: [true], __proto__: null }],
+ __proto__: null
+ });
+ });
+
+ it('ignores the optional white spaces', () => {
+ const header = 'foo; bar\t; \tbaz=1\t ; bar="1"\t\t, \tqux\t ;norf';
+
+ assert.deepStrictEqual(parse(header), {
+ foo: [{ bar: [true, '1'], baz: ['1'], __proto__: null }],
+ qux: [{ norf: [true], __proto__: null }],
+ __proto__: null
+ });
+ });
+
+ it('throws an error if a name is empty', () => {
+ [
+ [',', 0],
+ ['foo,,', 4],
+ ['foo, ,', 6],
+ ['foo;=', 4],
+ ['foo; =', 5],
+ ['foo;;', 4],
+ ['foo; ;', 5],
+ ['foo;bar=,', 8],
+ ['foo;bar=""', 9]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if a white space is misplaced', () => {
+ [
+ [' foo', 0],
+ ['f oo', 2],
+ ['foo;ba r', 7],
+ ['foo;bar =', 8],
+ ['foo;bar= ', 8],
+ ['foo;bar=ba z', 11]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if a token contains invalid characters', () => {
+ [
+ ['f@o', 1],
+ ['f\\oo', 1],
+ ['"foo"', 0],
+ ['f"oo"', 1],
+ ['foo;b@r', 5],
+ ['foo;b\\ar', 5],
+ ['foo;"bar"', 4],
+ ['foo;b"ar"', 5],
+ ['foo;bar=b@z', 9],
+ ['foo;bar=b\\az ', 9],
+ ['foo;bar="b@z"', 10],
+ ['foo;bar="baz;"', 12],
+ ['foo;bar=b"az"', 9],
+ ['foo;bar="\\\\"', 10]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if the header value ends prematurely', () => {
+ [
+ '',
+ 'foo ',
+ 'foo\t',
+ 'foo, ',
+ 'foo;',
+ 'foo;bar ',
+ 'foo;bar,',
+ 'foo;bar; ',
+ 'foo;bar=',
+ 'foo;bar="baz',
+ 'foo;bar="1\\',
+ 'foo;bar="baz" '
+ ].forEach((header) => {
+ assert.throws(
+ () => parse(header),
+ /^SyntaxError: Unexpected end of input$/
+ );
+ });
+ });
+ });
+
+ describe('format', () => {
+ it('formats a single extension', () => {
+ const extensions = format({ foo: {} });
+
+ assert.strictEqual(extensions, 'foo');
+ });
+
+ it('formats params', () => {
+ const extensions = format({ foo: { bar: [true, 2], baz: 1 } });
+
+ assert.strictEqual(extensions, 'foo; bar; bar=2; baz=1');
+ });
+
+ it('formats multiple extensions', () => {
+ const extensions = format({
+ foo: [{}, { baz: true }],
+ bar: { baz: true }
+ });
+
+ assert.strictEqual(extensions, 'foo, foo; baz, bar; baz');
+ });
+ });
+});
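
The helpers above round-trip Sec-WebSocket-Extensions header values: parse() returns null-prototype objects keyed by extension name, with one parameter set per offer, and format() does the inverse. A short sketch reusing values from the cases above:

'use strict';

// Sketch: round-trip an extension header through the helpers tested above.
const { format, parse } = require('../lib/extension');

// One entry per offer of the same extension name.
const offers = parse('permessage-deflate; client_max_window_bits=11, permessage-deflate');
console.log(Object.keys(offers)); // [ 'permessage-deflate' ]
console.log(offers['permessage-deflate'].length); // 2

// format() turns a plain object back into a header value.
const header = format({
  'permessage-deflate': { server_no_context_takeover: true, server_max_window_bits: 10 }
});
console.log(header);
// permessage-deflate; server_no_context_takeover; server_max_window_bits=10
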
diff --git a/testing/xpcshell/node-ws/test/fixtures/ca-certificate.pem b/testing/xpcshell/node-ws/test/fixtures/ca-certificate.pem
new file mode 100644
index 0000000000..0f1658821d
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/ca-certificate.pem
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE-----
+MIIBtTCCAVoCCQCXqK2FegDgiDAKBggqhkjOPQQDAjBhMQswCQYDVQQGEwJJVDEQ
+MA4GA1UECAwHUGVydWdpYTEQMA4GA1UEBwwHRm9saWdubzETMBEGA1UECgwKd2Vi
+c29ja2V0czELMAkGA1UECwwCd3MxDDAKBgNVBAMMA2NhMTAgFw0yMTA1MjYxOTA1
+MjdaGA8yMTIxMDUwMjE5MDUyN1owYTELMAkGA1UEBhMCSVQxEDAOBgNVBAgMB1Bl
+cnVnaWExEDAOBgNVBAcMB0ZvbGlnbm8xEzARBgNVBAoMCndlYnNvY2tldHMxCzAJ
+BgNVBAsMAndzMQwwCgYDVQQDDANjYTEwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
+AASHE75QDQN6XNo/711YSbckaa8r4lt0hGkgtADaBFT9Qn9gcm5omapePZT76Ff9
+rwjMcS+YPXS7J7bk+QHLihJMMAoGCCqGSM49BAMCA0kAMEYCIQCUMdUih+sE0ZTu
+ORlcKiM8DKyiKkGU4Ty+dslz6nVJjAIhAMcSy0SBsBDgsai1s9aCmAGJXCijNb6g
+vfWaatgq+ma2
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/node-ws/test/fixtures/ca-key.pem b/testing/xpcshell/node-ws/test/fixtures/ca-key.pem
new file mode 100644
index 0000000000..a9352fb6a2
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/ca-key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIAa/Onpk27cLkqzje69Bac8yG+LTBXIPWT8yGlyjEFbboAoGCCqGSM49
+AwEHoUQDQgAEhxO+UA0DelzaP+9dWEm3JGmvK+JbdIRpILQA2gRU/UJ/YHJuaJmq
+Xj2U++hX/a8IzHEvmD10uye25PkBy4oSTA==
+-----END EC PRIVATE KEY-----
diff --git a/testing/xpcshell/node-ws/test/fixtures/certificate.pem b/testing/xpcshell/node-ws/test/fixtures/certificate.pem
new file mode 100644
index 0000000000..538553ee08
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/certificate.pem
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE-----
+MIIBujCCAWACCQDjKdAMt3mZhDAKBggqhkjOPQQDAjBkMQswCQYDVQQGEwJJVDEQ
+MA4GA1UECAwHUGVydWdpYTEQMA4GA1UEBwwHRm9saWdubzETMBEGA1UECgwKd2Vi
+c29ja2V0czELMAkGA1UECwwCd3MxDzANBgNVBAMMBnNlcnZlcjAgFw0yMTA1MjYx
+OTEwMjlaGA8yMTIxMDUwMjE5MTAyOVowZDELMAkGA1UEBhMCSVQxEDAOBgNVBAgM
+B1BlcnVnaWExEDAOBgNVBAcMB0ZvbGlnbm8xEzARBgNVBAoMCndlYnNvY2tldHMx
+CzAJBgNVBAsMAndzMQ8wDQYDVQQDDAZzZXJ2ZXIwWTATBgcqhkjOPQIBBggqhkjO
+PQMBBwNCAAQKhyRhdSVOecbJU4O5XkB/iGodbnCOqmchs4TXmE3Prv5SrNDhODDv
+rOWTXwR3/HrrdNfOzPdb54amu8POwpohMAoGCCqGSM49BAMCA0gAMEUCIHMRUSPl
+8FGkDLl8KF1A+SbT2ds3zUOLdYvj30Z2SKSVAiEA84U/R1ly9wf5Rzv93sTHI99o
+KScsr/PHN8rT2pop5pk=
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/node-ws/test/fixtures/client-certificate.pem b/testing/xpcshell/node-ws/test/fixtures/client-certificate.pem
new file mode 100644
index 0000000000..0e20560b8c
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/client-certificate.pem
@@ -0,0 +1,12 @@
+-----BEGIN CERTIFICATE-----
+MIIBtzCCAV0CCQDDIX2dKuKP0zAKBggqhkjOPQQDAjBhMQswCQYDVQQGEwJJVDEQ
+MA4GA1UECAwHUGVydWdpYTEQMA4GA1UEBwwHRm9saWdubzETMBEGA1UECgwKd2Vi
+c29ja2V0czELMAkGA1UECwwCd3MxDDAKBgNVBAMMA2NhMTAgFw0yMTA1MjYxOTE3
+NDJaGA8yMTIxMDUwMjE5MTc0MlowZDELMAkGA1UEBhMCSVQxEDAOBgNVBAgMB1Bl
+cnVnaWExEDAOBgNVBAcMB0ZvbGlnbm8xEzARBgNVBAoMCndlYnNvY2tldHMxCzAJ
+BgNVBAsMAndzMQ8wDQYDVQQDDAZhZ2VudDEwWTATBgcqhkjOPQIBBggqhkjOPQMB
+BwNCAATwHlNS2b13TMhBTSWBXAn6TEPxrsvG93ZZyUlmrEMOXSMX2hI7sv660YNj
++eGyE2CV33XsQxV3TUqi51fUjIu8MAoGCCqGSM49BAMCA0gAMEUCIQCxsqBre+Do
+jnfg6XmCaB0fywNzcDlvdoVNuNAWfVNrSAIgDQmbM0mXZaSAkf4sgtKdXnpE3vrb
+MElb457Bi3B+rkE=
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/node-ws/test/fixtures/client-key.pem b/testing/xpcshell/node-ws/test/fixtures/client-key.pem
new file mode 100644
index 0000000000..e034f57fc2
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/client-key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIKVGskK0UR86WwMo5H0+hNAFGRBYsEevK3ye4y1YberVoAoGCCqGSM49
+AwEHoUQDQgAE8B5TUtm9d0zIQU0lgVwJ+kxD8a7Lxvd2WclJZqxDDl0jF9oSO7L+
+utGDY/nhshNgld917EMVd01KoudX1IyLvA==
+-----END EC PRIVATE KEY-----
diff --git a/testing/xpcshell/node-ws/test/fixtures/key.pem b/testing/xpcshell/node-ws/test/fixtures/key.pem
new file mode 100644
index 0000000000..05bfdb71ed
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/fixtures/key.pem
@@ -0,0 +1,5 @@
+-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEIIjLz7YEWIrsGem2+YV8eJhHhetsjYIrjuqJLbdG7B3zoAoGCCqGSM49
+AwEHoUQDQgAECockYXUlTnnGyVODuV5Af4hqHW5wjqpnIbOE15hNz67+UqzQ4Tgw
+76zlk18Ed/x663TXzsz3W+eGprvDzsKaIQ==
+-----END EC PRIVATE KEY-----
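
The PEM files above form a self-signed CA plus matching server and client certificate/key pairs used as TLS fixtures. Purely as an illustration of how such fixtures are typically wired up (this is not the suite's own setup), an HTTPS server for wss: tests could load them like this:

'use strict';

// Illustration only: load fixtures like the ones above into an HTTPS server
// so a `wss:` client can connect during tests. The real wiring lives in the
// test files themselves.
const fs = require('fs');
const https = require('https');
const path = require('path');

const fixtures = path.join(__dirname, 'fixtures'); // assumed layout

const server = https.createServer({
  cert: fs.readFileSync(path.join(fixtures, 'certificate.pem')),
  key: fs.readFileSync(path.join(fixtures, 'key.pem')),
  // The CA certificate lets the server verify client certificates when
  // `requestCert` is enabled.
  ca: fs.readFileSync(path.join(fixtures, 'ca-certificate.pem'))
});

server.listen(0, () => {
  console.log('TLS test server listening on port', server.address().port);
  server.close();
});
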
diff --git a/testing/xpcshell/node-ws/test/limiter.test.js b/testing/xpcshell/node-ws/test/limiter.test.js
new file mode 100644
index 0000000000..95141f0f5c
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/limiter.test.js
@@ -0,0 +1,41 @@
+'use strict';
+
+const assert = require('assert');
+
+const Limiter = require('../lib/limiter');
+
+describe('Limiter', () => {
+ describe('#ctor', () => {
+ it('takes a `concurrency` argument', () => {
+ const limiter = new Limiter(0);
+
+ assert.strictEqual(limiter.concurrency, Infinity);
+ });
+ });
+
+ describe('#kRun', () => {
+ it('limits the number of jobs allowed to run concurrently', (done) => {
+ const limiter = new Limiter(1);
+
+ limiter.add((callback) => {
+ setImmediate(() => {
+ callback();
+
+ assert.strictEqual(limiter.jobs.length, 0);
+ assert.strictEqual(limiter.pending, 1);
+ });
+ });
+
+ limiter.add((callback) => {
+ setImmediate(() => {
+ callback();
+
+ assert.strictEqual(limiter.pending, 0);
+ done();
+ });
+ });
+
+ assert.strictEqual(limiter.jobs.length, 1);
+ });
+ });
+});
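
The limiter above is a small job queue: the constructor takes a concurrency limit (0 maps to Infinity), add() enqueues a job that receives a completion callback, and `pending`/`jobs` expose the running count and the queue. A minimal sketch of that calling pattern:

'use strict';

// Sketch of the calling pattern exercised above: at most `concurrency`
// jobs run at once; each job signals completion via its callback.
const Limiter = require('../lib/limiter');

const limiter = new Limiter(2); // allow two jobs to run concurrently

for (let i = 0; i < 5; i++) {
  limiter.add((callback) => {
    setTimeout(() => {
      console.log(`job ${i} done, ${limiter.jobs.length} still queued`);
      callback(); // frees a slot so the next queued job can start
    }, 10);
  });
}
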
diff --git a/testing/xpcshell/node-ws/test/permessage-deflate.test.js b/testing/xpcshell/node-ws/test/permessage-deflate.test.js
new file mode 100644
index 0000000000..a9c9bf165c
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/permessage-deflate.test.js
@@ -0,0 +1,647 @@
+'use strict';
+
+const assert = require('assert');
+
+const PerMessageDeflate = require('../lib/permessage-deflate');
+const extension = require('../lib/extension');
+
+describe('PerMessageDeflate', () => {
+ describe('#offer', () => {
+ it('creates an offer', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+
+ assert.deepStrictEqual(perMessageDeflate.offer(), {
+ client_max_window_bits: true
+ });
+ });
+
+ it('uses the configuration options', () => {
+ const perMessageDeflate = new PerMessageDeflate({
+ serverNoContextTakeover: true,
+ clientNoContextTakeover: true,
+ serverMaxWindowBits: 10,
+ clientMaxWindowBits: 11
+ });
+
+ assert.deepStrictEqual(perMessageDeflate.offer(), {
+ server_no_context_takeover: true,
+ client_no_context_takeover: true,
+ server_max_window_bits: 10,
+ client_max_window_bits: 11
+ });
+ });
+ });
+
+ describe('#accept', () => {
+ it('throws an error if a parameter has multiple values', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover; server_no_context_takeover'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: Parameter "server_no_context_takeover" must have only a single value$/
+ );
+ });
+
+ it('throws an error if a parameter has an invalid name', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const extensions = extension.parse('permessage-deflate;foo');
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: Unknown parameter "foo"$/
+ );
+ });
+
+ it('throws an error if client_no_context_takeover has a value', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover=10'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "client_no_context_takeover": 10$/
+ );
+ });
+
+ it('throws an error if server_no_context_takeover has a value', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover=10'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "server_no_context_takeover": 10$/
+ );
+ });
+
+ it('throws an error if server_max_window_bits has an invalid value', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+
+ let extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits=7'
+ );
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "server_max_window_bits": 7$/
+ );
+
+ extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits'
+ );
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "server_max_window_bits": true$/
+ );
+ });
+
+ describe('As server', () => {
+ it('accepts an offer with no parameters', () => {
+ const perMessageDeflate = new PerMessageDeflate({}, true);
+
+ assert.deepStrictEqual(perMessageDeflate.accept([{}]), {});
+ });
+
+ it('accepts an offer with parameters', () => {
+ const perMessageDeflate = new PerMessageDeflate({}, true);
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover; ' +
+ 'client_no_context_takeover; server_max_window_bits=10; ' +
+ 'client_max_window_bits=11'
+ );
+
+ assert.deepStrictEqual(
+ perMessageDeflate.accept(extensions['permessage-deflate']),
+ {
+ server_no_context_takeover: true,
+ client_no_context_takeover: true,
+ server_max_window_bits: 10,
+ client_max_window_bits: 11,
+ __proto__: null
+ }
+ );
+ });
+
+ it('prefers the configuration options', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ {
+ serverNoContextTakeover: true,
+ clientNoContextTakeover: true,
+ serverMaxWindowBits: 12,
+ clientMaxWindowBits: 11
+ },
+ true
+ );
+ const extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits=14; client_max_window_bits=13'
+ );
+
+ assert.deepStrictEqual(
+ perMessageDeflate.accept(extensions['permessage-deflate']),
+ {
+ server_no_context_takeover: true,
+ client_no_context_takeover: true,
+ server_max_window_bits: 12,
+ client_max_window_bits: 11,
+ __proto__: null
+ }
+ );
+ });
+
+ it('accepts the first supported offer', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ { serverMaxWindowBits: 11 },
+ true
+ );
+ const extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits=10, permessage-deflate'
+ );
+
+ assert.deepStrictEqual(
+ perMessageDeflate.accept(extensions['permessage-deflate']),
+ {
+ server_max_window_bits: 11,
+ __proto__: null
+ }
+ );
+ });
+
+ it('throws an error if server_no_context_takeover is unsupported', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ { serverNoContextTakeover: false },
+ true
+ );
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: None of the extension offers can be accepted$/
+ );
+ });
+
+ it('throws an error if server_max_window_bits is unsupported', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ { serverMaxWindowBits: false },
+ true
+ );
+ const extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits=10'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: None of the extension offers can be accepted$/
+ );
+ });
+
+ it('throws an error if server_max_window_bits is less than configuration', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ { serverMaxWindowBits: 11 },
+ true
+ );
+ const extensions = extension.parse(
+ 'permessage-deflate; server_max_window_bits=10'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: None of the extension offers can be accepted$/
+ );
+ });
+
+ it('throws an error if client_max_window_bits is unsupported on client', () => {
+ const perMessageDeflate = new PerMessageDeflate(
+ { clientMaxWindowBits: 10 },
+ true
+ );
+ const extensions = extension.parse('permessage-deflate');
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: None of the extension offers can be accepted$/
+ );
+ });
+
+ it('throws an error if client_max_window_bits has an invalid value', () => {
+ const perMessageDeflate = new PerMessageDeflate({}, true);
+
+ const extensions = extension.parse(
+ 'permessage-deflate; client_max_window_bits=16'
+ );
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "client_max_window_bits": 16$/
+ );
+ });
+ });
+
+ describe('As client', () => {
+ it('accepts a response with no parameters', () => {
+ const perMessageDeflate = new PerMessageDeflate({});
+
+ assert.deepStrictEqual(perMessageDeflate.accept([{}]), {});
+ });
+
+ it('accepts a response with parameters', () => {
+ const perMessageDeflate = new PerMessageDeflate({});
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover; ' +
+ 'client_no_context_takeover; server_max_window_bits=10; ' +
+ 'client_max_window_bits=11'
+ );
+
+ assert.deepStrictEqual(
+ perMessageDeflate.accept(extensions['permessage-deflate']),
+ {
+ server_no_context_takeover: true,
+ client_no_context_takeover: true,
+ server_max_window_bits: 10,
+ client_max_window_bits: 11,
+ __proto__: null
+ }
+ );
+ });
+
+ it('throws an error if client_no_context_takeover is unsupported', () => {
+ const perMessageDeflate = new PerMessageDeflate({
+ clientNoContextTakeover: false
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: Unexpected parameter "client_no_context_takeover"$/
+ );
+ });
+
+ it('throws an error if client_max_window_bits is unsupported', () => {
+ const perMessageDeflate = new PerMessageDeflate({
+ clientMaxWindowBits: false
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_max_window_bits=10'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: Unexpected or invalid parameter "client_max_window_bits"$/
+ );
+ });
+
+ it('throws an error if client_max_window_bits is greater than configuration', () => {
+ const perMessageDeflate = new PerMessageDeflate({
+ clientMaxWindowBits: 10
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_max_window_bits=11'
+ );
+
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^Error: Unexpected or invalid parameter "client_max_window_bits"$/
+ );
+ });
+
+ it('throws an error if client_max_window_bits has an invalid value', () => {
+ const perMessageDeflate = new PerMessageDeflate();
+
+ let extensions = extension.parse(
+ 'permessage-deflate; client_max_window_bits=16'
+ );
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "client_max_window_bits": 16$/
+ );
+
+ extensions = extension.parse(
+ 'permessage-deflate; client_max_window_bits'
+ );
+ assert.throws(
+ () => perMessageDeflate.accept(extensions['permessage-deflate']),
+ /^TypeError: Invalid value for parameter "client_max_window_bits": true$/
+ );
+ });
+
+ it('uses the config value if client_max_window_bits is not specified', () => {
+ const perMessageDeflate = new PerMessageDeflate({
+ clientMaxWindowBits: 10
+ });
+
+ assert.deepStrictEqual(perMessageDeflate.accept([{}]), {
+ client_max_window_bits: 10
+ });
+ });
+ });
+ });
+
+ describe('#compress and #decompress', () => {
+ it('works with unfragmented messages', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const buf = Buffer.from([1, 2, 3]);
+
+ perMessageDeflate.accept([{}]);
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(data, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ done();
+ });
+ });
+ });
+
+ it('works with fragmented messages', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const buf = Buffer.from([1, 2, 3, 4]);
+
+ perMessageDeflate.accept([{}]);
+
+ perMessageDeflate.compress(buf.slice(0, 2), false, (err, compressed1) => {
+ if (err) return done(err);
+
+ perMessageDeflate.compress(buf.slice(2), true, (err, compressed2) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(compressed1, false, (err, data1) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(compressed2, true, (err, data2) => {
+ if (err) return done(err);
+
+ assert.ok(Buffer.concat([data1, data2]).equals(buf));
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it('works with the negotiated parameters', (done) => {
+ const perMessageDeflate = new PerMessageDeflate({
+ memLevel: 5,
+ level: 9
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; server_no_context_takeover; ' +
+ 'client_no_context_takeover; server_max_window_bits=10; ' +
+ 'client_max_window_bits=11'
+ );
+ const buf = Buffer.from("Some compressible data, it's compressible.");
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(data, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ done();
+ });
+ });
+ });
+
+ it('honors the `level` option', (done) => {
+ const lev0 = new PerMessageDeflate({
+ zlibDeflateOptions: { level: 0 }
+ });
+ const lev9 = new PerMessageDeflate({
+ zlibDeflateOptions: { level: 9 }
+ });
+ const extensionStr =
+ 'permessage-deflate; server_no_context_takeover; ' +
+ 'client_no_context_takeover; server_max_window_bits=10; ' +
+ 'client_max_window_bits=11';
+ const buf = Buffer.from("Some compressible data, it's compressible.");
+
+ lev0.accept(extension.parse(extensionStr)['permessage-deflate']);
+ lev9.accept(extension.parse(extensionStr)['permessage-deflate']);
+
+ lev0.compress(buf, true, (err, compressed1) => {
+ if (err) return done(err);
+
+ lev0.decompress(compressed1, true, (err, decompressed1) => {
+ if (err) return done(err);
+
+ lev9.compress(buf, true, (err, compressed2) => {
+ if (err) return done(err);
+
+ lev9.decompress(compressed2, true, (err, decompressed2) => {
+ if (err) return done(err);
+
+ // Level 0 compression actually adds a few bytes due to headers.
+ assert.ok(compressed1.length > buf.length);
+ // Level 9 should not, of course.
+ assert.ok(compressed2.length < buf.length);
+ // Ensure they both decompress back properly.
+ assert.ok(decompressed1.equals(buf));
+ assert.ok(decompressed2.equals(buf));
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it('honors the `zlib{Deflate,Inflate}Options` option', (done) => {
+ const lev0 = new PerMessageDeflate({
+ zlibDeflateOptions: {
+ level: 0,
+ chunkSize: 256
+ },
+ zlibInflateOptions: {
+ chunkSize: 2048
+ }
+ });
+ const lev9 = new PerMessageDeflate({
+ zlibDeflateOptions: {
+ level: 9,
+ chunkSize: 128
+ },
+ zlibInflateOptions: {
+ chunkSize: 1024
+ }
+ });
+
+ // Note no context takeover so we can get a hold of the raw streams after
+ // we do the dance.
+ const extensionStr =
+ 'permessage-deflate; server_max_window_bits=10; ' +
+ 'client_max_window_bits=11';
+ const buf = Buffer.from("Some compressible data, it's compressible.");
+
+ lev0.accept(extension.parse(extensionStr)['permessage-deflate']);
+ lev9.accept(extension.parse(extensionStr)['permessage-deflate']);
+
+ lev0.compress(buf, true, (err, compressed1) => {
+ if (err) return done(err);
+
+ lev0.decompress(compressed1, true, (err, decompressed1) => {
+ if (err) return done(err);
+
+ lev9.compress(buf, true, (err, compressed2) => {
+ if (err) return done(err);
+
+ lev9.decompress(compressed2, true, (err, decompressed2) => {
+ if (err) return done(err);
+ // Level 0 compression actually adds a few bytes due to headers.
+ assert.ok(compressed1.length > buf.length);
+ // Level 9 should not, of course.
+ assert.ok(compressed2.length < buf.length);
+ // Ensure they both decompress back properly.
+ assert.ok(decompressed1.equals(buf));
+ assert.ok(decompressed2.equals(buf));
+
+ // Assert options were set.
+ assert.ok(lev0._deflate._level === 0);
+ assert.ok(lev9._deflate._level === 9);
+ assert.ok(lev0._deflate._chunkSize === 256);
+ assert.ok(lev9._deflate._chunkSize === 128);
+ assert.ok(lev0._inflate._chunkSize === 2048);
+ assert.ok(lev9._inflate._chunkSize === 1024);
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it("doesn't use contex takeover if not allowed", (done) => {
+ const perMessageDeflate = new PerMessageDeflate({}, true);
+ const extensions = extension.parse(
+ 'permessage-deflate;server_no_context_takeover'
+ );
+ const buf = Buffer.from('foofoo');
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ perMessageDeflate.compress(buf, true, (err, compressed1) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(compressed1, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ perMessageDeflate.compress(data, true, (err, compressed2) => {
+ if (err) return done(err);
+
+ assert.strictEqual(compressed2.length, compressed1.length);
+ perMessageDeflate.decompress(compressed2, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it('uses context takeover if allowed', (done) => {
+ const perMessageDeflate = new PerMessageDeflate({}, true);
+ const extensions = extension.parse('permessage-deflate');
+ const buf = Buffer.from('foofoo');
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ perMessageDeflate.compress(buf, true, (err, compressed1) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(compressed1, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ perMessageDeflate.compress(data, true, (err, compressed2) => {
+ if (err) return done(err);
+
+ assert.ok(compressed2.length < compressed1.length);
+ perMessageDeflate.decompress(compressed2, true, (err, data) => {
+ if (err) return done(err);
+
+ assert.ok(data.equals(buf));
+ done();
+ });
+ });
+ });
+ });
+ });
+
+ it('calls the callback when an error occurs (inflate)', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const data = Buffer.from('something invalid');
+
+ perMessageDeflate.accept([{}]);
+ perMessageDeflate.decompress(data, true, (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'Z_DATA_ERROR');
+ assert.strictEqual(err.errno, -3);
+ done();
+ });
+ });
+
+ it("doesn't call the callback twice when `maxPayload` is exceeded", (done) => {
+ const perMessageDeflate = new PerMessageDeflate({}, false, 25);
+ const buf = Buffer.from('A'.repeat(50));
+
+ perMessageDeflate.accept([{}]);
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ perMessageDeflate.decompress(data, true, (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.message, 'Max payload size exceeded');
+ done();
+ });
+ });
+ });
+
+ it('calls the callback if the deflate stream is closed prematurely', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const buf = Buffer.from('A'.repeat(50));
+
+ perMessageDeflate.accept([{}]);
+ perMessageDeflate.compress(buf, true, (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'The deflate stream was closed while data was being processed'
+ );
+ done();
+ });
+
+ process.nextTick(() => perMessageDeflate.cleanup());
+ });
+
+ it('recreates the inflate stream if it ends', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover; ' +
+ 'server_no_context_takeover'
+ );
+ const buf = Buffer.from('33343236313533b7000000', 'hex');
+ const expected = Buffer.from('12345678');
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ perMessageDeflate.decompress(buf, true, (err, data) => {
+ assert.ok(data.equals(expected));
+
+ perMessageDeflate.decompress(buf, true, (err, data) => {
+ assert.ok(data.equals(expected));
+ done();
+ });
+ });
+ });
+ });
+});
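
Most of the compression tests above follow the same flow: accept a parsed offer, then compress and decompress per message, with the second argument (`fin`) marking the last fragment. A compact sketch of that flow:

'use strict';

// Sketch of the accept + compress/decompress flow used throughout the
// tests above. `true` as the second constructor argument means "act as
// the server side of the negotiation".
const PerMessageDeflate = require('../lib/permessage-deflate');

const pmd = new PerMessageDeflate({}, true);
pmd.accept([{}]); // accept an offer with no parameters, as in the tests

const buf = Buffer.from('hello hello hello');

pmd.compress(buf, true, (err, compressed) => {
  if (err) throw err;

  pmd.decompress(compressed, true, (err, decompressed) => {
    if (err) throw err;

    console.log(decompressed.equals(buf)); // true
    pmd.cleanup(); // release the underlying zlib streams
  });
});
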
diff --git a/testing/xpcshell/node-ws/test/receiver.test.js b/testing/xpcshell/node-ws/test/receiver.test.js
new file mode 100644
index 0000000000..7ee35f7402
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/receiver.test.js
@@ -0,0 +1,1086 @@
+'use strict';
+
+const assert = require('assert');
+const crypto = require('crypto');
+
+const PerMessageDeflate = require('../lib/permessage-deflate');
+const Receiver = require('../lib/receiver');
+const Sender = require('../lib/sender');
+const { EMPTY_BUFFER, kStatusCode } = require('../lib/constants');
+
+describe('Receiver', () => {
+ it('parses an unmasked text message', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.from('Hello'));
+ assert.ok(!isBinary);
+ done();
+ });
+
+ receiver.write(Buffer.from('810548656c6c6f', 'hex'));
+ });
+
+ it('parses a close message', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('conclude', (code, data) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(data, EMPTY_BUFFER);
+ done();
+ });
+
+ receiver.write(Buffer.from('8800', 'hex'));
+ });
+
+ it('parses a close message spanning multiple writes', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('conclude', (code, data) => {
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(data, Buffer.from('DONE'));
+ done();
+ });
+
+ receiver.write(Buffer.from('8806', 'hex'));
+ receiver.write(Buffer.from('03e8444F4E45', 'hex'));
+ });
+
+ it('parses a masked text message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.from('5:::{"name":"echo"}'));
+ assert.ok(!isBinary);
+ done();
+ });
+
+ receiver.write(
+ Buffer.from('81933483a86801b992524fa1c60959e68a5216e6cb005ba1d5', 'hex')
+ );
+ });
+
+ it('parses a masked text message longer than 125 B', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('A'.repeat(200));
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x01,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(!isBinary);
+ done();
+ });
+
+ receiver.write(frame.slice(0, 2));
+ setImmediate(() => receiver.write(frame.slice(2)));
+ });
+
+ it('parses a really long masked text message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('A'.repeat(64 * 1024));
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x01,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(!isBinary);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a 300 B fragmented masked text message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('A'.repeat(300));
+
+ const fragment1 = msg.slice(0, 150);
+ const fragment2 = msg.slice(150);
+
+ const options = { rsv1: false, mask: true, readOnly: true };
+
+ const frame1 = Buffer.concat(
+ Sender.frame(fragment1, {
+ fin: false,
+ opcode: 0x01,
+ ...options
+ })
+ );
+ const frame2 = Buffer.concat(
+ Sender.frame(fragment2, {
+ fin: true,
+ opcode: 0x00,
+ ...options
+ })
+ );
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(!isBinary);
+ done();
+ });
+
+ receiver.write(frame1);
+ receiver.write(frame2);
+ });
+
+ it('parses a ping message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('Hello');
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x09,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('ping', (data) => {
+ assert.deepStrictEqual(data, msg);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a ping message with no data', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('ping', (data) => {
+ assert.strictEqual(data, EMPTY_BUFFER);
+ done();
+ });
+
+ receiver.write(Buffer.from('8900', 'hex'));
+ });
+
+ it('parses a 300 B fragmented masked text message with a ping in the middle (1/2)', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('A'.repeat(300));
+ const pingMessage = Buffer.from('Hello');
+
+ const fragment1 = msg.slice(0, 150);
+ const fragment2 = msg.slice(150);
+
+ const options = { rsv1: false, mask: true, readOnly: true };
+
+ const frame1 = Buffer.concat(
+ Sender.frame(fragment1, {
+ fin: false,
+ opcode: 0x01,
+ ...options
+ })
+ );
+ const frame2 = Buffer.concat(
+ Sender.frame(pingMessage, {
+ fin: true,
+ opcode: 0x09,
+ ...options
+ })
+ );
+ const frame3 = Buffer.concat(
+ Sender.frame(fragment2, {
+ fin: true,
+ opcode: 0x00,
+ ...options
+ })
+ );
+
+ let gotPing = false;
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(!isBinary);
+ assert.ok(gotPing);
+ done();
+ });
+ receiver.on('ping', (data) => {
+ gotPing = true;
+ assert.ok(data.equals(pingMessage));
+ });
+
+ receiver.write(frame1);
+ receiver.write(frame2);
+ receiver.write(frame3);
+ });
+
+ it('parses a 300 B fragmented masked text message with a ping in the middle (2/2)', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = Buffer.from('A'.repeat(300));
+ const pingMessage = Buffer.from('Hello');
+
+ const fragment1 = msg.slice(0, 150);
+ const fragment2 = msg.slice(150);
+
+ const options = { rsv1: false, mask: true, readOnly: false };
+
+ const frame1 = Buffer.concat(
+ Sender.frame(Buffer.from(fragment1), {
+ fin: false,
+ opcode: 0x01,
+ ...options
+ })
+ );
+ const frame2 = Buffer.concat(
+ Sender.frame(Buffer.from(pingMessage), {
+ fin: true,
+ opcode: 0x09,
+ ...options
+ })
+ );
+ const frame3 = Buffer.concat(
+ Sender.frame(Buffer.from(fragment2), {
+ fin: true,
+ opcode: 0x00,
+ ...options
+ })
+ );
+
+ let chunks = [];
+ const splitBuffer = (buf) => {
+ const i = Math.floor(buf.length / 2);
+ return [buf.slice(0, i), buf.slice(i)];
+ };
+
+ chunks = chunks.concat(splitBuffer(frame1));
+ chunks = chunks.concat(splitBuffer(frame2));
+ chunks = chunks.concat(splitBuffer(frame3));
+
+ let gotPing = false;
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(!isBinary);
+ assert.ok(gotPing);
+ done();
+ });
+ receiver.on('ping', (data) => {
+ gotPing = true;
+ assert.ok(data.equals(pingMessage));
+ });
+
+ for (let i = 0; i < chunks.length; ++i) {
+ receiver.write(chunks[i]);
+ }
+ });
+
+ it('parses a 100 B masked binary message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = crypto.randomBytes(100);
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x02,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(isBinary);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a 256 B masked binary message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = crypto.randomBytes(256);
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x02,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(isBinary);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a 200 KiB masked binary message', (done) => {
+ const receiver = new Receiver({ isServer: true });
+ const msg = crypto.randomBytes(200 * 1024);
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x02,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(isBinary);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a 200 KiB unmasked binary message', (done) => {
+ const receiver = new Receiver();
+ const msg = crypto.randomBytes(200 * 1024);
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x02,
+ mask: false,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, msg);
+ assert.ok(isBinary);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('parses a compressed message', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+ const buf = Buffer.from('Hello');
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, buf);
+ assert.ok(!isBinary);
+ done();
+ });
+
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0xc1, data.length]));
+ receiver.write(data);
+ });
+ });
+
+ it('parses a compressed and fragmented message', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+ const buf1 = Buffer.from('foo');
+ const buf2 = Buffer.from('bar');
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.concat([buf1, buf2]));
+ assert.ok(!isBinary);
+ done();
+ });
+
+ perMessageDeflate.compress(buf1, false, (err, fragment1) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0x41, fragment1.length]));
+ receiver.write(fragment1);
+
+ perMessageDeflate.compress(buf2, true, (err, fragment2) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0x80, fragment2.length]));
+ receiver.write(fragment2);
+ });
+ });
+ });
+
+ it('parses a buffer with thousands of frames', (done) => {
+ const buf = Buffer.allocUnsafe(40000);
+
+ for (let i = 0; i < buf.length; i += 2) {
+ buf[i] = 0x81;
+ buf[i + 1] = 0x00;
+ }
+
+ const receiver = new Receiver();
+ let counter = 0;
+
+ receiver.on('message', (data, isBinary) => {
+ assert.strictEqual(data, EMPTY_BUFFER);
+ assert.ok(!isBinary);
+ if (++counter === 20000) done();
+ });
+
+ receiver.write(buf);
+ });
+
+ it('resets `totalPayloadLength` only on final frame (unfragmented)', (done) => {
+ const receiver = new Receiver({ maxPayload: 10 });
+
+ receiver.on('message', (data, isBinary) => {
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ assert.deepStrictEqual(data, Buffer.from('Hello'));
+ assert.ok(!isBinary);
+ done();
+ });
+
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ receiver.write(Buffer.from('810548656c6c6f', 'hex'));
+ });
+
+ it('resets `totalPayloadLength` only on final frame (fragmented)', (done) => {
+ const receiver = new Receiver({ maxPayload: 10 });
+
+ receiver.on('message', (data, isBinary) => {
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ assert.deepStrictEqual(data, Buffer.from('Hello'));
+ assert.ok(!isBinary);
+ done();
+ });
+
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ receiver.write(Buffer.from('01024865', 'hex'));
+ assert.strictEqual(receiver._totalPayloadLength, 2);
+ receiver.write(Buffer.from('80036c6c6f', 'hex'));
+ });
+
+ it('resets `totalPayloadLength` only on final frame (fragmented + ping)', (done) => {
+ const receiver = new Receiver({ maxPayload: 10 });
+ let data;
+
+ receiver.on('ping', (buf) => {
+ assert.strictEqual(receiver._totalPayloadLength, 2);
+ data = buf;
+ });
+ receiver.on('message', (buf, isBinary) => {
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ assert.deepStrictEqual(data, EMPTY_BUFFER);
+ assert.deepStrictEqual(buf, Buffer.from('Hello'));
+ assert.ok(isBinary);
+ done();
+ });
+
+ assert.strictEqual(receiver._totalPayloadLength, 0);
+ receiver.write(Buffer.from('02024865', 'hex'));
+ receiver.write(Buffer.from('8900', 'hex'));
+ receiver.write(Buffer.from('80036c6c6f', 'hex'));
+ });
+
+ it('ignores any data after a close frame', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+ const results = [];
+ const push = results.push.bind(results);
+
+ receiver.on('conclude', push).on('message', push);
+ receiver.on('finish', () => {
+ assert.deepStrictEqual(results, [
+ EMPTY_BUFFER,
+ false,
+ 1005,
+ EMPTY_BUFFER
+ ]);
+ done();
+ });
+
+ receiver.write(Buffer.from([0xc1, 0x01, 0x00]));
+ receiver.write(Buffer.from([0x88, 0x00]));
+ receiver.write(Buffer.from([0x81, 0x00]));
+ });
+
+ it('emits an error if RSV1 is on and permessage-deflate is disabled', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_RSV_1');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: RSV1 must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0xc2, 0x80, 0x00, 0x00, 0x00, 0x00]));
+ });
+
+ it('emits an error if RSV1 is on and opcode is 0', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_RSV_1');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: RSV1 must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x40, 0x00]));
+ });
+
+ it('emits an error if RSV2 is on', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_RSV_2_3');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: RSV2 and RSV3 must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0xa2, 0x00]));
+ });
+
+ it('emits an error if RSV3 is on', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_RSV_2_3');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: RSV2 and RSV3 must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x92, 0x00]));
+ });
+
+ it('emits an error if the first frame in a fragmented message has opcode 0', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 0'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x00, 0x00]));
+ });
+
+ it('emits an error if a frame has opcode 1 in the middle of a fragmented message', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 1'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x01, 0x00]));
+ receiver.write(Buffer.from([0x01, 0x00]));
+ });
+
+ it('emits an error if a frame has opcode 2 in the middle of a fragmented message', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 2'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x01, 0x00]));
+ receiver.write(Buffer.from([0x02, 0x00]));
+ });
+
+ it('emits an error if a control frame has the FIN bit off', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_EXPECTED_FIN');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: FIN must be set'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x09, 0x00]));
+ });
+
+ it('emits an error if a control frame has the RSV1 bit on', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_RSV_1');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: RSV1 must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0xc9, 0x00]));
+ });
+
+ it('emits an error if a control frame has the FIN bit off', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_EXPECTED_FIN');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: FIN must be set'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x09, 0x00]));
+ });
+
+ it('emits an error if a frame has the MASK bit off (server mode)', (done) => {
+ const receiver = new Receiver({ isServer: true });
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_EXPECTED_MASK');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: MASK must be set'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x81, 0x02, 0x68, 0x69]));
+ });
+
+ it('emits an error if a frame has the MASK bit on (client mode)', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNEXPECTED_MASK');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: MASK must be clear'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(
+ Buffer.from([0x81, 0x82, 0x56, 0x3a, 0xac, 0x80, 0x3e, 0x53])
+ );
+ });
+
+ it('emits an error if a control frame has a payload bigger than 125 B', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid payload length 126'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x89, 0x7e]));
+ });
+
+ it('emits an error if a data frame has a payload bigger than 2^53 - 1 B', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH');
+ assert.strictEqual(
+ err.message,
+ 'Unsupported WebSocket frame: payload length > 2^53 - 1'
+ );
+ assert.strictEqual(err[kStatusCode], 1009);
+ done();
+ });
+
+ receiver.write(Buffer.from([0x82, 0x7f]));
+ setImmediate(() =>
+ receiver.write(
+ Buffer.from([0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
+ )
+ );
+ });
+
+ it('emits an error if a text frame contains invalid UTF-8 data (1/2)', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_UTF8');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid UTF-8 sequence'
+ );
+ assert.strictEqual(err[kStatusCode], 1007);
+ done();
+ });
+
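+    // 0xce 0xba is valid UTF-8 ("κ"), but 0xe1 0xbd is a truncated 3-byte sequence.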
+ receiver.write(Buffer.from([0x81, 0x04, 0xce, 0xba, 0xe1, 0xbd]));
+ });
+
+ it('emits an error if a text frame contains invalid UTF-8 data (2/2)', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: {
+ 'permessage-deflate': perMessageDeflate
+ }
+ });
+ const buf = Buffer.from([0xce, 0xba, 0xe1, 0xbd]);
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_UTF8');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid UTF-8 sequence'
+ );
+ assert.strictEqual(err[kStatusCode], 1007);
+ done();
+ });
+
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0xc1, data.length]));
+ receiver.write(data);
+ });
+ });
+
+ it('emits an error if a close frame has a payload of 1 B', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid payload length 1'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
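+    // Close frame with a 1-byte payload; a close payload must be empty or at least 2 bytes long.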
+ receiver.write(Buffer.from([0x88, 0x01, 0x00]));
+ });
+
+ it('emits an error if a close frame contains an invalid close code', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_CLOSE_CODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid status code 0'
+ );
+ assert.strictEqual(err[kStatusCode], 1002);
+ done();
+ });
+
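+    // Close frame carrying status code 0, which is outside the valid range.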
+ receiver.write(Buffer.from([0x88, 0x02, 0x00, 0x00]));
+ });
+
+ it('emits an error if a close frame contains invalid UTF-8 data', (done) => {
+ const receiver = new Receiver();
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_UTF8');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid UTF-8 sequence'
+ );
+ assert.strictEqual(err[kStatusCode], 1007);
+ done();
+ });
+
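+    // Close frame with status code 1007 followed by an invalid UTF-8 reason.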
+ receiver.write(
+ Buffer.from([0x88, 0x06, 0x03, 0xef, 0xce, 0xba, 0xe1, 0xbd])
+ );
+ });
+
+ it('emits an error if a frame payload length is bigger than `maxPayload`', (done) => {
+ const receiver = new Receiver({ isServer: true, maxPayload: 20 * 1024 });
+ const msg = crypto.randomBytes(200 * 1024);
+
+ const list = Sender.frame(msg, {
+ fin: true,
+ rsv1: false,
+ opcode: 0x02,
+ mask: true,
+ readOnly: true
+ });
+
+ const frame = Buffer.concat(list);
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH');
+ assert.strictEqual(err.message, 'Max payload size exceeded');
+ assert.strictEqual(err[kStatusCode], 1009);
+ done();
+ });
+
+ receiver.write(frame);
+ });
+
+ it('emits an error if the message length exceeds `maxPayload`', (done) => {
+ const perMessageDeflate = new PerMessageDeflate({}, false, 25);
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: { 'permessage-deflate': perMessageDeflate },
+ isServer: false,
+ maxPayload: 25
+ });
+ const buf = Buffer.from('A'.repeat(50));
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH');
+ assert.strictEqual(err.message, 'Max payload size exceeded');
+ assert.strictEqual(err[kStatusCode], 1009);
+ done();
+ });
+
+ perMessageDeflate.compress(buf, true, (err, data) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0xc1, data.length]));
+ receiver.write(data);
+ });
+ });
+
+ it('emits an error if the sum of fragment lengths exceeds `maxPayload`', (done) => {
+ const perMessageDeflate = new PerMessageDeflate({}, false, 25);
+ perMessageDeflate.accept([{}]);
+
+ const receiver = new Receiver({
+ extensions: { 'permessage-deflate': perMessageDeflate },
+ isServer: false,
+ maxPayload: 25
+ });
+ const buf = Buffer.from('A'.repeat(15));
+
+ receiver.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH');
+ assert.strictEqual(err.message, 'Max payload size exceeded');
+ assert.strictEqual(err[kStatusCode], 1009);
+ done();
+ });
+
+ perMessageDeflate.compress(buf, false, (err, fragment1) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0x41, fragment1.length]));
+ receiver.write(fragment1);
+
+ perMessageDeflate.compress(buf, true, (err, fragment2) => {
+ if (err) return done(err);
+
+ receiver.write(Buffer.from([0x80, fragment2.length]));
+ receiver.write(fragment2);
+ });
+ });
+ });
+
+ it("honors the 'nodebuffer' binary type", (done) => {
+ const receiver = new Receiver();
+ const frags = [
+ crypto.randomBytes(7321),
+ crypto.randomBytes(137),
+ crypto.randomBytes(285787),
+ crypto.randomBytes(3)
+ ];
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.concat(frags));
+ assert.ok(isBinary);
+ done();
+ });
+
+ frags.forEach((frag, i) => {
+ Sender.frame(frag, {
+ fin: i === frags.length - 1,
+ opcode: i === 0 ? 2 : 0,
+ readOnly: true,
+ mask: false,
+ rsv1: false
+ }).forEach((buf) => receiver.write(buf));
+ });
+ });
+
+ it("honors the 'arraybuffer' binary type", (done) => {
+ const receiver = new Receiver({ binaryType: 'arraybuffer' });
+ const frags = [
+ crypto.randomBytes(19221),
+ crypto.randomBytes(954),
+ crypto.randomBytes(623987)
+ ];
+
+ receiver.on('message', (data, isBinary) => {
+ assert.ok(data instanceof ArrayBuffer);
+ assert.deepStrictEqual(Buffer.from(data), Buffer.concat(frags));
+ assert.ok(isBinary);
+ done();
+ });
+
+ frags.forEach((frag, i) => {
+ Sender.frame(frag, {
+ fin: i === frags.length - 1,
+ opcode: i === 0 ? 2 : 0,
+ readOnly: true,
+ mask: false,
+ rsv1: false
+ }).forEach((buf) => receiver.write(buf));
+ });
+ });
+
+ it("honors the 'fragments' binary type", (done) => {
+ const receiver = new Receiver({ binaryType: 'fragments' });
+ const frags = [
+ crypto.randomBytes(17),
+ crypto.randomBytes(419872),
+ crypto.randomBytes(83),
+ crypto.randomBytes(9928),
+ crypto.randomBytes(1)
+ ];
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, frags);
+ assert.ok(isBinary);
+ done();
+ });
+
+ frags.forEach((frag, i) => {
+ Sender.frame(frag, {
+ fin: i === frags.length - 1,
+ opcode: i === 0 ? 2 : 0,
+ readOnly: true,
+ mask: false,
+ rsv1: false
+ }).forEach((buf) => receiver.write(buf));
+ });
+ });
+
+ it('honors the `skipUTF8Validation` option (1/2)', (done) => {
+ const receiver = new Receiver({ skipUTF8Validation: true });
+
+ receiver.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.from([0xf8]));
+ assert.ok(!isBinary);
+ done();
+ });
+
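+    // 0xf8 is not valid UTF-8, but validation is skipped, so the message is delivered as is.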
+ receiver.write(Buffer.from([0x81, 0x01, 0xf8]));
+ });
+
+ it('honors the `skipUTF8Validation` option (2/2)', (done) => {
+ const receiver = new Receiver({ skipUTF8Validation: true });
+
+ receiver.on('conclude', (code, data) => {
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(data, Buffer.from([0xf8]));
+ done();
+ });
+
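+    // Close frame with status code 1000 and an invalid UTF-8 reason byte (0xf8).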
+ receiver.write(Buffer.from([0x88, 0x03, 0x03, 0xe8, 0xf8]));
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/sender.test.js b/testing/xpcshell/node-ws/test/sender.test.js
new file mode 100644
index 0000000000..532239fa1a
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/sender.test.js
@@ -0,0 +1,370 @@
+'use strict';
+
+const assert = require('assert');
+
+const extension = require('../lib/extension');
+const PerMessageDeflate = require('../lib/permessage-deflate');
+const Sender = require('../lib/sender');
+const { EMPTY_BUFFER } = require('../lib/constants');
+
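+// Minimal socket stub exposing just the surface these tests exercise: cork(), write(), and uncork().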
+class MockSocket {
+ constructor({ write } = {}) {
+ this.readable = true;
+ this.writable = true;
+
+ if (write) this.write = write;
+ }
+
+ cork() {}
+ write() {}
+ uncork() {}
+}
+
+describe('Sender', () => {
+ describe('.frame', () => {
+ it('does not mutate the input buffer if data is `readOnly`', () => {
+ const buf = Buffer.from([1, 2, 3, 4, 5]);
+
+ Sender.frame(buf, {
+ readOnly: true,
+ rsv1: false,
+ mask: true,
+ opcode: 2,
+ fin: true
+ });
+
+ assert.ok(buf.equals(Buffer.from([1, 2, 3, 4, 5])));
+ });
+
+ it('honors the `rsv1` option', () => {
+ const list = Sender.frame(EMPTY_BUFFER, {
+ readOnly: false,
+ mask: false,
+ rsv1: true,
+ opcode: 1,
+ fin: true
+ });
+
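+      // 0x40 is the RSV1 bit of the first header byte.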
+ assert.strictEqual(list[0][0] & 0x40, 0x40);
+ });
+
+ it('accepts a string as first argument', () => {
+ const list = Sender.frame('€', {
+ readOnly: false,
+ rsv1: false,
+ mask: false,
+ opcode: 1,
+ fin: true
+ });
+
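+      // 0x81 0x03 = FIN text frame with a 3-byte payload; "€" is e2 82 ac in UTF-8.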
+ assert.deepStrictEqual(list[0], Buffer.from('8103', 'hex'));
+ assert.deepStrictEqual(list[1], Buffer.from('e282ac', 'hex'));
+ });
+ });
+
+ describe('#send', () => {
+ it('compresses data if compress option is enabled', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate();
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 6) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+ assert.strictEqual(chunks[0][0] & 0x40, 0x40);
+
+ assert.strictEqual(chunks[2].length, 2);
+ assert.strictEqual(chunks[2][0] & 0x40, 0x40);
+
+ assert.strictEqual(chunks[4].length, 2);
+ assert.strictEqual(chunks[4][0] & 0x40, 0x40);
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+
+ perMessageDeflate.accept([{}]);
+
+ const options = { compress: true, fin: true };
+ const array = new Uint8Array([0x68, 0x69]);
+
+ sender.send(array.buffer, options);
+ sender.send(array, options);
+ sender.send('hi', options);
+ });
+
+ describe('when context takeover is disabled', () => {
+ it('honors the compression threshold', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate();
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 2) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+            assert.notStrictEqual(chunks[0][0] & 0x40, 0x40);
+ assert.strictEqual(chunks[1], 'hi');
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ sender.send('hi', { compress: true, fin: true });
+ });
+
+ it('compresses all fragments of a fragmented message', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate({ threshold: 3 });
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 4) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+ assert.strictEqual(chunks[0][0] & 0x40, 0x40);
+ assert.strictEqual(chunks[1].length, 9);
+
+ assert.strictEqual(chunks[2].length, 2);
+ assert.strictEqual(chunks[2][0] & 0x40, 0x00);
+ assert.strictEqual(chunks[3].length, 4);
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ sender.send('123', { compress: true, fin: false });
+ sender.send('12', { compress: true, fin: true });
+ });
+
+ it('does not compress any fragments of a fragmented message', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate({ threshold: 3 });
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 4) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+ assert.strictEqual(chunks[0][0] & 0x40, 0x00);
+ assert.strictEqual(chunks[1].length, 2);
+
+ assert.strictEqual(chunks[2].length, 2);
+ assert.strictEqual(chunks[2][0] & 0x40, 0x00);
+ assert.strictEqual(chunks[3].length, 3);
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ sender.send('12', { compress: true, fin: false });
+ sender.send('123', { compress: true, fin: true });
+ });
+
+ it('compresses empty buffer as first fragment', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate({ threshold: 0 });
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 4) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+ assert.strictEqual(chunks[0][0] & 0x40, 0x40);
+ assert.strictEqual(chunks[1].length, 5);
+
+ assert.strictEqual(chunks[2].length, 2);
+ assert.strictEqual(chunks[2][0] & 0x40, 0x00);
+ assert.strictEqual(chunks[3].length, 6);
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ sender.send(Buffer.alloc(0), { compress: true, fin: false });
+ sender.send('data', { compress: true, fin: true });
+ });
+
+ it('compresses empty buffer as last fragment', (done) => {
+ const chunks = [];
+ const perMessageDeflate = new PerMessageDeflate({ threshold: 0 });
+ const mockSocket = new MockSocket({
+ write: (chunk) => {
+ chunks.push(chunk);
+ if (chunks.length !== 4) return;
+
+ assert.strictEqual(chunks[0].length, 2);
+ assert.strictEqual(chunks[0][0] & 0x40, 0x40);
+ assert.strictEqual(chunks[1].length, 10);
+
+ assert.strictEqual(chunks[2].length, 2);
+ assert.strictEqual(chunks[2][0] & 0x40, 0x00);
+ assert.strictEqual(chunks[3].length, 1);
+ done();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+ const extensions = extension.parse(
+ 'permessage-deflate; client_no_context_takeover'
+ );
+
+ perMessageDeflate.accept(extensions['permessage-deflate']);
+
+ sender.send('data', { compress: true, fin: false });
+ sender.send(Buffer.alloc(0), { compress: true, fin: true });
+ });
+ });
+ });
+
+ describe('#ping', () => {
+ it('works with multiple types of data', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ let count = 0;
+ const mockSocket = new MockSocket({
+ write: (data) => {
+ if (++count < 3) return;
+
+ if (count % 2) {
+ assert.ok(data.equals(Buffer.from([0x89, 0x02])));
+ } else if (count < 8) {
+ assert.ok(data.equals(Buffer.from([0x68, 0x69])));
+ } else {
+ assert.strictEqual(data, 'hi');
+ done();
+ }
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+
+ perMessageDeflate.accept([{}]);
+
+ const array = new Uint8Array([0x68, 0x69]);
+
+ sender.send('foo', { compress: true, fin: true });
+ sender.ping(array.buffer, false);
+ sender.ping(array, false);
+ sender.ping('hi', false);
+ });
+ });
+
+ describe('#pong', () => {
+ it('works with multiple types of data', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+ let count = 0;
+ const mockSocket = new MockSocket({
+ write: (data) => {
+ if (++count < 3) return;
+
+ if (count % 2) {
+ assert.ok(data.equals(Buffer.from([0x8a, 0x02])));
+ } else if (count < 8) {
+ assert.ok(data.equals(Buffer.from([0x68, 0x69])));
+ } else {
+ assert.strictEqual(data, 'hi');
+ done();
+ }
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+
+ perMessageDeflate.accept([{}]);
+
+ const array = new Uint8Array([0x68, 0x69]);
+
+ sender.send('foo', { compress: true, fin: true });
+ sender.pong(array.buffer, false);
+ sender.pong(array, false);
+ sender.pong('hi', false);
+ });
+ });
+
+ describe('#close', () => {
+ it('throws an error if the first argument is invalid', () => {
+ const mockSocket = new MockSocket();
+ const sender = new Sender(mockSocket);
+
+ assert.throws(
+ () => sender.close('error'),
+ /^TypeError: First argument must be a valid error code number$/
+ );
+
+ assert.throws(
+ () => sender.close(1004),
+ /^TypeError: First argument must be a valid error code number$/
+ );
+ });
+
+ it('throws an error if the message is greater than 123 bytes', () => {
+ const mockSocket = new MockSocket();
+ const sender = new Sender(mockSocket);
+
+ assert.throws(
+ () => sender.close(1000, 'a'.repeat(124)),
+ /^RangeError: The message must not be greater than 123 bytes$/
+ );
+ });
+
+ it('should consume all data before closing', (done) => {
+ const perMessageDeflate = new PerMessageDeflate();
+
+ let count = 0;
+ const mockSocket = new MockSocket({
+ write: (data, cb) => {
+ count++;
+ if (cb) cb();
+ }
+ });
+ const sender = new Sender(mockSocket, {
+ 'permessage-deflate': perMessageDeflate
+ });
+
+ perMessageDeflate.accept([{}]);
+
+ sender.send('foo', { compress: true, fin: true });
+ sender.send('bar', { compress: true, fin: true });
+ sender.send('baz', { compress: true, fin: true });
+
+ sender.close(1000, undefined, false, () => {
+ assert.strictEqual(count, 8);
+ done();
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/subprotocol.test.js b/testing/xpcshell/node-ws/test/subprotocol.test.js
new file mode 100644
index 0000000000..91dd5d69d8
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/subprotocol.test.js
@@ -0,0 +1,91 @@
+'use strict';
+
+const assert = require('assert');
+
+const { parse } = require('../lib/subprotocol');
+
+describe('subprotocol', () => {
+ describe('parse', () => {
+ it('parses a single subprotocol', () => {
+ assert.deepStrictEqual(parse('foo'), new Set(['foo']));
+ });
+
+ it('parses multiple subprotocols', () => {
+ assert.deepStrictEqual(
+ parse('foo,bar,baz'),
+ new Set(['foo', 'bar', 'baz'])
+ );
+ });
+
+ it('ignores the optional white spaces', () => {
+ const header = 'foo , bar\t, \tbaz\t , qux\t\t,norf';
+
+ assert.deepStrictEqual(
+ parse(header),
+ new Set(['foo', 'bar', 'baz', 'qux', 'norf'])
+ );
+ });
+
+ it('throws an error if a subprotocol is empty', () => {
+ [
+ [',', 0],
+ ['foo,,', 4],
+ ['foo, ,', 6]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if a subprotocol is duplicated', () => {
+ ['foo,foo,bar', 'foo,bar,foo'].forEach((header) => {
+ assert.throws(
+ () => parse(header),
+ /^SyntaxError: The "foo" subprotocol is duplicated$/
+ );
+ });
+ });
+
+ it('throws an error if a white space is misplaced', () => {
+ [
+ ['f oo', 2],
+ [' foo', 0]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if a subprotocol contains invalid characters', () => {
+ [
+ ['f@o', 1],
+ ['f\\oo', 1],
+ ['foo,b@r', 5]
+ ].forEach((element) => {
+ assert.throws(
+ () => parse(element[0]),
+ new RegExp(
+ `^SyntaxError: Unexpected character at index ${element[1]}$`
+ )
+ );
+ });
+ });
+
+ it('throws an error if the header value ends prematurely', () => {
+ ['foo ', 'foo, ', 'foo,bar ', 'foo,bar,'].forEach((header) => {
+ assert.throws(
+ () => parse(header),
+ /^SyntaxError: Unexpected end of input$/
+ );
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/validation.test.js b/testing/xpcshell/node-ws/test/validation.test.js
new file mode 100644
index 0000000000..5718b12f02
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/validation.test.js
@@ -0,0 +1,52 @@
+'use strict';
+
+const assert = require('assert');
+
+const { isValidUTF8 } = require('../lib/validation');
+
+describe('validation', () => {
+ describe('isValidUTF8', () => {
+ it('returns false if it finds invalid bytes', () => {
+ assert.strictEqual(isValidUTF8(Buffer.from([0xf8])), false);
+ });
+
+ it('returns false for overlong encodings', () => {
+ assert.strictEqual(isValidUTF8(Buffer.from([0xc0, 0xa0])), false);
+ assert.strictEqual(isValidUTF8(Buffer.from([0xe0, 0x80, 0xa0])), false);
+ assert.strictEqual(
+ isValidUTF8(Buffer.from([0xf0, 0x80, 0x80, 0xa0])),
+ false
+ );
+ });
+
+ it('returns false for code points in the range U+D800 - U+DFFF', () => {
+ for (let i = 0xa0; i < 0xc0; i++) {
+ for (let j = 0x80; j < 0xc0; j++) {
+ assert.strictEqual(isValidUTF8(Buffer.from([0xed, i, j])), false);
+ }
+ }
+ });
+
+ it('returns false for code points greater than U+10FFFF', () => {
+ assert.strictEqual(
+ isValidUTF8(Buffer.from([0xf4, 0x90, 0x80, 0x80])),
+ false
+ );
+ assert.strictEqual(
+ isValidUTF8(Buffer.from([0xf5, 0x80, 0x80, 0x80])),
+ false
+ );
+ });
+
+ it('returns true for a well-formed UTF-8 byte sequence', () => {
+ // prettier-ignore
+ const buf = Buffer.from([
+ 0xe2, 0x82, 0xAC, // €
+ 0xf0, 0x90, 0x8c, 0x88, // 𐍈
+ 0x24 // $
+ ]);
+
+ assert.strictEqual(isValidUTF8(buf), true);
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/websocket-server.test.js b/testing/xpcshell/node-ws/test/websocket-server.test.js
new file mode 100644
index 0000000000..12928ff495
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/websocket-server.test.js
@@ -0,0 +1,1284 @@
+/* eslint no-unused-vars: ["error", { "varsIgnorePattern": "^ws$" }] */
+
+'use strict';
+
+const assert = require('assert');
+const crypto = require('crypto');
+const https = require('https');
+const http = require('http');
+const path = require('path');
+const net = require('net');
+const fs = require('fs');
+const os = require('os');
+
+const Sender = require('../lib/sender');
+const WebSocket = require('..');
+const { NOOP } = require('../lib/constants');
+
+describe('WebSocketServer', () => {
+ describe('#ctor', () => {
+ it('throws an error if no option object is passed', () => {
+ assert.throws(
+ () => new WebSocket.Server(),
+ new RegExp(
+ '^TypeError: One and only one of the "port", "server", or ' +
+ '"noServer" options must be specified$'
+ )
+ );
+ });
+
+ describe('options', () => {
+ it('throws an error if required options are not specified', () => {
+ assert.throws(
+ () => new WebSocket.Server({}),
+ new RegExp(
+ '^TypeError: One and only one of the "port", "server", or ' +
+ '"noServer" options must be specified$'
+ )
+ );
+ });
+
+ it('throws an error if mutually exclusive options are specified', () => {
+ const server = http.createServer();
+ const variants = [
+ { port: 0, noServer: true, server },
+ { port: 0, noServer: true },
+ { port: 0, server },
+ { noServer: true, server }
+ ];
+
+ for (const options of variants) {
+ assert.throws(
+ () => new WebSocket.Server(options),
+ new RegExp(
+ '^TypeError: One and only one of the "port", "server", or ' +
+ '"noServer" options must be specified$'
+ )
+ );
+ }
+ });
+
+ it('exposes options passed to constructor', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ assert.strictEqual(wss.options.port, 0);
+ wss.close(done);
+ });
+ });
+
+ it('accepts the `maxPayload` option', (done) => {
+ const maxPayload = 20480;
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ maxPayload,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(ws._receiver._maxPayload, maxPayload);
+ assert.strictEqual(
+ ws._receiver._extensions['permessage-deflate']._maxPayload,
+ maxPayload
+ );
+ wss.close(done);
+ });
+ });
+
+ it('honors the `WebSocket` option', (done) => {
+ class CustomWebSocket extends WebSocket.WebSocket {
+ get foo() {
+ return 'foo';
+ }
+ }
+
+ const wss = new WebSocket.Server(
+ {
+ port: 0,
+ WebSocket: CustomWebSocket
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ assert.ok(ws instanceof CustomWebSocket);
+ assert.strictEqual(ws.foo, 'foo');
+ wss.close(done);
+ });
+ });
+ });
+
+ it('emits an error if http server bind fails', (done) => {
+ const wss1 = new WebSocket.Server({ port: 0 }, () => {
+ const wss2 = new WebSocket.Server({
+ port: wss1.address().port
+ });
+
+ wss2.on('error', () => wss1.close(done));
+ });
+ });
+
+ it('starts a server on a given port', (done) => {
+ const port = 1337;
+ const wss = new WebSocket.Server({ port }, () => {
+ const ws = new WebSocket(`ws://localhost:${port}`);
+
+ ws.on('open', ws.close);
+ });
+
+ wss.on('connection', () => wss.close(done));
+ });
+
+ it('binds the server on any IPv6 address when available', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ assert.strictEqual(wss._server.address().address, '::');
+ wss.close(done);
+ });
+ });
+
+ it('uses a precreated http server', (done) => {
+ const server = http.createServer();
+
+ server.listen(0, () => {
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', () => {
+ server.close(done);
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', ws.close);
+ });
+ });
+
+ it('426s for non-Upgrade requests', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ http.get(`http://localhost:${wss.address().port}`, (res) => {
+ let body = '';
+
+ assert.strictEqual(res.statusCode, 426);
+ res.on('data', (chunk) => {
+ body += chunk;
+ });
+ res.on('end', () => {
+ assert.strictEqual(body, http.STATUS_CODES[426]);
+ wss.close(done);
+ });
+ });
+ });
+ });
+
+    it('uses a precreated http server listening on a unix socket', function (done) {
+ //
+ // Skip this test on Windows. The URL parser:
+ //
+ // - Throws an error if the named pipe uses backward slashes.
+ // - Incorrectly parses the path if the named pipe uses forward slashes.
+ //
+ if (process.platform === 'win32') return this.skip();
+
+ const server = http.createServer();
+ const sockPath = path.join(
+ os.tmpdir(),
+ `ws.${crypto.randomBytes(16).toString('hex')}.sock`
+ );
+
+ server.listen(sockPath, () => {
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', (ws, req) => {
+ if (wss.clients.size === 1) {
+ assert.strictEqual(req.url, '/foo?bar=bar');
+ } else {
+ assert.strictEqual(req.url, '/');
+
+ for (const client of wss.clients) {
+ client.close();
+ }
+
+ server.close(done);
+ }
+ });
+
+ const ws = new WebSocket(`ws+unix://${sockPath}:/foo?bar=bar`);
+ ws.on('open', () => new WebSocket(`ws+unix://${sockPath}`));
+ });
+ });
+ });
+
+ describe('#address', () => {
+ it('returns the address of the server', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const addr = wss.address();
+
+ assert.deepStrictEqual(addr, wss._server.address());
+ wss.close(done);
+ });
+ });
+
+ it('throws an error when operating in "noServer" mode', () => {
+ const wss = new WebSocket.Server({ noServer: true });
+
+ assert.throws(() => {
+ wss.address();
+ }, /^Error: The server is operating in "noServer" mode$/);
+ });
+
+ it('returns `null` if called after close', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ wss.close(() => {
+ assert.strictEqual(wss.address(), null);
+ done();
+ });
+ });
+ });
+ });
+
+ describe('#close', () => {
+ it('does not throw if called multiple times', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ wss.on('close', done);
+
+ wss.close();
+ wss.close();
+ wss.close();
+ });
+ });
+
+ it("doesn't close a precreated server", (done) => {
+ const server = http.createServer();
+ const realClose = server.close;
+
+ server.close = () => {
+ done(new Error('Must not close pre-created server'));
+ };
+
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', () => {
+ wss.close();
+ server.close = realClose;
+ server.close(done);
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', ws.close);
+ });
+ });
+
+ it('invokes the callback in noServer mode', (done) => {
+ const wss = new WebSocket.Server({ noServer: true });
+
+ wss.close(done);
+ });
+
+ it('cleans event handlers on precreated server', (done) => {
+ const server = http.createServer();
+ const wss = new WebSocket.Server({ server });
+
+ server.listen(0, () => {
+ wss.close(() => {
+ assert.strictEqual(server.listenerCount('listening'), 0);
+ assert.strictEqual(server.listenerCount('upgrade'), 0);
+ assert.strictEqual(server.listenerCount('error'), 0);
+
+ server.close(done);
+ });
+ });
+ });
+
+ it("emits the 'close' event after the server closes", (done) => {
+ let serverCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ net.createConnection({ port: wss.address().port });
+ });
+
+ wss._server.on('connection', (socket) => {
+ wss.close();
+
+ //
+ // The server is closing. Ensure this does not emit a `'close'`
+ // event before the server is actually closed.
+ //
+ wss.close();
+
+ process.nextTick(() => {
+ socket.end();
+ });
+ });
+
+ wss._server.on('close', () => {
+ serverCloseEventEmitted = true;
+ });
+
+ wss.on('close', () => {
+ assert.ok(serverCloseEventEmitted);
+ done();
+ });
+ });
+
+ it("emits the 'close' event if client tracking is disabled", (done) => {
+ const wss = new WebSocket.Server({
+ noServer: true,
+ clientTracking: false
+ });
+
+ wss.on('close', done);
+ wss.close();
+ });
+
+ it('calls the callback if the server is already closed', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ wss.close(() => {
+ assert.strictEqual(wss._state, 2);
+
+ wss.close((err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'The server is not running');
+ done();
+ });
+ });
+ });
+ });
+
+ it("emits the 'close' event if the server is already closed", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ wss.close(() => {
+ assert.strictEqual(wss._state, 2);
+
+ wss.on('close', done);
+ wss.close();
+ });
+ });
+ });
+ });
+
+ describe('#clients', () => {
+ it('returns a list of connected clients', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ assert.strictEqual(wss.clients.size, 0);
+
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ });
+
+ wss.on('connection', () => {
+ assert.strictEqual(wss.clients.size, 1);
+ wss.close(done);
+ });
+ });
+
+ it('can be disabled', (done) => {
+ const wss = new WebSocket.Server(
+ { port: 0, clientTracking: false },
+ () => {
+ assert.strictEqual(wss.clients, undefined);
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.close());
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(wss.clients, undefined);
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ it('is updated when client terminates the connection', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.terminate());
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('close', () => {
+ assert.strictEqual(wss.clients.size, 0);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('is updated when client closes the connection', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.close());
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('close', () => {
+ assert.strictEqual(wss.clients.size, 0);
+ wss.close(done);
+ });
+ });
+ });
+ });
+
+ describe('#shouldHandle', () => {
+ it('returns true when the path matches', () => {
+ const wss = new WebSocket.Server({ noServer: true, path: '/foo' });
+
+ assert.strictEqual(wss.shouldHandle({ url: '/foo' }), true);
+ assert.strictEqual(wss.shouldHandle({ url: '/foo?bar=baz' }), true);
+ });
+
+ it("returns false when the path doesn't match", () => {
+ const wss = new WebSocket.Server({ noServer: true, path: '/foo' });
+
+ assert.strictEqual(wss.shouldHandle({ url: '/bar' }), false);
+ });
+ });
+
+ describe('#handleUpgrade', () => {
+ it('can be used for a pre-existing server', (done) => {
+ const server = http.createServer();
+
+ server.listen(0, () => {
+ const wss = new WebSocket.Server({ noServer: true });
+
+ server.on('upgrade', (req, socket, head) => {
+ wss.handleUpgrade(req, socket, head, (ws) => {
+ ws.send('hello');
+ ws.close();
+ });
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from('hello'));
+ assert.ok(!isBinary);
+ server.close(done);
+ });
+ });
+ });
+
+ it("closes the connection when path doesn't match", (done) => {
+ const wss = new WebSocket.Server({ port: 0, path: '/ws' }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 13
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('closes the connection when protocol version is Hixie-76', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'WebSocket',
+ 'Sec-WebSocket-Key1': '4 @1 46546xW%0l 1 5',
+ 'Sec-WebSocket-Key2': '12998 5 Y3 1 .P00',
+ 'Sec-WebSocket-Protocol': 'sample'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Missing or invalid Sec-WebSocket-Key header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ describe('#completeUpgrade', () => {
+ it('throws an error if called twice with the same socket', (done) => {
+ const server = http.createServer();
+
+ server.listen(0, () => {
+ const wss = new WebSocket.Server({ noServer: true });
+
+ server.on('upgrade', (req, socket, head) => {
+ wss.handleUpgrade(req, socket, head, (ws) => {
+ ws.close();
+ });
+ assert.throws(
+ () => wss.handleUpgrade(req, socket, head, NOOP),
+ (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'server.handleUpgrade() was called more than once with the ' +
+ 'same socket, possibly due to a misconfiguration'
+ );
+ return true;
+ }
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => {
+ ws.on('close', () => {
+ server.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ describe('Connection establishing', () => {
+ it('fails if the HTTP method is not GET', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.request({
+ method: 'POST',
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 405);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Invalid HTTP method'
+ );
+ wss.close(done);
+ });
+ });
+
+ req.end();
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Upgrade header field value is not "websocket"', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'foo'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Invalid Upgrade header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Key header is invalid (1/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Missing or invalid Sec-WebSocket-Key header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Key header is invalid (2/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'P5l8BJcZwRc='
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Missing or invalid Sec-WebSocket-Key header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Version header is invalid (1/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ=='
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Missing or invalid Sec-WebSocket-Version header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Version header is invalid (2/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 12
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Missing or invalid Sec-WebSocket-Version header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+    it('fails if the Sec-WebSocket-Protocol header is invalid', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 13,
+ 'Sec-WebSocket-Protocol': 'foo;bar'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Invalid Sec-WebSocket-Protocol header'
+ );
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Extensions header is invalid', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 13,
+ 'Sec-WebSocket-Extensions':
+ 'permessage-deflate; server_max_window_bits=foo'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(
+ Buffer.concat(chunks).toString(),
+ 'Invalid or unacceptable Sec-WebSocket-Extensions header'
+ );
+ wss.close(done);
+ });
+ });
+ }
+ );
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it("emits the 'wsClientError' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.request({
+ method: 'POST',
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket'
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 400);
+ wss.close(done);
+ });
+
+ req.end();
+ });
+
+ wss.on('wsClientError', (err, socket, request) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Invalid HTTP method');
+
+ assert.ok(request instanceof http.IncomingMessage);
+ assert.strictEqual(request.method, 'POST');
+
+ socket.end('HTTP/1.1 400 Bad Request\r\n\r\n');
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('fails if the WebSocket server is closing or closed', (done) => {
+ const server = http.createServer();
+ const wss = new WebSocket.Server({ noServer: true });
+
+ server.on('upgrade', (req, socket, head) => {
+ wss.close();
+ wss.handleUpgrade(req, socket, head, () => {
+ done(new Error('Unexpected callback invocation'));
+ });
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('unexpected-response', (req, res) => {
+ assert.strictEqual(res.statusCode, 503);
+ res.resume();
+ server.close(done);
+ });
+ });
+ });
+
+ it('handles unsupported extensions', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 13,
+ 'Sec-WebSocket-Extensions': 'foo; bar'
+ }
+ });
+
+ req.on('upgrade', (res, socket, head) => {
+ if (head.length) socket.unshift(head);
+
+ socket.once('data', (chunk) => {
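+              // The first byte is 0x88: a close frame (FIN set, opcode 0x8) sent by the server.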
+ assert.strictEqual(chunk[0], 0x88);
+ socket.destroy();
+ wss.close(done);
+ });
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(ws.extensions, '');
+ ws.close();
+ });
+ });
+
+ describe('`verifyClient`', () => {
+ it('can reject client synchronously', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: () => false,
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 8
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 401);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('can accept client synchronously', (done) => {
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+
+ const wss = new WebSocket.Server({
+ verifyClient: (info) => {
+ assert.strictEqual(info.origin, 'https://example.com');
+ assert.strictEqual(info.req.headers.foo, 'bar');
+            assert.strictEqual(info.secure, true);
+ return true;
+ },
+ server
+ });
+
+ wss.on('connection', () => {
+ server.close(done);
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ headers: { Origin: 'https://example.com', foo: 'bar' },
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', ws.close);
+ });
+ });
+
+ it('can accept client asynchronously', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (o, cb) => process.nextTick(cb, true),
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ }
+ );
+
+ wss.on('connection', () => wss.close(done));
+ });
+
+ it('can reject client asynchronously', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (info, cb) => process.nextTick(cb, false),
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 8
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 401);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('can reject client asynchronously w/ status code', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (info, cb) => process.nextTick(cb, false, 404),
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 8
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 404);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+
+ it('can reject client asynchronously w/ custom headers', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (info, cb) => {
+ process.nextTick(cb, false, 503, '', { 'Retry-After': 120 });
+ },
+ port: 0
+ },
+ () => {
+ const req = http.get({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 8
+ }
+ });
+
+ req.on('response', (res) => {
+ assert.strictEqual(res.statusCode, 503);
+ assert.strictEqual(res.headers['retry-after'], '120');
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+ });
+ });
+
+    it("doesn't emit the 'connection' event if the socket is closed prematurely", (done) => {
+ const server = http.createServer();
+
+ server.listen(0, () => {
+ const wss = new WebSocket.Server({
+ verifyClient: ({ req: { socket } }, cb) => {
+ assert.strictEqual(socket.readable, true);
+ assert.strictEqual(socket.writable, true);
+
+ socket.on('end', () => {
+ assert.strictEqual(socket.readable, false);
+ assert.strictEqual(socket.writable, true);
+ cb(true);
+ });
+ },
+ server
+ });
+
+ wss.on('connection', () => {
+ done(new Error("Unexpected 'connection' event"));
+ });
+
+ const socket = net.connect(
+ {
+ port: server.address().port,
+ allowHalfOpen: true
+ },
+ () => {
+ socket.end(
+ [
+ 'GET / HTTP/1.1',
+ 'Host: localhost',
+ 'Upgrade: websocket',
+ 'Connection: Upgrade',
+ 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version: 13',
+ '\r\n'
+ ].join('\r\n')
+ );
+ }
+ );
+
+ socket.on('end', () => {
+ wss.close();
+ server.close(done);
+ });
+ });
+ });
+
+ it('handles data passed along with the upgrade request', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const req = http.request({
+ port: wss.address().port,
+ headers: {
+ Connection: 'Upgrade',
+ Upgrade: 'websocket',
+ 'Sec-WebSocket-Key': 'dGhlIHNhbXBsZSBub25jZQ==',
+ 'Sec-WebSocket-Version': 13
+ }
+ });
+
+ const list = Sender.frame(Buffer.from('Hello'), {
+ fin: true,
+ rsv1: false,
+ opcode: 0x01,
+ mask: true,
+ readOnly: false
+ });
+
+ req.write(Buffer.concat(list));
+ req.end();
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (data, isBinary) => {
+ assert.deepStrictEqual(data, Buffer.from('Hello'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ });
+ });
+
+ describe('`handleProtocols`', () => {
+      it('allows selecting a subprotocol', (done) => {
+ const handleProtocols = (protocols, request) => {
+ assert.ok(request instanceof http.IncomingMessage);
+ assert.strictEqual(request.url, '/');
+ return Array.from(protocols).pop();
+ };
+ const wss = new WebSocket.Server({ handleProtocols, port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, [
+ 'foo',
+ 'bar'
+ ]);
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.protocol, 'bar');
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+ });
+
+ it("emits the 'headers' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ });
+
+ wss.on('headers', (headers, request) => {
+ assert.deepStrictEqual(headers.slice(0, 3), [
+ 'HTTP/1.1 101 Switching Protocols',
+ 'Upgrade: websocket',
+ 'Connection: Upgrade'
+ ]);
+ assert.ok(request instanceof http.IncomingMessage);
+ assert.strictEqual(request.url, '/');
+
+ wss.on('connection', () => wss.close(done));
+ });
+ });
+ });
+
+ describe('permessage-deflate', () => {
+ it('is disabled by default', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', ws.close);
+ });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(
+ req.headers['sec-websocket-extensions'],
+ 'permessage-deflate; client_max_window_bits'
+ );
+ assert.strictEqual(ws.extensions, '');
+ wss.close(done);
+ });
+ });
+
+ it('uses configuration options', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { clientMaxWindowBits: 8 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('upgrade', (res) => {
+ assert.strictEqual(
+ res.headers['sec-websocket-extensions'],
+ 'permessage-deflate; client_max_window_bits=8'
+ );
+
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/websocket.integration.js b/testing/xpcshell/node-ws/test/websocket.integration.js
new file mode 100644
index 0000000000..abd96c61e4
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/websocket.integration.js
@@ -0,0 +1,55 @@
+'use strict';
+
+const assert = require('assert');
+
+const WebSocket = require('..');
+
+describe('WebSocket', () => {
+ it('communicates successfully with echo service (ws)', (done) => {
+ const ws = new WebSocket('ws://websocket-echo.com/', {
+ protocolVersion: 13
+ });
+
+ let dataReceived = false;
+
+ ws.on('open', () => {
+ ws.send('hello');
+ });
+
+ ws.on('close', () => {
+ assert.ok(dataReceived);
+ done();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ dataReceived = true;
+ assert.ok(!isBinary);
+ assert.strictEqual(message.toString(), 'hello');
+ ws.close();
+ });
+ });
+
+ it('communicates successfully with echo service (wss)', (done) => {
+ const ws = new WebSocket('wss://websocket-echo.com/', {
+ protocolVersion: 13
+ });
+
+ let dataReceived = false;
+
+ ws.on('open', () => {
+ ws.send('hello');
+ });
+
+ ws.on('close', () => {
+ assert.ok(dataReceived);
+ done();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ dataReceived = true;
+ assert.ok(!isBinary);
+ assert.strictEqual(message.toString(), 'hello');
+ ws.close();
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/test/websocket.test.js b/testing/xpcshell/node-ws/test/websocket.test.js
new file mode 100644
index 0000000000..f5fbf16505
--- /dev/null
+++ b/testing/xpcshell/node-ws/test/websocket.test.js
@@ -0,0 +1,4514 @@
+/* eslint no-unused-vars: ["error", { "varsIgnorePattern": "^ws$" }] */
+
+'use strict';
+
+const assert = require('assert');
+const crypto = require('crypto');
+const https = require('https');
+const http = require('http');
+const path = require('path');
+const net = require('net');
+const tls = require('tls');
+const os = require('os');
+const fs = require('fs');
+const { URL } = require('url');
+
+const Sender = require('../lib/sender');
+const WebSocket = require('..');
+const {
+ CloseEvent,
+ ErrorEvent,
+ Event,
+ MessageEvent
+} = require('../lib/event-target');
+const { EMPTY_BUFFER, GUID, kListener, NOOP } = require('../lib/constants');
+
+class CustomAgent extends http.Agent {
+ addRequest() {}
+}
+
+describe('WebSocket', () => {
+ describe('#ctor', () => {
+ it('throws an error when using an invalid url', () => {
+ assert.throws(
+ () => new WebSocket('foo'),
+ /^SyntaxError: Invalid URL: foo$/
+ );
+
+ assert.throws(
+ () => new WebSocket('https://websocket-echo.com'),
+ /^SyntaxError: The URL's protocol must be one of "ws:", "wss:", or "ws\+unix:"$/
+ );
+
+ assert.throws(
+ () => new WebSocket('ws+unix:'),
+ /^SyntaxError: The URL's pathname is empty$/
+ );
+
+ assert.throws(
+ () => new WebSocket('wss://websocket-echo.com#foo'),
+ /^SyntaxError: The URL contains a fragment identifier$/
+ );
+ });
+
+ it('throws an error if a subprotocol is invalid or duplicated', () => {
+ for (const subprotocol of [null, '', 'a,b', ['a', 'a']]) {
+ assert.throws(
+ () => new WebSocket('ws://localhost', subprotocol),
+ /^SyntaxError: An invalid or duplicated subprotocol was specified$/
+ );
+ }
+ });
+
+ it('accepts `url.URL` objects as url', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req, opts) => {
+ assert.strictEqual(opts.host, '::1');
+ assert.strictEqual(req.path, '/');
+ done();
+ };
+
+ const ws = new WebSocket(new URL('ws://[::1]'), { agent });
+ });
+
+ describe('options', () => {
+ it('accepts the `options` object as 3rd argument', () => {
+ const agent = new CustomAgent();
+ let count = 0;
+ let ws;
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('sec-websocket-protocol'),
+ undefined
+ );
+ count++;
+ };
+
+ ws = new WebSocket('ws://localhost', undefined, { agent });
+ ws = new WebSocket('ws://localhost', [], { agent });
+
+ assert.strictEqual(count, 2);
+ });
+
+ it('accepts the `maxPayload` option', (done) => {
+ const maxPayload = 20480;
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: true,
+ maxPayload
+ });
+
+ ws.on('open', () => {
+ assert.strictEqual(ws._receiver._maxPayload, maxPayload);
+ assert.strictEqual(
+ ws._receiver._extensions['permessage-deflate']._maxPayload,
+ maxPayload
+ );
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('throws an error when using an invalid `protocolVersion`', () => {
+ const options = { agent: new CustomAgent(), protocolVersion: 1000 };
+
+ assert.throws(
+ () => new WebSocket('ws://localhost', options),
+ /^RangeError: Unsupported protocol version: 1000 \(supported versions: 8, 13\)$/
+ );
+ });
+
+ it('honors the `generateMask` option', (done) => {
+ const data = Buffer.from('foo');
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ generateMask() {}
+ });
+
+ ws.on('open', () => {
+ ws.send(data);
+ });
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1005);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ const chunks = [];
+
+ ws._socket.prependListener('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ ws.on('message', (message) => {
+ assert.deepStrictEqual(message, data);
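+            // Bytes 2-5 of the frame hold the masking key; the no-op generateMask leaves them zeroed.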
+ assert.deepStrictEqual(
+ Buffer.concat(chunks).slice(2, 6),
+ Buffer.alloc(4)
+ );
+
+ ws.close();
+ });
+ });
+ });
+ });
+ });
+
+ describe('Constants', () => {
+ const readyStates = {
+ CONNECTING: 0,
+ OPEN: 1,
+ CLOSING: 2,
+ CLOSED: 3
+ };
+
+ Object.keys(readyStates).forEach((state) => {
+ describe(`\`${state}\``, () => {
+ it('is enumerable property of class', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(WebSocket, state);
+
+ assert.deepStrictEqual(descriptor, {
+ configurable: false,
+ enumerable: true,
+ value: readyStates[state],
+ writable: false
+ });
+ });
+
+ it('is enumerable property of prototype', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ state
+ );
+
+ assert.deepStrictEqual(descriptor, {
+ configurable: false,
+ enumerable: true,
+ value: readyStates[state],
+ writable: false
+ });
+ });
+ });
+ });
+ });
+
+ describe('Attributes', () => {
+ describe('`binaryType`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'binaryType'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set !== undefined);
+ });
+
+ it("defaults to 'nodebuffer'", () => {
+ const ws = new WebSocket('ws://localhost', {
+ agent: new CustomAgent()
+ });
+
+ assert.strictEqual(ws.binaryType, 'nodebuffer');
+ });
+
+ it("can be changed to 'arraybuffer' or 'fragments'", () => {
+ const ws = new WebSocket('ws://localhost', {
+ agent: new CustomAgent()
+ });
+
+ ws.binaryType = 'arraybuffer';
+ assert.strictEqual(ws.binaryType, 'arraybuffer');
+
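+ // Invalid values are silently ignored.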
+ ws.binaryType = 'foo';
+ assert.strictEqual(ws.binaryType, 'arraybuffer');
+
+ ws.binaryType = 'fragments';
+ assert.strictEqual(ws.binaryType, 'fragments');
+
+ ws.binaryType = '';
+ assert.strictEqual(ws.binaryType, 'fragments');
+
+ ws.binaryType = 'nodebuffer';
+ assert.strictEqual(ws.binaryType, 'nodebuffer');
+ });
+ });
+
+ describe('`bufferedAmount`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'bufferedAmount'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to zero', () => {
+ const ws = new WebSocket('ws://localhost', {
+ agent: new CustomAgent()
+ });
+
+ assert.strictEqual(ws.bufferedAmount, 0);
+ });
+
+ it('defaults to zero upon "open"', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.onopen = () => {
+ assert.strictEqual(ws.bufferedAmount, 0);
+ wss.close(done);
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('takes into account the data in the sender queue', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send('foo');
+
+ assert.strictEqual(ws.bufferedAmount, 3);
+
+ ws.send('bar', (err) => {
+ assert.ifError(err);
+ assert.strictEqual(ws.bufferedAmount, 0);
+ wss.close(done);
+ });
+
+ assert.strictEqual(ws.bufferedAmount, 6);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('takes into account the data in the socket queue', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ const data = Buffer.alloc(1024, 61);
+
+ while (ws.bufferedAmount === 0) {
+ ws.send(data);
+ }
+
+ assert.ok(ws.bufferedAmount > 0);
+ assert.strictEqual(
+ ws.bufferedAmount,
+ ws._socket._writableState.length
+ );
+
+ ws.on('close', () => wss.close(done));
+ ws.close();
+ });
+ });
+ });
+
+ describe('`extensions`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'bufferedAmount'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('exposes the negotiated extension names (1/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.extensions, '');
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.extensions, '');
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(ws.extensions, '');
+ ws.close();
+ });
+ });
+
+ it('exposes the negotiated extension names (2/2)', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.extensions, '');
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.extensions, 'permessage-deflate');
+ ws.on('close', () => wss.close(done));
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(ws.extensions, 'permessage-deflate');
+ ws.close();
+ });
+ });
+ });
+
+ describe('`isPaused`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'isPaused'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('indicates whether the websocket is paused', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.pause();
+ assert.ok(ws.isPaused);
+
+ ws.resume();
+ assert.ok(!ws.isPaused);
+
+ ws.close();
+ wss.close(done);
+ });
+
+ assert.ok(!ws.isPaused);
+ });
+ });
+ });
+
+ describe('`protocol`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'protocol'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('exposes the subprotocol selected by the server', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, 'foo');
+
+ assert.strictEqual(ws.protocol, '');
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.protocol, 'foo');
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ assert.strictEqual(ws.protocol, 'foo');
+ ws.close();
+ });
+ });
+ });
+
+ describe('`readyState`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'readyState'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('defaults to `CONNECTING`', () => {
+ const ws = new WebSocket('ws://localhost', {
+ agent: new CustomAgent()
+ });
+
+ assert.strictEqual(ws.readyState, WebSocket.CONNECTING);
+ });
+
+ it('is set to `OPEN` once connection is established', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.readyState, WebSocket.OPEN);
+ ws.close();
+ });
+
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ it('is set to `CLOSED` once connection is closed', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+ wss.close(done);
+ });
+
+ ws.on('open', () => ws.close(1001));
+ });
+ });
+
+ it('is set to `CLOSED` once connection is terminated', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+ wss.close(done);
+ });
+
+ ws.on('open', () => ws.terminate());
+ });
+ });
+ });
+
+ describe('`url`', () => {
+ it('is enumerable and configurable', () => {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ 'url'
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set === undefined);
+ });
+
+ it('exposes the server url', () => {
+ const url = 'ws://localhost';
+ const ws = new WebSocket(url, { agent: new CustomAgent() });
+
+ assert.strictEqual(ws.url, url);
+ });
+ });
+ });
+
+ describe('Events', () => {
+ it("emits an 'error' event if an error occurs", (done) => {
+ let clientCloseEventEmitted = false;
+ let serverClientCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ clientCloseEventEmitted = true;
+ if (serverClientCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1002);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+
+ serverClientCloseEventEmitted = true;
+ if (clientCloseEventEmitted) wss.close(done);
+ });
+
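+ // 0x85 = FIN bit + reserved opcode 0x5, which the client must reject as a protocol error.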
+ ws._socket.write(Buffer.from([0x85, 0x00]));
+ });
+ });
+
+ it('does not re-emit `net.Socket` errors', (done) => {
+ const codes = ['EPIPE', 'ECONNABORTED', 'ECANCELED', 'ECONNRESET'];
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws._socket.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.ok(codes.includes(err.code), `Unexpected code: ${err.code}`);
+ ws.on('close', (code, message) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(message, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ });
+
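+ // Terminate the server end and keep sending so the raw socket fails with one
+ // of the codes above; the error must not be re-emitted on the WebSocket.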
+ for (const client of wss.clients) client.terminate();
+ ws.send('foo');
+ ws.send('bar');
+ });
+ });
+ });
+
+ it("emits an 'upgrade' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ ws.on('upgrade', (res) => {
+ assert.ok(res instanceof http.IncomingMessage);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it("emits a 'ping' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ ws.on('ping', () => wss.close(done));
+ });
+
+ wss.on('connection', (ws) => {
+ ws.ping();
+ ws.close();
+ });
+ });
+
+ it("emits a 'pong' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ ws.on('pong', () => wss.close(done));
+ });
+
+ wss.on('connection', (ws) => {
+ ws.pong();
+ ws.close();
+ });
+ });
+
+ it("emits a 'redirect' event", (done) => {
+ const server = http.createServer();
+ const wss = new WebSocket.Server({ noServer: true, path: '/foo' });
+
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /foo\r\n\r\n');
+ server.once('upgrade', (req, socket, head) => {
+ wss.handleUpgrade(req, socket, head, (ws) => {
+ ws.close();
+ });
+ });
+ });
+
+ server.listen(() => {
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ followRedirects: true
+ });
+
+ ws.on('redirect', (url, req) => {
+ assert.strictEqual(ws._redirects, 1);
+ assert.strictEqual(url, `ws://localhost:${port}/foo`);
+ assert.ok(req instanceof http.ClientRequest);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ server.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ describe('Connection establishing', () => {
+ const server = http.createServer();
+
+ beforeEach((done) => server.listen(0, done));
+ afterEach((done) => server.close(done));
+
+ it('fails if the Upgrade header field value is not "websocket"', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.on('end', socket.end);
+ socket.write(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Connection: Upgrade\r\n' +
+ 'Upgrade: foo\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Invalid Upgrade header');
+ done();
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Accept header is invalid', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.on('end', socket.end);
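+ // A fixed Sec-WebSocket-Accept value cannot match the hash of the random client key.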
+ socket.write(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ 'Sec-WebSocket-Accept: CxYS6+NgJSBG74mdgLvGscRvpns=\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Invalid Sec-WebSocket-Accept header');
+ done();
+ });
+ });
+
+ it('close event is raised when server closes connection', (done) => {
+ server.once('upgrade', (req, socket) => {
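+ // A valid Sec-WebSocket-Accept value is the base64-encoded SHA-1 of the
+ // client key concatenated with the GUID (RFC 6455).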
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ done();
+ });
+ });
+
+ it('error is emitted if server aborts connection', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ `HTTP/1.1 401 ${http.STATUS_CODES[401]}\r\n` +
+ 'Connection: close\r\n' +
+ 'Content-type: text/html\r\n' +
+ `Content-Length: ${http.STATUS_CODES[401].length}\r\n` +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Unexpected server response: 401');
+ done();
+ });
+ });
+
+ it('unexpected response can be read when sent by server', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ `HTTP/1.1 401 ${http.STATUS_CODES[401]}\r\n` +
+ 'Connection: close\r\n' +
+ 'Content-type: text/html\r\n' +
+ 'Content-Length: 3\r\n' +
+ '\r\n' +
+ 'foo'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', () => done(new Error("Unexpected 'error' event")));
+ ws.on('unexpected-response', (req, res) => {
+ assert.strictEqual(res.statusCode, 401);
+
+ let data = '';
+
+ res.on('data', (v) => {
+ data += v;
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(data, 'foo');
+ done();
+ });
+ });
+ });
+
+ it('request can be aborted when unexpected response is sent by server', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ `HTTP/1.1 401 ${http.STATUS_CODES[401]}\r\n` +
+ 'Connection: close\r\n' +
+ 'Content-type: text/html\r\n' +
+ 'Content-Length: 3\r\n' +
+ '\r\n' +
+ 'foo'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', () => done(new Error("Unexpected 'error' event")));
+ ws.on('unexpected-response', (req, res) => {
+ assert.strictEqual(res.statusCode, 401);
+
+ res.on('end', done);
+ req.abort();
+ });
+ });
+
+ it('fails if the opening handshake timeout expires', (done) => {
+ server.once('upgrade', (req, socket) => socket.on('end', socket.end));
+
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ handshakeTimeout: 100
+ });
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Opening handshake has timed out');
+ done();
+ });
+ });
+
+ it('fails if an unexpected Sec-WebSocket-Extensions header is received', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Extensions: foo\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ perMessageDeflate: false
+ });
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Server sent a Sec-WebSocket-Extensions header but no extension ' +
+ 'was requested'
+ );
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Extensions header is invalid (1/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Extensions: foo;=\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Invalid Sec-WebSocket-Extensions header'
+ );
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if the Sec-WebSocket-Extensions header is invalid (2/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Extensions: ' +
+ 'permessage-deflate; client_max_window_bits=7\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Invalid Sec-WebSocket-Extensions header'
+ );
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if an unexpected extension is received (1/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Extensions: foo\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Server indicated an extension that was not requested'
+ );
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if an unexpected extension is received (2/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Extensions: permessage-deflate,foo\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Server indicated an extension that was not requested'
+ );
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if server sends a subprotocol when none was requested', (done) => {
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('headers', (headers) => {
+ headers.push('Sec-WebSocket-Protocol: foo');
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'Server sent a subprotocol but none was requested'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ it('fails if server sends an invalid subprotocol (1/2)', (done) => {
+ const wss = new WebSocket.Server({
+ handleProtocols: () => 'baz',
+ server
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, [
+ 'foo',
+ 'bar'
+ ]);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Server sent an invalid subprotocol');
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ it('fails if server sends an invalid subprotocol (2/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ const key = crypto
+ .createHash('sha1')
+ .update(req.headers['sec-websocket-key'] + GUID)
+ .digest('base64');
+
+ socket.end(
+ 'HTTP/1.1 101 Switching Protocols\r\n' +
+ 'Upgrade: websocket\r\n' +
+ 'Connection: Upgrade\r\n' +
+ `Sec-WebSocket-Accept: ${key}\r\n` +
+ 'Sec-WebSocket-Protocol:\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, [
+ 'foo',
+ 'bar'
+ ]);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Server sent an invalid subprotocol');
+ ws.on('close', () => done());
+ });
+ });
+
+ it('fails if server sends no subprotocol', (done) => {
+ const wss = new WebSocket.Server({
+ handleProtocols() {},
+ server
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, [
+ 'foo',
+ 'bar'
+ ]);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Server sent no subprotocol');
+ ws.on('close', () => wss.close(done));
+ });
+ });
+
+ it('does not follow redirects by default', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 301 Moved Permanently\r\n' +
+ 'Location: ws://localhost:8080\r\n' +
+ '\r\n'
+ );
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Unexpected server response: 301');
+ assert.strictEqual(ws._redirects, 0);
+ ws.on('close', () => done());
+ });
+ });
+
+ it('honors the `followRedirects` option', (done) => {
+ const wss = new WebSocket.Server({ noServer: true, path: '/foo' });
+
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /foo\r\n\r\n');
+ server.once('upgrade', (req, socket, head) => {
+ wss.handleUpgrade(req, socket, head, NOOP);
+ });
+ });
+
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ followRedirects: true
+ });
+
+ ws.on('open', () => {
+ assert.strictEqual(ws.url, `ws://localhost:${port}/foo`);
+ assert.strictEqual(ws._redirects, 1);
+ ws.on('close', () => done());
+ ws.close();
+ });
+ });
+
+ it('honors the `maxRedirects` option', (done) => {
+ const onUpgrade = (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /\r\n\r\n');
+ };
+
+ server.on('upgrade', onUpgrade);
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ followRedirects: true,
+ maxRedirects: 1
+ });
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'Maximum redirects exceeded');
+ assert.strictEqual(ws._redirects, 2);
+
+ server.removeListener('upgrade', onUpgrade);
+ ws.on('close', () => done());
+ });
+ });
+
+ it('emits an error if the redirect URL is invalid (1/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: ws://\r\n\r\n');
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ followRedirects: true
+ });
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof SyntaxError);
+ assert.strictEqual(err.message, 'Invalid URL: ws://');
+ assert.strictEqual(ws._redirects, 1);
+
+ ws.on('close', () => done());
+ });
+ });
+
+ it('emits an error if the redirect URL is invalid (2/2)', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: http://localhost\r\n\r\n');
+ });
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ followRedirects: true
+ });
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof SyntaxError);
+ assert.strictEqual(
+ err.message,
+ 'The URL\'s protocol must be one of "ws:", "wss:", or "ws+unix:"'
+ );
+ assert.strictEqual(ws._redirects, 1);
+
+ ws.on('close', () => done());
+ });
+ });
+
+ it('uses the first url userinfo when following redirects', (done) => {
+ const wss = new WebSocket.Server({ noServer: true, path: '/foo' });
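+ // 'Zm9vOmJhcg==' is base64('foo:bar'), the userinfo of the initial URL below.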
+ const authorization = 'Basic Zm9vOmJhcg==';
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://baz:qux@localhost:${port}/foo\r\n\r\n`
+ );
+ server.once('upgrade', (req, socket, head) => {
+ wss.handleUpgrade(req, socket, head, (ws, req) => {
+ assert.strictEqual(req.headers.authorization, authorization);
+ ws.close();
+ });
+ });
+ });
+
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://foo:bar@localhost:${port}`, {
+ followRedirects: true
+ });
+
+ assert.strictEqual(ws._req.getHeader('Authorization'), authorization);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://baz:qux@localhost:${port}/foo`);
+ assert.strictEqual(ws._redirects, 1);
+
+ wss.close(done);
+ });
+ });
+
+ describe('When moving away from a secure context', () => {
+ function proxy(httpServer, httpsServer) {
+ const server = net.createServer({ allowHalfOpen: true });
+
+ server.on('connection', (socket) => {
+ socket.on('readable', function read() {
+ socket.removeListener('readable', read);
+
+ const buf = socket.read(1);
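+ // A first byte of 22 (0x16) is a TLS handshake record, so route the connection
+ // to the HTTPS server; anything else goes to the plain HTTP server.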
+ const target = buf[0] === 22 ? httpsServer : httpServer;
+
+ socket.unshift(buf);
+ target.emit('connection', socket);
+ });
+ });
+
+ return server;
+ }
+
+ describe("If there is no 'redirect' event listener", () => {
+ it('drops the `auth` option', (done) => {
+ const httpServer = http.createServer();
+ const httpsServer = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const server = proxy(httpServer, httpsServer);
+
+ server.listen(() => {
+ const port = server.address().port;
+
+ httpsServer.on('upgrade', (req, socket) => {
+ socket.on('error', NOOP);
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const wss = new WebSocket.Server({ server: httpServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ ws.close();
+ });
+
+ const ws = new WebSocket(`wss://localhost:${port}`, {
+ auth: 'foo:bar',
+ followRedirects: true,
+ rejectUnauthorized: false
+ });
+
+ assert.strictEqual(
+ ws._req.getHeader('Authorization'),
+ 'Basic Zm9vOmJhcg=='
+ );
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://localhost:${port}/`);
+ assert.strictEqual(ws._redirects, 1);
+
+ server.close(done);
+ });
+ });
+ });
+
+ it('drops the Authorization and Cookie headers', (done) => {
+ const httpServer = http.createServer();
+ const httpsServer = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const server = proxy(httpServer, httpsServer);
+
+ server.listen(() => {
+ const port = server.address().port;
+
+ httpsServer.on('upgrade', (req, socket) => {
+ socket.on('error', NOOP);
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const wss = new WebSocket.Server({ server: httpServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ assert.strictEqual(req.headers.host, headers.host);
+
+ ws.close();
+ });
+
+ const ws = new WebSocket(`wss://localhost:${port}`, {
+ followRedirects: true,
+ headers,
+ rejectUnauthorized: false
+ });
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://localhost:${port}/`);
+ assert.strictEqual(ws._redirects, 1);
+
+ server.close(done);
+ });
+ });
+ });
+ });
+
+ describe("If there is at least one 'redirect' event listener", () => {
+ it('does not drop any headers by default', (done) => {
+ const httpServer = http.createServer();
+ const httpsServer = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const server = proxy(httpServer, httpsServer);
+
+ server.listen(() => {
+ const port = server.address().port;
+
+ httpsServer.on('upgrade', (req, socket) => {
+ socket.on('error', NOOP);
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const wss = new WebSocket.Server({ server: httpServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(
+ req.headers.authorization,
+ headers.authorization
+ );
+ assert.strictEqual(req.headers.cookie, headers.cookie);
+ assert.strictEqual(req.headers.host, headers.host);
+
+ ws.close();
+ });
+
+ const ws = new WebSocket(`wss://localhost:${port}`, {
+ followRedirects: true,
+ headers,
+ rejectUnauthorized: false
+ });
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('redirect', (url, req) => {
+ assert.strictEqual(ws._redirects, 1);
+ assert.strictEqual(url, `ws://localhost:${port}/`);
+ assert.notStrictEqual(firstRequest, req);
+ assert.strictEqual(
+ req.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(req.getHeader('Cookie'), headers.cookie);
+ assert.strictEqual(req.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ server.close(done);
+ });
+ });
+ });
+ });
+ });
+ });
+
+ describe('When the redirect host is different', () => {
+ describe("If there is no 'redirect' event listener", () => {
+ it('drops the `auth` option', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const ws = new WebSocket(
+ `ws://localhost:${server.address().port}`,
+ {
+ auth: 'foo:bar',
+ followRedirects: true
+ }
+ );
+
+ assert.strictEqual(
+ ws._req.getHeader('Authorization'),
+ 'Basic Zm9vOmJhcg=='
+ );
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://localhost:${port}/`);
+ assert.strictEqual(ws._redirects, 1);
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ ws.close();
+ });
+ });
+
+ it('drops the Authorization, Cookie and Host headers (1/4)', (done) => {
+ // Test the `ws:` to `ws:` case.
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const ws = new WebSocket(
+ `ws://localhost:${server.address().port}`,
+ { followRedirects: true, headers }
+ );
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://localhost:${port}/`);
+ assert.strictEqual(ws._redirects, 1);
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ assert.strictEqual(
+ req.headers.host,
+ `localhost:${wss.address().port}`
+ );
+
+ ws.close();
+ });
+ });
+
+ it('drops the Authorization, Cookie and Host headers (2/4)', function (done) {
+ if (process.platform === 'win32') return this.skip();
+
+ // Test the `ws:` to `ws+unix:` case.
+
+ const socketPath = path.join(
+ os.tmpdir(),
+ `ws.${crypto.randomBytes(16).toString('hex')}.sock`
+ );
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ `HTTP/1.1 302 Found\r\nLocation: ws+unix://${socketPath}\r\n\r\n`
+ );
+ });
+
+ const redirectedServer = http.createServer();
+ const wss = new WebSocket.Server({ server: redirectedServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ assert.strictEqual(req.headers.host, 'localhost');
+
+ ws.close();
+ });
+
+ redirectedServer.listen(socketPath, () => {
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const ws = new WebSocket(
+ `ws://localhost:${server.address().port}`,
+ { followRedirects: true, headers }
+ );
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws+unix://${socketPath}`);
+ assert.strictEqual(ws._redirects, 1);
+
+ redirectedServer.close(done);
+ });
+ });
+ });
+
+ it('drops the Authorization, Cookie and Host headers (3/4)', function (done) {
+ if (process.platform === 'win32') return this.skip();
+
+ // Test the `ws+unix:` to `ws+unix:` case.
+
+ const redirectingServerSocketPath = path.join(
+ os.tmpdir(),
+ `ws.${crypto.randomBytes(16).toString('hex')}.sock`
+ );
+ const redirectedServerSocketPath = path.join(
+ os.tmpdir(),
+ `ws.${crypto.randomBytes(16).toString('hex')}.sock`
+ );
+
+ const redirectingServer = http.createServer();
+
+ redirectingServer.on('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws+unix://${redirectedServerSocketPath}\r\n\r\n`
+ );
+ });
+
+ const redirectedServer = http.createServer();
+ const wss = new WebSocket.Server({ server: redirectedServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ assert.strictEqual(req.headers.host, 'localhost');
+
+ ws.close();
+ });
+
+ redirectingServer.listen(redirectingServerSocketPath, listening);
+ redirectedServer.listen(redirectedServerSocketPath, listening);
+
+ let callCount = 0;
+
+ function listening() {
+ if (++callCount !== 2) return;
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const ws = new WebSocket(
+ `ws+unix://${redirectingServerSocketPath}`,
+ { followRedirects: true, headers }
+ );
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(
+ ws.url,
+ `ws+unix://${redirectedServerSocketPath}`
+ );
+ assert.strictEqual(ws._redirects, 1);
+
+ redirectingServer.close();
+ redirectedServer.close(done);
+ });
+ }
+ });
+
+ it('drops the Authorization, Cookie and Host headers (4/4)', function (done) {
+ if (process.platform === 'win32') return this.skip();
+
+ // Test the `ws+unix:` to `ws:` case.
+
+ const redirectingServer = http.createServer();
+ const redirectedServer = http.createServer();
+ const wss = new WebSocket.Server({ server: redirectedServer });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ assert.strictEqual(
+ req.headers.host,
+ `localhost:${redirectedServer.address().port}`
+ );
+
+ ws.close();
+ });
+
+ const socketPath = path.join(
+ os.tmpdir(),
+ `ws.${crypto.randomBytes(16).toString('hex')}.sock`
+ );
+
+ redirectingServer.listen(socketPath, listening);
+ redirectedServer.listen(0, listening);
+
+ let callCount = 0;
+
+ function listening() {
+ if (++callCount !== 2) return;
+
+ const port = redirectedServer.address().port;
+
+ redirectingServer.on('upgrade', (req, socket) => {
+ socket.end(
+ `HTTP/1.1 302 Found\r\nLocation: ws://localhost:${port}\r\n\r\n`
+ );
+ });
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const ws = new WebSocket(`ws+unix://${socketPath}`, {
+ followRedirects: true,
+ headers
+ });
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.url, `ws://localhost:${port}/`);
+ assert.strictEqual(ws._redirects, 1);
+
+ redirectingServer.close();
+ redirectedServer.close(done);
+ });
+ }
+ });
+ });
+
+ describe("If there is at least one 'redirect' event listener", () => {
+ it('does not drop any headers by default', (done) => {
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar',
+ host: 'foo'
+ };
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const ws = new WebSocket(
+ `ws://localhost:${server.address().port}`,
+ { followRedirects: true, headers }
+ );
+
+ const firstRequest = ws._req;
+
+ assert.strictEqual(
+ firstRequest.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(
+ firstRequest.getHeader('Cookie'),
+ headers.cookie
+ );
+ assert.strictEqual(firstRequest.getHeader('Host'), headers.host);
+
+ ws.on('redirect', (url, req) => {
+ assert.strictEqual(ws._redirects, 1);
+ assert.strictEqual(url, `ws://localhost:${port}/`);
+ assert.notStrictEqual(firstRequest, req);
+ assert.strictEqual(
+ req.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(req.getHeader('Cookie'), headers.cookie);
+ assert.strictEqual(req.getHeader('Host'), headers.host);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(
+ req.headers.authorization,
+ headers.authorization
+ );
+ assert.strictEqual(req.headers.cookie, headers.cookie);
+ assert.strictEqual(req.headers.host, headers.host);
+ ws.close();
+ });
+ });
+ });
+ });
+
+ describe("In a listener of the 'redirect' event", () => {
+ it('allows aborting the request without swallowing errors', (done) => {
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /foo\r\n\r\n');
+ });
+
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ followRedirects: true
+ });
+
+ ws.on('redirect', (url, req) => {
+ assert.strictEqual(ws._redirects, 1);
+ assert.strictEqual(url, `ws://localhost:${port}/foo`);
+
+ req.on('socket', () => {
+ req.abort();
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.message, 'socket hang up');
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ done();
+ });
+ });
+ });
+ });
+
+ it('allows removing headers', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+
+ server.once('upgrade', (req, socket) => {
+ socket.end(
+ 'HTTP/1.1 302 Found\r\n' +
+ `Location: ws://localhost:${port}/\r\n\r\n`
+ );
+ });
+
+ const headers = {
+ authorization: 'Basic Zm9vOmJhcg==',
+ cookie: 'foo=bar'
+ };
+
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ followRedirects: true,
+ headers
+ });
+
+ ws.on('redirect', (url, req) => {
+ assert.strictEqual(ws._redirects, 1);
+ assert.strictEqual(url, `ws://localhost:${port}/`);
+ assert.strictEqual(
+ req.getHeader('Authorization'),
+ headers.authorization
+ );
+ assert.strictEqual(req.getHeader('Cookie'), headers.cookie);
+
+ req.removeHeader('authorization');
+ req.removeHeader('cookie');
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws, req) => {
+ assert.strictEqual(req.headers.authorization, undefined);
+ assert.strictEqual(req.headers.cookie, undefined);
+ ws.close();
+ });
+ });
+ });
+ });
+
+ describe('Connection with query string', () => {
+ it('connects when pathname is not null', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}/?token=qwerty`);
+
+ ws.on('open', () => {
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('connects when pathname is null', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const port = wss.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}?token=qwerty`);
+
+ ws.on('open', () => {
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+ });
+
+ describe('#pause', () => {
+ it('does nothing if `readyState` is `CONNECTING` or `CLOSED`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.readyState, WebSocket.CONNECTING);
+ assert.ok(!ws.isPaused);
+
+ ws.pause();
+ assert.ok(!ws.isPaused);
+
+ ws.on('open', () => {
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+
+ ws.pause();
+ assert.ok(!ws.isPaused);
+
+ wss.close(done);
+ });
+
+ ws.close();
+ });
+ });
+ });
+
+ it('pauses the socket', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ assert.ok(!ws.isPaused);
+ assert.ok(!ws._socket.isPaused());
+
+ ws.pause();
+ assert.ok(ws.isPaused);
+ assert.ok(ws._socket.isPaused());
+
+ ws.terminate();
+ wss.close(done);
+ });
+ });
+ });
+
+ describe('#ping', () => {
+ it('throws an error if `readyState` is `CONNECTING`', () => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ assert.throws(
+ () => ws.ping(),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+
+ assert.throws(
+ () => ws.ping(NOOP),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+ });
+
+ it('increases `bufferedAmount` if `readyState` is 2 or 3', (done) => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.strictEqual(ws.bufferedAmount, 0);
+
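+ // Only the 2-byte 'hi' payload of each unsent ping is added to `bufferedAmount`.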
+ ws.ping('hi');
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.ping();
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+
+ ws.ping('hi');
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ ws.ping();
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ done();
+ });
+ });
+
+ ws.close();
+ });
+
+ it('calls the callback w/ an error if `readyState` is 2 or 3', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+
+ assert.strictEqual(ws.bufferedAmount, 0);
+
+ ws.ping('hi', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 2 (CLOSING)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ ws.ping((err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 3 (CLOSED)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ wss.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ it('can send a ping with no data', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.ping(() => {
+ ws.ping();
+ ws.close();
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ let pings = 0;
+ ws.on('ping', (data) => {
+ assert.ok(Buffer.isBuffer(data));
+ assert.strictEqual(data.length, 0);
+ if (++pings === 2) wss.close(done);
+ });
+ });
+ });
+
+ it('can send a ping with data', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.ping('hi', () => {
+ ws.ping('hi', true);
+ ws.close();
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ let pings = 0;
+ ws.on('ping', (message) => {
+ assert.strictEqual(message.toString(), 'hi');
+ if (++pings === 2) wss.close(done);
+ });
+ });
+ });
+
+ it('can send numbers as ping payload', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.ping(0);
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('ping', (message) => {
+ assert.strictEqual(message.toString(), '0');
+ wss.close(done);
+ });
+ });
+ });
+
+ it('throws an error if the data size is greater than 125 bytes', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ assert.throws(
+ () => ws.ping(Buffer.alloc(126)),
+ /^RangeError: The data size must not be greater than 125 bytes$/
+ );
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+ });
+
+ describe('#pong', () => {
+ it('throws an error if `readyState` is `CONNECTING`', () => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ assert.throws(
+ () => ws.pong(),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+
+ assert.throws(
+ () => ws.pong(NOOP),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+ });
+
+ it('increases `bufferedAmount` if `readyState` is 2 or 3', (done) => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.strictEqual(ws.bufferedAmount, 0);
+
+ ws.pong('hi');
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.pong();
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+
+ ws.pong('hi');
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ ws.pong();
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ done();
+ });
+ });
+
+ ws.close();
+ });
+
+ it('calls the callback w/ an error if `readyState` is 2 or 3', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+
+ assert.strictEqual(ws.bufferedAmount, 0);
+
+ ws.pong('hi', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 2 (CLOSING)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ ws.pong((err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 3 (CLOSED)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ wss.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ it('can send a pong with no data', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.pong(() => {
+ ws.pong();
+ ws.close();
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ let pongs = 0;
+ ws.on('pong', (data) => {
+ assert.ok(Buffer.isBuffer(data));
+ assert.strictEqual(data.length, 0);
+ if (++pongs === 2) wss.close(done);
+ });
+ });
+ });
+
+ it('can send a pong with data', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.pong('hi', () => {
+ ws.pong('hi', true);
+ ws.close();
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ let pongs = 0;
+ ws.on('pong', (message) => {
+ assert.strictEqual(message.toString(), 'hi');
+ if (++pongs === 2) wss.close(done);
+ });
+ });
+ });
+
+ it('can send numbers as pong payload', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.pong(0);
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('pong', (message) => {
+ assert.strictEqual(message.toString(), '0');
+ wss.close(done);
+ });
+ });
+ });
+
+ it('throws an error if the data size is greater than 125 bytes', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ assert.throws(
+ () => ws.pong(Buffer.alloc(126)),
+ /^RangeError: The data size must not be greater than 125 bytes$/
+ );
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+ });
+
+ describe('#resume', () => {
+ it('does nothing if `readyState` is `CONNECTING` or `CLOSED`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ assert.strictEqual(ws.readyState, WebSocket.CONNECTING);
+ assert.ok(!ws.isPaused);
+
+ // Verify that no exception is thrown.
+ ws.resume();
+
+ ws.on('open', () => {
+ ws.pause();
+ assert.ok(ws.isPaused);
+
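+ // Write 1 KiB chunks until the socket's write buffer backs up and data starts to queue.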
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+
+ ws.resume();
+ assert.ok(ws.isPaused);
+
+ wss.close(done);
+ });
+
+ ws.terminate();
+ });
+ });
+ });
+
+ it('resumes the socket', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ assert.ok(!ws.isPaused);
+ assert.ok(!ws._socket.isPaused());
+
+ ws.pause();
+ assert.ok(ws.isPaused);
+ assert.ok(ws._socket.isPaused());
+
+ ws.resume();
+ assert.ok(!ws.isPaused);
+ assert.ok(!ws._socket.isPaused());
+
+ ws.close();
+ wss.close(done);
+ });
+ });
+ });
+
+ describe('#send', () => {
+ it('throws an error if `readyState` is `CONNECTING`', () => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ assert.throws(
+ () => ws.send('hi'),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+
+ assert.throws(
+ () => ws.send('hi', NOOP),
+ /^Error: WebSocket is not open: readyState 0 \(CONNECTING\)$/
+ );
+ });
+
+ it('increases `bufferedAmount` if `readyState` is 2 or 3', (done) => {
+ const ws = new WebSocket('ws://localhost', {
+ lookup() {}
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.strictEqual(ws.bufferedAmount, 0);
+
+ ws.send('hi');
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.send();
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+
+ ws.send('hi');
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ ws.send();
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ done();
+ });
+ });
+
+ ws.close();
+ });
+
+ it('calls the callback w/ an error if `readyState` is 2 or 3', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+
+ assert.strictEqual(ws.bufferedAmount, 0);
+
+ ws.send('hi', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 2 (CLOSING)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 2);
+
+ ws.on('close', () => {
+ ws.send('hi', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket is not open: readyState 3 (CLOSED)'
+ );
+ assert.strictEqual(ws.bufferedAmount, 4);
+
+ wss.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ it('can send a big binary message', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
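+ // 5 * 1024 * 1024 Float32 values, i.e. 20 MiB of binary payload.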
+ const array = new Float32Array(5 * 1024 * 1024);
+
+ for (let i = 0; i < array.length; i++) {
+ array[i] = i / 5;
+ }
+
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.send(array));
+ ws.on('message', (msg, isBinary) => {
+ assert.deepStrictEqual(msg, Buffer.from(array.buffer));
+ assert.ok(isBinary);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(msg);
+ ws.close();
+ });
+ });
+ });
+
+ it('can send text data', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.send('hi'));
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from('hi'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ ws.send(msg, { binary: isBinary });
+ ws.close();
+ });
+ });
+ });
+
+ it('does not override the `fin` option', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send('fragment', { fin: false });
+ ws.send('fragment', { fin: true });
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.deepStrictEqual(msg, Buffer.from('fragmentfragment'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('sends numbers as strings', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send(0);
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.deepStrictEqual(msg, Buffer.from('0'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('can send a `TypedArray`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const array = new Float32Array(6);
+
+ for (let i = 0; i < array.length; ++i) {
+ array[i] = i / 2;
+ }
+
+ const partial = array.subarray(2, 5);
+ const buf = Buffer.from(
+ partial.buffer,
+ partial.byteOffset,
+ partial.byteLength
+ );
+
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send(partial);
+ ws.close();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, buf);
+ assert.ok(isBinary);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(msg);
+ });
+ });
+ });
+
+ it('can send an `ArrayBuffer`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const array = new Float32Array(5);
+
+ for (let i = 0; i < array.length; ++i) {
+ array[i] = i / 2;
+ }
+
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send(array.buffer);
+ ws.close();
+ });
+
+ ws.onmessage = (event) => {
+ assert.ok(event.data.equals(Buffer.from(array.buffer)));
+ wss.close(done);
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(msg);
+ });
+ });
+ });
+
+ it('can send a `Buffer`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const buf = Buffer.from('foobar');
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send(buf);
+ ws.close();
+ });
+
+ ws.onmessage = (event) => {
+ assert.deepStrictEqual(event.data, buf);
+ wss.close(done);
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(msg);
+ });
+ });
+ });
+
+ it('calls the callback when data is written out', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send('hi', (err) => {
+ assert.ifError(err);
+ wss.close(done);
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.close();
+ });
+ });
+
+ it('works when the `data` argument is falsy', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws.send();
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ assert.strictEqual(message, EMPTY_BUFFER);
+ assert.ok(isBinary);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('honors the `mask` option', (done) => {
+ let clientCloseEventEmitted = false;
+ let serverClientCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.send('hi', { mask: false }));
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1002);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+
+ clientCloseEventEmitted = true;
+ if (serverClientCloseEventEmitted) wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ const chunks = [];
+
+ ws._socket.prependListener('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: MASK must be set'
+ );
+ assert.ok(
+ Buffer.concat(chunks).slice(0, 2).equals(Buffer.from('8102', 'hex'))
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ serverClientCloseEventEmitted = true;
+ if (clientCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+ });
+ });
+
+ describe('#close', () => {
+ it('closes the connection if called while connecting (1/3)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ ws.close(1001);
+ });
+ });
+
+ it('closes the connection if called while connecting (2/3)', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (info, cb) => setTimeout(cb, 300, true),
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ setTimeout(() => ws.close(1001), 150);
+ }
+ );
+ });
+
+ it('closes the connection if called while connecting (3/3)', (done) => {
+ const server = http.createServer();
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => {
+ server.close(done);
+ });
+ });
+
+ ws.on('unexpected-response', (req, res) => {
+ assert.strictEqual(res.statusCode, 502);
+
+ const chunks = [];
+
+ res.on('data', (chunk) => {
+ chunks.push(chunk);
+ });
+
+ res.on('end', () => {
+ assert.strictEqual(Buffer.concat(chunks).toString(), 'foo');
+ ws.close();
+ });
+ });
+ });
+
+ server.on('upgrade', (req, socket) => {
+ socket.on('end', socket.end);
+
+ socket.write(
+ `HTTP/1.1 502 ${http.STATUS_CODES[502]}\r\n` +
+ 'Connection: keep-alive\r\n' +
+ 'Content-type: text/html\r\n' +
+ 'Content-Length: 3\r\n' +
+ '\r\n' +
+ 'foo'
+ );
+ });
+ });
+
+ it('can be called from an error listener while connecting', (done) => {
+ const ws = new WebSocket('ws://localhost:1337');
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'ECONNREFUSED');
+ ws.close();
+ ws.on('close', () => done());
+ });
+ }).timeout(4000);
+
+ it("can be called from a listener of the 'redirect' event", (done) => {
+ const server = http.createServer();
+
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /foo\r\n\r\n');
+ });
+
+ server.listen(() => {
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ followRedirects: true
+ });
+
+ ws.on('open', () => {
+ done(new Error("Unexpected 'open' event"));
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ server.close(done);
+ });
+ });
+
+ ws.on('redirect', () => {
+ ws.close();
+ });
+ });
+ });
+
+ it("can be called from a listener of the 'upgrade' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ ws.on('upgrade', () => ws.close());
+ });
+ });
+
+ it('sends the close status code only when necessary', (done) => {
+ let sent;
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws._socket.once('data', (data) => {
+ sent = data;
+ });
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws._socket.once('data', (received) => {
+ assert.deepStrictEqual(
+ received.slice(0, 2),
+ Buffer.from([0x88, 0x80])
+ );
+ assert.deepStrictEqual(sent, Buffer.from([0x88, 0x00]));
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ });
+ ws.close();
+ });
+ });
+
+ it('works when close reason is not specified', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.close(1000));
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('close', (code, message) => {
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(message, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ });
+ });
+
+ it('works when close reason is specified', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => ws.close(1000, 'some reason'));
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('close', (code, message) => {
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(message, Buffer.from('some reason'));
+ wss.close(done);
+ });
+ });
+ });
+
+ it('permits all buffered data to be delivered', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const messages = [];
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(!isBinary);
+ messages.push(message.toString());
+ });
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.deepStrictEqual(messages, ['foo', 'bar', 'baz']);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ const callback = (err) => assert.ifError(err);
+
+ ws.send('foo', callback);
+ ws.send('bar', callback);
+ ws.send('baz', callback);
+ ws.close();
+ ws.close();
+ });
+ });
+
+ it('allows close code 1013', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1013);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.close(1013));
+ });
+
+ it('allows close code 1014', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1014);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.close(1014));
+ });
+
+ it('does nothing if `readyState` is `CLOSED`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+ ws.close();
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.close());
+ });
+
+ it('sets a timer for the closing handshake to complete', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(reason, Buffer.from('some reason'));
+ wss.close(done);
+ });
+
+ ws.on('open', () => {
+ let callbackCalled = false;
+
+ assert.strictEqual(ws._closeTimer, null);
+
+ ws.send('foo', () => {
+ callbackCalled = true;
+ });
+
+ ws.close(1000, 'some reason');
+
+ //
+ // Check that the close timer is set even if the `Sender.close()`
+ // callback is not called.
+ //
+ assert.strictEqual(callbackCalled, false);
+ assert.strictEqual(ws._closeTimer._idleTimeout, 30000);
+ });
+ });
+ });
+ });
+
+ describe('#terminate', () => {
+ it('closes the connection if called while connecting (1/2)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ ws.terminate();
+ });
+ });
+
+ it('closes the connection if called while connecting (2/2)', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ verifyClient: (info, cb) => setTimeout(cb, 300, true),
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ setTimeout(() => ws.terminate(), 150);
+ }
+ );
+ });
+
+ it('can be called from an error listener while connecting', (done) => {
+ const ws = new WebSocket('ws://localhost:1337');
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(err.code, 'ECONNREFUSED');
+ ws.terminate();
+ ws.on('close', () => done());
+ });
+ }).timeout(4000);
+
+ it("can be called from a listener of the 'redirect' event", (done) => {
+ const server = http.createServer();
+
+ server.once('upgrade', (req, socket) => {
+ socket.end('HTTP/1.1 302 Found\r\nLocation: /foo\r\n\r\n');
+ });
+
+ server.listen(() => {
+ const port = server.address().port;
+ const ws = new WebSocket(`ws://localhost:${port}`, {
+ followRedirects: true
+ });
+
+ ws.on('open', () => {
+ done(new Error("Unexpected 'open' event"));
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ server.close(done);
+ });
+ });
+
+ ws.on('redirect', () => {
+ ws.terminate();
+ });
+ });
+ });
+
+ it("can be called from a listener of the 'upgrade' event", (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => done(new Error("Unexpected 'open' event")));
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'WebSocket was closed before the connection was established'
+ );
+ ws.on('close', () => wss.close(done));
+ });
+ ws.on('upgrade', () => ws.terminate());
+ });
+ });
+
+ it('does nothing if `readyState` is `CLOSED`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(ws.readyState, WebSocket.CLOSED);
+ ws.terminate();
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.terminate());
+ });
+ });
+
+ describe('WHATWG API emulation', () => {
+ it('supports the `on{close,error,message,open}` attributes', () => {
+ for (const property of ['onclose', 'onerror', 'onmessage', 'onopen']) {
+ const descriptor = Object.getOwnPropertyDescriptor(
+ WebSocket.prototype,
+ property
+ );
+
+ assert.strictEqual(descriptor.configurable, true);
+ assert.strictEqual(descriptor.enumerable, true);
+ assert.ok(descriptor.get !== undefined);
+ assert.ok(descriptor.set !== undefined);
+ }
+
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ assert.strictEqual(ws.onmessage, null);
+ assert.strictEqual(ws.onclose, null);
+ assert.strictEqual(ws.onerror, null);
+ assert.strictEqual(ws.onopen, null);
+
+ ws.onmessage = NOOP;
+ ws.onerror = NOOP;
+ ws.onclose = NOOP;
+ ws.onopen = NOOP;
+
+ assert.strictEqual(ws.onmessage, NOOP);
+ assert.strictEqual(ws.onclose, NOOP);
+ assert.strictEqual(ws.onerror, NOOP);
+ assert.strictEqual(ws.onopen, NOOP);
+
+ ws.onmessage = 'foo';
+
+ assert.strictEqual(ws.onmessage, null);
+ assert.strictEqual(ws.listenerCount('message'), 0);
+ });
+
+ it('works like the `EventEmitter` interface', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.onmessage = (messageEvent) => {
+ assert.strictEqual(messageEvent.data, 'foo');
+ ws.onclose = (closeEvent) => {
+ assert.strictEqual(closeEvent.wasClean, true);
+ assert.strictEqual(closeEvent.code, 1005);
+ assert.strictEqual(closeEvent.reason, '');
+ wss.close(done);
+ };
+ ws.close();
+ };
+
+ ws.onopen = () => ws.send('foo');
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ ws.send(msg, { binary: isBinary });
+ });
+ });
+ });
+
+ it("doesn't return listeners added with `on`", () => {
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.on('open', NOOP);
+
+ assert.deepStrictEqual(ws.listeners('open'), [NOOP]);
+ assert.strictEqual(ws.onopen, null);
+ });
+
+ it("doesn't remove listeners added with `on`", () => {
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.on('close', NOOP);
+ ws.onclose = NOOP;
+
+ let listeners = ws.listeners('close');
+
+ assert.strictEqual(listeners.length, 2);
+ assert.strictEqual(listeners[0], NOOP);
+ assert.strictEqual(listeners[1][kListener], NOOP);
+
+ ws.onclose = NOOP;
+
+ listeners = ws.listeners('close');
+
+ assert.strictEqual(listeners.length, 2);
+ assert.strictEqual(listeners[0], NOOP);
+ assert.strictEqual(listeners[1][kListener], NOOP);
+ });
+
+ it('supports the `addEventListener` method', () => {
+ const events = [];
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.addEventListener('foo', () => {});
+ assert.strictEqual(ws.listenerCount('foo'), 0);
+
+ ws.addEventListener('open', () => {
+ events.push('open');
+ assert.strictEqual(ws.listenerCount('open'), 1);
+ });
+
+ assert.strictEqual(ws.listenerCount('open'), 1);
+
+ ws.addEventListener(
+ 'message',
+ () => {
+ events.push('message');
+ assert.strictEqual(ws.listenerCount('message'), 0);
+ },
+ { once: true }
+ );
+
+ assert.strictEqual(ws.listenerCount('message'), 1);
+
+ ws.emit('open');
+ ws.emit('message', EMPTY_BUFFER, false);
+
+ assert.deepStrictEqual(events, ['open', 'message']);
+ });
+
+ it("doesn't return listeners added with `addEventListener`", () => {
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.addEventListener('open', NOOP);
+
+ const listeners = ws.listeners('open');
+
+ assert.strictEqual(listeners.length, 1);
+ assert.strictEqual(listeners[0][kListener], NOOP);
+
+ assert.strictEqual(ws.onopen, null);
+ });
+
+ it("doesn't remove listeners added with `addEventListener`", () => {
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.addEventListener('close', NOOP);
+ ws.onclose = NOOP;
+
+ let listeners = ws.listeners('close');
+
+ assert.strictEqual(listeners.length, 2);
+ assert.strictEqual(listeners[0][kListener], NOOP);
+ assert.strictEqual(listeners[1][kListener], NOOP);
+
+ ws.onclose = NOOP;
+
+ listeners = ws.listeners('close');
+
+ assert.strictEqual(listeners.length, 2);
+ assert.strictEqual(listeners[0][kListener], NOOP);
+ assert.strictEqual(listeners[1][kListener], NOOP);
+ });
+
+ it('supports the `removeEventListener` method', () => {
+ const ws = new WebSocket('ws://localhost', { agent: new CustomAgent() });
+
+ ws.addEventListener('message', NOOP);
+ ws.addEventListener('open', NOOP);
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+ assert.strictEqual(ws.listeners('open')[0][kListener], NOOP);
+
+ ws.removeEventListener('message', () => {});
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+
+ ws.removeEventListener('message', NOOP);
+ ws.removeEventListener('open', NOOP);
+
+ assert.strictEqual(ws.listenerCount('message'), 0);
+ assert.strictEqual(ws.listenerCount('open'), 0);
+
+ ws.addEventListener('message', NOOP, { once: true });
+ ws.addEventListener('open', NOOP, { once: true });
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+ assert.strictEqual(ws.listeners('open')[0][kListener], NOOP);
+
+ ws.removeEventListener('message', () => {});
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+
+ ws.removeEventListener('message', NOOP);
+ ws.removeEventListener('open', NOOP);
+
+ assert.strictEqual(ws.listenerCount('message'), 0);
+ assert.strictEqual(ws.listenerCount('open'), 0);
+
+ // Multiple listeners.
+ ws.addEventListener('message', NOOP);
+ ws.addEventListener('message', NOOP);
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+ assert.strictEqual(ws.listeners('message')[1][kListener], NOOP);
+
+ ws.removeEventListener('message', NOOP);
+
+ assert.strictEqual(ws.listeners('message')[0][kListener], NOOP);
+
+ ws.removeEventListener('message', NOOP);
+
+ assert.strictEqual(ws.listenerCount('message'), 0);
+
+ // Listeners not added with `websocket.addEventListener()`.
+ ws.on('message', NOOP);
+
+ assert.deepStrictEqual(ws.listeners('message'), [NOOP]);
+
+ ws.removeEventListener('message', NOOP);
+
+ assert.deepStrictEqual(ws.listeners('message'), [NOOP]);
+
+ ws.onclose = NOOP;
+
+ assert.strictEqual(ws.listeners('close')[0][kListener], NOOP);
+
+ ws.removeEventListener('close', NOOP);
+
+ assert.strictEqual(ws.listeners('close')[0][kListener], NOOP);
+ });
+
+ it('wraps text data in a `MessageEvent`', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.addEventListener('open', () => {
+ ws.send('hi');
+ ws.close();
+ });
+
+ ws.addEventListener('message', (event) => {
+ assert.ok(event instanceof MessageEvent);
+ assert.strictEqual(event.data, 'hi');
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ ws.send(msg, { binary: isBinary });
+ });
+ });
+ });
+
+ it('receives a `CloseEvent` when server closes (1000)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.addEventListener('close', (event) => {
+ assert.ok(event instanceof CloseEvent);
+ assert.ok(event.wasClean);
+ assert.strictEqual(event.reason, '');
+ assert.strictEqual(event.code, 1000);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.close(1000));
+ });
+
+ it('receives a `CloseEvent` when server closes (4000)', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.addEventListener('close', (event) => {
+ assert.ok(event instanceof CloseEvent);
+ assert.ok(event.wasClean);
+ assert.strictEqual(event.reason, 'some daft reason');
+ assert.strictEqual(event.code, 4000);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => ws.close(4000, 'some daft reason'));
+ });
+
+ it('sets `target` and `type` on events', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const err = new Error('forced');
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.addEventListener('open', (event) => {
+ assert.ok(event instanceof Event);
+ assert.strictEqual(event.type, 'open');
+ assert.strictEqual(event.target, ws);
+ });
+ ws.addEventListener('message', (event) => {
+ assert.ok(event instanceof MessageEvent);
+ assert.strictEqual(event.type, 'message');
+ assert.strictEqual(event.target, ws);
+ ws.close();
+ });
+ ws.addEventListener('close', (event) => {
+ assert.ok(event instanceof CloseEvent);
+ assert.strictEqual(event.type, 'close');
+ assert.strictEqual(event.target, ws);
+ ws.emit('error', err);
+ });
+ ws.addEventListener('error', (event) => {
+ assert.ok(event instanceof ErrorEvent);
+ assert.strictEqual(event.message, 'forced');
+ assert.strictEqual(event.type, 'error');
+ assert.strictEqual(event.target, ws);
+ assert.strictEqual(event.error, err);
+
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (client) => client.send('hi'));
+ });
+
+ it('passes binary data as a Node.js `Buffer` by default', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.onmessage = (evt) => {
+ assert.ok(Buffer.isBuffer(evt.data));
+ wss.close(done);
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send(new Uint8Array(4096));
+ ws.close();
+ });
+ });
+
+ it('ignores `binaryType` for text messages', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.binaryType = 'arraybuffer';
+
+ ws.onmessage = (evt) => {
+ assert.strictEqual(evt.data, 'foo');
+ wss.close(done);
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.send('foo');
+ ws.close();
+ });
+ });
+
+ it('allows to update `binaryType` on the fly', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ function testType(binaryType, next) {
+ const buf = Buffer.from(binaryType);
+ ws.binaryType = binaryType;
+
+ ws.onmessage = (evt) => {
+ if (binaryType === 'nodebuffer') {
+ assert.ok(Buffer.isBuffer(evt.data));
+ assert.ok(evt.data.equals(buf));
+ } else if (binaryType === 'arraybuffer') {
+ assert.ok(evt.data instanceof ArrayBuffer);
+ assert.ok(Buffer.from(evt.data).equals(buf));
+ } else if (binaryType === 'fragments') {
+ assert.deepStrictEqual(evt.data, [buf]);
+ }
+ next();
+ };
+
+ ws.send(buf);
+ }
+
+ ws.onopen = () => {
+ testType('nodebuffer', () => {
+ testType('arraybuffer', () => {
+ testType('fragments', () => {
+ ws.close();
+ wss.close(done);
+ });
+ });
+ });
+ };
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (msg, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(msg);
+ });
+ });
+ });
+ });
+
+ describe('SSL', () => {
+ it('connects to secure websocket server', (done) => {
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', () => {
+ server.close(done);
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://127.0.0.1:${server.address().port}`, {
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', ws.close);
+ });
+ });
+
+ it('connects to secure websocket server with client side certificate', (done) => {
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ ca: [fs.readFileSync('test/fixtures/ca-certificate.pem')],
+ key: fs.readFileSync('test/fixtures/key.pem'),
+ requestCert: true
+ });
+
+ const wss = new WebSocket.Server({ noServer: true });
+
+ server.on('upgrade', (request, socket, head) => {
+ assert.ok(socket.authorized);
+
+ wss.handleUpgrade(request, socket, head, (ws) => {
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1005);
+ server.close(done);
+ });
+ });
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ cert: fs.readFileSync('test/fixtures/client-certificate.pem'),
+ key: fs.readFileSync('test/fixtures/client-key.pem'),
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', ws.close);
+ });
+ });
+
+ it('cannot connect to secure websocket server via ws://', (done) => {
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const wss = new WebSocket.Server({ server });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`ws://localhost:${server.address().port}`, {
+ rejectUnauthorized: false
+ });
+
+ ws.on('error', () => {
+ server.close(done);
+ wss.close();
+ });
+ });
+ });
+
+ it('can send and receive text data', (done) => {
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from('foobar'));
+ assert.ok(!isBinary);
+ server.close(done);
+ });
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', () => {
+ ws.send('foobar');
+ ws.close();
+ });
+ });
+ });
+
+ it('can send a big binary message', (done) => {
+ const buf = crypto.randomBytes(5 * 1024 * 1024);
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem')
+ });
+ const wss = new WebSocket.Server({ server });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(message);
+ ws.close();
+ });
+ });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ rejectUnauthorized: false
+ });
+
+ ws.on('open', () => ws.send(buf));
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, buf);
+ assert.ok(isBinary);
+
+ server.close(done);
+ });
+ });
+ }).timeout(4000);
+
+ it('allows to disable sending the SNI extension', (done) => {
+ const original = tls.connect;
+
+ tls.connect = (options) => {
+ assert.strictEqual(options.servername, '');
+ tls.connect = original;
+ done();
+ };
+
+ const ws = new WebSocket('wss://127.0.0.1', { servername: '' });
+ });
+
+ it("works around a double 'error' event bug in Node.js", function (done) {
+ //
+ // The `minVersion` and `maxVersion` options are not supported in
+ // Node.js < 10.16.0.
+ //
+ if (process.versions.modules < 64) return this.skip();
+
+ //
+ // The `'error'` event can be emitted multiple times by the
+ // `http.ClientRequest` object in Node.js < 13. This test reproduces the
+ // issue in Node.js 12.
+ //
+ const server = https.createServer({
+ cert: fs.readFileSync('test/fixtures/certificate.pem'),
+ key: fs.readFileSync('test/fixtures/key.pem'),
+ minVersion: 'TLSv1.2'
+ });
+ const wss = new WebSocket.Server({ server });
+
+ server.listen(0, () => {
+ const ws = new WebSocket(`wss://localhost:${server.address().port}`, {
+ maxVersion: 'TLSv1.1',
+ rejectUnauthorized: false
+ });
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof Error);
+ server.close(done);
+ wss.close();
+ });
+ });
+ });
+ });
+
+ describe('Request headers', () => {
+ it('adds the authorization header if the url has userinfo', (done) => {
+ const agent = new CustomAgent();
+ const userinfo = 'test:testpass';
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('authorization'),
+ `Basic ${Buffer.from(userinfo).toString('base64')}`
+ );
+ done();
+ };
+
+ const ws = new WebSocket(`ws://${userinfo}@localhost`, { agent });
+ });
+
+ it('honors the `auth` option', (done) => {
+ const agent = new CustomAgent();
+ const auth = 'user:pass';
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('authorization'),
+ `Basic ${Buffer.from(auth).toString('base64')}`
+ );
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', { agent, auth });
+ });
+
+ it('favors the url userinfo over the `auth` option', (done) => {
+ const agent = new CustomAgent();
+ const auth = 'foo:bar';
+ const userinfo = 'baz:qux';
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('authorization'),
+ `Basic ${Buffer.from(userinfo).toString('base64')}`
+ );
+ done();
+ };
+
+ const ws = new WebSocket(`ws://${userinfo}@localhost`, { agent, auth });
+ });
+
+ it('adds custom headers', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(req.getHeader('cookie'), 'foo=bar');
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', {
+ headers: { Cookie: 'foo=bar' },
+ agent
+ });
+ });
+
+ it('excludes default ports from host header', () => {
+ const options = { lookup() {} };
+ const variants = [
+ ['wss://localhost:8443', 'localhost:8443'],
+ ['wss://localhost:443', 'localhost'],
+ ['ws://localhost:88', 'localhost:88'],
+ ['ws://localhost:80', 'localhost']
+ ];
+
+ for (const [url, host] of variants) {
+ const ws = new WebSocket(url, options);
+ assert.strictEqual(ws._req.getHeader('host'), host);
+ }
+ });
+
+ it("doesn't add the origin header by default", (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(req.getHeader('origin'), undefined);
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', { agent });
+ });
+
+ it('honors the `origin` option (1/2)', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(req.getHeader('origin'), 'https://example.com:8000');
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', {
+ origin: 'https://example.com:8000',
+ agent
+ });
+ });
+
+ it('honors the `origin` option (2/2)', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('sec-websocket-origin'),
+ 'https://example.com:8000'
+ );
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', {
+ origin: 'https://example.com:8000',
+ protocolVersion: 8,
+ agent
+ });
+ });
+ });
+
+ describe('permessage-deflate', () => {
+ it('is enabled by default', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('sec-websocket-extensions'),
+ 'permessage-deflate; client_max_window_bits'
+ );
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', { agent });
+ });
+
+ it('can be disabled', (done) => {
+ const agent = new CustomAgent();
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(
+ req.getHeader('sec-websocket-extensions'),
+ undefined
+ );
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', {
+ perMessageDeflate: false,
+ agent
+ });
+ });
+
+ it('can send extension parameters', (done) => {
+ const agent = new CustomAgent();
+
+ const value =
+ 'permessage-deflate; server_no_context_takeover;' +
+ ' client_no_context_takeover; server_max_window_bits=10;' +
+ ' client_max_window_bits';
+
+ agent.addRequest = (req) => {
+ assert.strictEqual(req.getHeader('sec-websocket-extensions'), value);
+ done();
+ };
+
+ const ws = new WebSocket('ws://localhost', {
+ perMessageDeflate: {
+ clientNoContextTakeover: true,
+ serverNoContextTakeover: true,
+ clientMaxWindowBits: true,
+ serverMaxWindowBits: 10
+ },
+ agent
+ });
+ });
+
+ it('consumes all received data when connection is closed (1/2)', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const messages = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws._socket.on('close', () => {
+ assert.strictEqual(ws._receiver._state, 5);
+ });
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(!isBinary);
+ messages.push(message.toString());
+ });
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ assert.deepStrictEqual(messages, ['foo', 'bar', 'baz', 'qux']);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.send('foo');
+ ws.send('bar');
+ ws.send('baz');
+ ws.send('qux', () => ws._socket.end());
+ });
+ });
+
+ it('consumes all received data when connection is closed (2/2)', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const messageLengths = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws._socket.prependListener('close', () => {
+ assert.strictEqual(ws._receiver._state, 5);
+ assert.strictEqual(ws._socket._readableState.length, 3);
+ });
+
+ const push = ws._socket.push;
+
+ // Override `ws._socket.push()` to know exactly when data is
+ // received and call `ws.terminate()` immediately after that without
+ // relying on a timer.
+ ws._socket.push = (data) => {
+ ws._socket.push = push;
+ ws._socket.push(data);
+ ws.terminate();
+ };
+
+ const payload1 = Buffer.alloc(15 * 1024);
+ const payload2 = Buffer.alloc(1);
+
+ const opts = {
+ fin: true,
+ opcode: 0x02,
+ mask: false,
+ readOnly: false
+ };
+
+ const list = [
+ ...Sender.frame(payload1, { rsv1: false, ...opts }),
+ ...Sender.frame(payload2, { rsv1: true, ...opts })
+ ];
+
+ for (let i = 0; i < 399; i++) {
+ list.push(list[list.length - 2], list[list.length - 1]);
+ }
+
+ // This hack is used because there is no guarantee that more than
+ // 16 KiB will be sent as a single TCP packet.
+ push.call(ws._socket, Buffer.concat(list));
+
+ wss.clients
+ .values()
+ .next()
+ .value.send(payload2, { compress: false });
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(isBinary);
+ messageLengths.push(message.length);
+ });
+
+ ws.on('close', (code) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(messageLengths.length, 402);
+ assert.strictEqual(messageLengths[0], 15360);
+ assert.strictEqual(messageLengths[messageLengths.length - 1], 1);
+ wss.close(done);
+ });
+ }
+ );
+ });
+
+ it('handles a close frame received while compressing data', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws._receiver.on('conclude', () => {
+ assert.ok(ws._sender._deflating);
+ });
+
+ ws.send('foo');
+ ws.send('bar');
+ ws.send('baz');
+ ws.send('qux');
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ const messages = [];
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(!isBinary);
+ messages.push(message.toString());
+ });
+
+ ws.on('close', (code, reason) => {
+ assert.deepStrictEqual(messages, ['foo', 'bar', 'baz', 'qux']);
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+
+ ws.close(1000);
+ });
+ });
+
+ describe('#close', () => {
+ it('can be used while data is being decompressed', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const messages = [];
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('open', () => {
+ ws._socket.on('end', () => {
+ assert.strictEqual(ws._receiver._state, 5);
+ });
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(!isBinary);
+
+ if (messages.push(message.toString()) > 1) return;
+
+ ws.close(1000);
+ });
+
+ ws.on('close', (code, reason) => {
+ assert.deepStrictEqual(messages, ['', '', '', '']);
+ assert.strictEqual(code, 1000);
+ assert.deepStrictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ const buf = Buffer.from('c10100c10100c10100c10100', 'hex');
+ ws._socket.write(buf);
+ });
+ });
+ });
+
+ describe('#send', () => {
+ it('can send text data', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send('hi', { compress: true });
+ ws.close();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from('hi'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ ws.send(message, { binary: isBinary, compress: true });
+ });
+ });
+ });
+
+ it('can send a `TypedArray`', (done) => {
+ const array = new Float32Array(5);
+
+ for (let i = 0; i < array.length; i++) {
+ array[i] = i / 2;
+ }
+
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send(array, { compress: true });
+ ws.close();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from(array.buffer));
+ assert.ok(isBinary);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(message, { compress: true });
+ });
+ });
+ });
+
+ it('can send an `ArrayBuffer`', (done) => {
+ const array = new Float32Array(5);
+
+ for (let i = 0; i < array.length; i++) {
+ array[i] = i / 2;
+ }
+
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send(array.buffer, { compress: true });
+ ws.close();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from(array.buffer));
+ assert.ok(isBinary);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ assert.ok(isBinary);
+ ws.send(message, { compress: true });
+ });
+ });
+ });
+
+ it('ignores the `compress` option if the extension is disabled', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: false
+ });
+
+ ws.on('open', () => {
+ ws.send('hi', { compress: true });
+ ws.close();
+ });
+
+ ws.on('message', (message, isBinary) => {
+ assert.deepStrictEqual(message, Buffer.from('hi'));
+ assert.ok(!isBinary);
+ wss.close(done);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('message', (message, isBinary) => {
+ ws.send(message, { binary: isBinary, compress: true });
+ });
+ });
+ });
+
+ it('calls the callback if the socket is closed prematurely', (done) => {
+ const called = [];
+ const wss = new WebSocket.Server(
+ { perMessageDeflate: true, port: 0 },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send('foo');
+ ws.send('bar', (err) => {
+ called.push(1);
+
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'The socket was closed while data was being compressed'
+ );
+ });
+ ws.send('baz');
+ ws.send('qux', (err) => {
+ called.push(2);
+
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'The socket was closed while data was being compressed'
+ );
+ });
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ ws.on('close', () => {
+ assert.deepStrictEqual(called, [1, 2]);
+ wss.close(done);
+ });
+
+ ws._socket.end();
+ });
+ });
+ });
+
+ describe('#terminate', () => {
+ it('can be used while data is being compressed', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: { threshold: 0 },
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`, {
+ perMessageDeflate: { threshold: 0 }
+ });
+
+ ws.on('open', () => {
+ ws.send('hi', (err) => {
+ assert.strictEqual(ws.readyState, WebSocket.CLOSING);
+ assert.ok(err instanceof Error);
+ assert.strictEqual(
+ err.message,
+ 'The socket was closed while data was being compressed'
+ );
+
+ ws.on('close', () => {
+ wss.close(done);
+ });
+ });
+ ws.terminate();
+ });
+ }
+ );
+ });
+
+ it('can be used while data is being decompressed', (done) => {
+ const wss = new WebSocket.Server(
+ {
+ perMessageDeflate: true,
+ port: 0
+ },
+ () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ const messages = [];
+
+ ws.on('message', (message, isBinary) => {
+ assert.ok(!isBinary);
+
+ if (messages.push(message.toString()) > 1) return;
+
+ process.nextTick(() => {
+ assert.strictEqual(ws._receiver._state, 5);
+ ws.terminate();
+ });
+ });
+
+ ws.on('close', (code, reason) => {
+ assert.deepStrictEqual(messages, ['', '', '', '']);
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ }
+ );
+
+ wss.on('connection', (ws) => {
+ const buf = Buffer.from('c10100c10100c10100c10100', 'hex');
+ ws._socket.write(buf);
+ });
+ });
+ });
+ });
+
+ describe('Connection close', () => {
+ it('closes cleanly after simultaneous errors (1/2)', (done) => {
+ let clientCloseEventEmitted = false;
+ let serverClientCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ clientCloseEventEmitted = true;
+ if (serverClientCloseEventEmitted) wss.close(done);
+ });
+ });
+
+ ws.on('open', () => {
+ // Write an invalid frame in both directions to trigger simultaneous
+ // failure.
+ const chunk = Buffer.from([0x85, 0x00]);
+
+ wss.clients.values().next().value._socket.write(chunk);
+ ws._socket.write(chunk);
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ serverClientCloseEventEmitted = true;
+ if (clientCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+ });
+
+ it('closes cleanly after simultaneous errors (2/2)', (done) => {
+ let clientCloseEventEmitted = false;
+ let serverClientCloseEventEmitted = false;
+
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ clientCloseEventEmitted = true;
+ if (serverClientCloseEventEmitted) wss.close(done);
+ });
+ });
+
+ ws.on('open', () => {
+ // Write an invalid frame in both directions and change the
+ // `readyState` to `WebSocket.CLOSING`.
+ const chunk = Buffer.from([0x85, 0x00]);
+ const serverWs = wss.clients.values().next().value;
+
+ serverWs._socket.write(chunk);
+ serverWs.close();
+
+ ws._socket.write(chunk);
+ ws.close();
+ });
+ });
+
+ wss.on('connection', (ws) => {
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_INVALID_OPCODE');
+ assert.strictEqual(
+ err.message,
+ 'Invalid WebSocket frame: invalid opcode 5'
+ );
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+
+ serverClientCloseEventEmitted = true;
+ if (clientCloseEventEmitted) wss.close(done);
+ });
+ });
+ });
+ });
+
+ it('resumes the socket when an error occurs', (done) => {
+ const maxPayload = 16 * 1024;
+ const wss = new WebSocket.Server({ maxPayload, port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ const list = [
+ ...Sender.frame(Buffer.alloc(maxPayload + 1), {
+ fin: true,
+ opcode: 0x02,
+ mask: true,
+ readOnly: false
+ })
+ ];
+
+ ws.on('error', (err) => {
+ assert.ok(err instanceof RangeError);
+ assert.strictEqual(err.code, 'WS_ERR_UNSUPPORTED_MESSAGE_LENGTH');
+ assert.strictEqual(err.message, 'Max payload size exceeded');
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1006);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+ });
+
+ ws._socket.push(Buffer.concat(list));
+ });
+ });
+
+ it('resumes the socket when the close frame is received', (done) => {
+ const wss = new WebSocket.Server({ port: 0 }, () => {
+ const ws = new WebSocket(`ws://localhost:${wss.address().port}`);
+ });
+
+ wss.on('connection', (ws) => {
+ const opts = { fin: true, mask: true, readOnly: false };
+ const list = [
+ ...Sender.frame(Buffer.alloc(16 * 1024), { opcode: 0x02, ...opts }),
+ ...Sender.frame(EMPTY_BUFFER, { opcode: 0x08, ...opts })
+ ];
+
+ ws.on('close', (code, reason) => {
+ assert.strictEqual(code, 1005);
+ assert.strictEqual(reason, EMPTY_BUFFER);
+ wss.close(done);
+ });
+
+ ws._socket.push(Buffer.concat(list));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-ws/wrapper.mjs b/testing/xpcshell/node-ws/wrapper.mjs
new file mode 100644
index 0000000000..7245ad15d0
--- /dev/null
+++ b/testing/xpcshell/node-ws/wrapper.mjs
@@ -0,0 +1,8 @@
+import createWebSocketStream from './lib/stream.js';
+import Receiver from './lib/receiver.js';
+import Sender from './lib/sender.js';
+import WebSocket from './lib/websocket.js';
+import WebSocketServer from './lib/websocket-server.js';
+
+export { createWebSocketStream, Receiver, Sender, WebSocket, WebSocketServer };
+export default WebSocket;
diff --git a/testing/xpcshell/odoh-wasm/Cargo.toml b/testing/xpcshell/odoh-wasm/Cargo.toml
new file mode 100644
index 0000000000..f709e66139
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/Cargo.toml
@@ -0,0 +1,42 @@
+[workspace]
+
+[package]
+name = "odoh-wasm"
+version = "0.1.0"
+authors = ["Kershaw Chang <kershaw@mozilla.com>"]
+edition = "2018"
+
+[lib]
+crate-type = ["cdylib", "rlib"]
+
+[features]
+default = ["console_error_panic_hook"]
+
+[dependencies]
+wasm-bindgen = "0.2.63"
+odoh-rs = "=0.1.10"
+hpke = "=0.5.0"
+js-sys = "0.3"
+hex = "0.4"
+futures = "0.3.1"
+rand = "=0.7"
+
+# The `console_error_panic_hook` crate provides better debugging of panics by
+# logging them with `console.error`. This is great for development, but requires
+# all the `std::fmt` and `std::panicking` infrastructure, so isn't great for
+# code size when deploying.
+console_error_panic_hook = { version = "0.1.6", optional = true }
+
+# `wee_alloc` is a tiny allocator for wasm that is only ~1K in code size
+# compared to the default allocator's ~10K. It is slower than the default
+# allocator, however.
+#
+# Unfortunately, `wee_alloc` requires nightly Rust when targeting wasm for now.
+wee_alloc = { version = "0.4.5", optional = true }
+
+[dev-dependencies]
+wasm-bindgen-test = "0.3.13"
+
+[profile.release]
+# Tell `rustc` to optimize for small code size.
+opt-level = "s"
diff --git a/testing/xpcshell/odoh-wasm/LICENSE_APACHE b/testing/xpcshell/odoh-wasm/LICENSE_APACHE
new file mode 100644
index 0000000000..1b5ec8b78e
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/LICENSE_APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/testing/xpcshell/odoh-wasm/LICENSE_MIT b/testing/xpcshell/odoh-wasm/LICENSE_MIT
new file mode 100644
index 0000000000..681a58f76e
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/LICENSE_MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2018 Kershaw Chang <kershaw@mozilla.com>
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/testing/xpcshell/odoh-wasm/README.md b/testing/xpcshell/odoh-wasm/README.md
new file mode 100644
index 0000000000..d3e413bfec
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/README.md
@@ -0,0 +1,75 @@
+<div align="center">
+
+ <h1><code>wasm-pack-template</code></h1>
+
+ <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong>
+
+ <p>
+ <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a>
+ </p>
+
+ <h3>
+ <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a>
+ <span> | </span>
+ <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a>
+ </h3>
+
+ <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub>
+</div>
+
+## About
+
+[**📚 Read this template tutorial! 📚**][template-docs]
+
+This template is designed for compiling Rust libraries into WebAssembly and
+publishing the resulting package to NPM.
+
+Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other
+templates and usages of `wasm-pack`.
+
+[tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html
+[template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html
+
+## 🚴 Usage
+
+### 🐑 Use `cargo generate` to Clone this Template
+
+[Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate)
+
+```
+cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project
+cd my-project
+```
+
+### 🛠️ Build with `wasm-pack build`
+
+```
+wasm-pack build
+```
+
+### 🛠️ Build a Node.js module with `wasm-pack build --target nodejs`
+
+```
+wasm-pack build --target nodejs
+```
+
+### 🔬 Test in Headless Browsers with `wasm-pack test`
+
+```
+wasm-pack test --headless --firefox
+```
+
+### 🎁 Publish to NPM with `wasm-pack publish`
+
+```
+wasm-pack publish
+```
+
+## 🔋 Batteries Included
+
+* [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating
+ between WebAssembly and JavaScript.
+* [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook)
+ for logging panic messages to the developer console.
+* [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized
+ for small code size.
diff --git a/testing/xpcshell/odoh-wasm/pkg/README.md b/testing/xpcshell/odoh-wasm/pkg/README.md
new file mode 100644
index 0000000000..1e4617a6d2
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/README.md
@@ -0,0 +1,69 @@
+<div align="center">
+
+ <h1><code>wasm-pack-template</code></h1>
+
+ <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong>
+
+ <p>
+ <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a>
+ </p>
+
+ <h3>
+ <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a>
+ <span> | </span>
+ <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a>
+ </h3>
+
+ <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub>
+</div>
+
+## About
+
+[**📚 Read this template tutorial! 📚**][template-docs]
+
+This template is designed for compiling Rust libraries into WebAssembly and
+publishing the resulting package to NPM.
+
+Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other
+templates and usages of `wasm-pack`.
+
+[tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html
+[template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html
+
+## 🚴 Usage
+
+### 🐑 Use `cargo generate` to Clone this Template
+
+[Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate)
+
+```
+cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project
+cd my-project
+```
+
+### 🛠️ Build with `wasm-pack build`
+
+```
+wasm-pack build
+```
+
+### 🔬 Test in Headless Browsers with `wasm-pack test`
+
+```
+wasm-pack test --headless --firefox
+```
+
+### 🎁 Publish to NPM with `wasm-pack publish`
+
+```
+wasm-pack publish
+```
+
+## 🔋 Batteries Included
+
+* [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating
+ between WebAssembly and JavaScript.
+* [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook)
+ for logging panic messages to the developer console.
+* [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized
+ for small code size.
diff --git a/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.d.ts b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.d.ts
new file mode 100644
index 0000000000..1f90ef6591
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.d.ts
@@ -0,0 +1,16 @@
+/* tslint:disable */
+/* eslint-disable */
+/**
+* @returns {Uint8Array}
+*/
+export function get_odoh_config(): Uint8Array;
+/**
+* @param {Uint8Array} odoh_encrypted_query_msg
+* @returns {Uint8Array}
+*/
+export function decrypt_query(odoh_encrypted_query_msg: Uint8Array): Uint8Array;
+/**
+* @param {Uint8Array} response
+* @returns {Uint8Array}
+*/
+export function create_response(response: Uint8Array): Uint8Array;
diff --git a/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.js b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.js
new file mode 100644
index 0000000000..14b97d7436
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm.js
@@ -0,0 +1,132 @@
+let imports = {};
+imports['__wbindgen_placeholder__'] = module.exports;
+let wasm;
+const { TextDecoder } = require(`util`);
+
+const heap = new Array(32).fill(undefined);
+
+heap.push(undefined, null, true, false);
+
+function getObject(idx) { return heap[idx]; }
+
+let heap_next = heap.length;
+
+function dropObject(idx) {
+ if (idx < 36) return;
+ heap[idx] = heap_next;
+ heap_next = idx;
+}
+
+function takeObject(idx) {
+ const ret = getObject(idx);
+ dropObject(idx);
+ return ret;
+}
+
+let cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true });
+
+cachedTextDecoder.decode();
+
+let cachegetUint8Memory0 = null;
+function getUint8Memory0() {
+ if (cachegetUint8Memory0 === null || cachegetUint8Memory0.buffer !== wasm.memory.buffer) {
+ cachegetUint8Memory0 = new Uint8Array(wasm.memory.buffer);
+ }
+ return cachegetUint8Memory0;
+}
+
+function getStringFromWasm0(ptr, len) {
+ return cachedTextDecoder.decode(getUint8Memory0().subarray(ptr, ptr + len));
+}
+
+function addHeapObject(obj) {
+ if (heap_next === heap.length) heap.push(heap.length + 1);
+ const idx = heap_next;
+ heap_next = heap[idx];
+
+ heap[idx] = obj;
+ return idx;
+}
+/**
+* @returns {Uint8Array}
+*/
+module.exports.get_odoh_config = function() {
+ var ret = wasm.get_odoh_config();
+ return takeObject(ret);
+};
+
+let WASM_VECTOR_LEN = 0;
+
+function passArray8ToWasm0(arg, malloc) {
+ const ptr = malloc(arg.length * 1);
+ getUint8Memory0().set(arg, ptr / 1);
+ WASM_VECTOR_LEN = arg.length;
+ return ptr;
+}
+/**
+* @param {Uint8Array} odoh_encrypted_query_msg
+* @returns {Uint8Array}
+*/
+module.exports.decrypt_query = function(odoh_encrypted_query_msg) {
+ var ptr0 = passArray8ToWasm0(odoh_encrypted_query_msg, wasm.__wbindgen_malloc);
+ var len0 = WASM_VECTOR_LEN;
+ var ret = wasm.decrypt_query(ptr0, len0);
+ return takeObject(ret);
+};
+
+/**
+* @param {Uint8Array} response
+* @returns {Uint8Array}
+*/
+module.exports.create_response = function(response) {
+ var ptr0 = passArray8ToWasm0(response, wasm.__wbindgen_malloc);
+ var len0 = WASM_VECTOR_LEN;
+ var ret = wasm.create_response(ptr0, len0);
+ return takeObject(ret);
+};
+
+module.exports.__wbindgen_object_drop_ref = function(arg0) {
+ takeObject(arg0);
+};
+
+module.exports.__wbg_log_b3f203d9e6882397 = function(arg0, arg1) {
+ console.log(getStringFromWasm0(arg0, arg1));
+};
+
+module.exports.__wbg_buffer_eb2155f17856c20b = function(arg0) {
+ var ret = getObject(arg0).buffer;
+ return addHeapObject(ret);
+};
+
+module.exports.__wbg_newwithbyteoffsetandlength_7d07f77c6d0d8e26 = function(arg0, arg1, arg2) {
+ var ret = new Uint8Array(getObject(arg0), arg1 >>> 0, arg2 >>> 0);
+ return addHeapObject(ret);
+};
+
+module.exports.__wbg_new_ff8b26f7b2d7e2fb = function(arg0) {
+ var ret = new Uint8Array(getObject(arg0));
+ return addHeapObject(ret);
+};
+
+module.exports.__wbg_newwithlength_a49b32b2030b93c3 = function(arg0) {
+ var ret = new Uint8Array(arg0 >>> 0);
+ return addHeapObject(ret);
+};
+
+module.exports.__wbindgen_throw = function(arg0, arg1) {
+ throw new Error(getStringFromWasm0(arg0, arg1));
+};
+
+module.exports.__wbindgen_memory = function() {
+ var ret = wasm.memory;
+ return addHeapObject(ret);
+};
+
+const path = require('path').join(__dirname, 'odoh_wasm_bg.wasm');
+const bytes = require('fs').readFileSync(path);
+
+const wasmModule = new WebAssembly.Module(bytes);
+const wasmInstance = new WebAssembly.Instance(wasmModule, imports);
+wasm = wasmInstance.exports;
+module.exports.__wasm = wasm;
+
diff --git a/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm
new file mode 100644
index 0000000000..ddca009ed4
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm
Binary files differ
diff --git a/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm.d.ts b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm.d.ts
new file mode 100644
index 0000000000..e3a939f2e0
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/odoh_wasm_bg.wasm.d.ts
@@ -0,0 +1,7 @@
+/* tslint:disable */
+/* eslint-disable */
+export const memory: WebAssembly.Memory;
+export function get_odoh_config(): number;
+export function decrypt_query(a: number, b: number): number;
+export function create_response(a: number, b: number): number;
+export function __wbindgen_malloc(a: number): number;
diff --git a/testing/xpcshell/odoh-wasm/pkg/package.json b/testing/xpcshell/odoh-wasm/pkg/package.json
new file mode 100644
index 0000000000..e6db000676
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/pkg/package.json
@@ -0,0 +1,15 @@
+{
+ "name": "odoh-wasm",
+ "collaborators": [
+ "Kershaw Chang <kershaw@mozilla.com>"
+ ],
+ "version": "0.1.0",
+ "files": [
+ "odoh_wasm_bg.wasm",
+ "odoh_wasm.js",
+ "odoh_wasm_bg.js",
+ "odoh_wasm.d.ts"
+ ],
+ "main": "odoh_wasm.js",
+ "types": "odoh_wasm.d.ts"
+} \ No newline at end of file
diff --git a/testing/xpcshell/odoh-wasm/src/lib.rs b/testing/xpcshell/odoh-wasm/src/lib.rs
new file mode 100644
index 0000000000..8f94d2b567
--- /dev/null
+++ b/testing/xpcshell/odoh-wasm/src/lib.rs
@@ -0,0 +1,158 @@
+use hpke::{
+ kem::X25519HkdfSha256,
+ Kem as KemTrait, Serializable,
+};
+
+use odoh_rs::protocol::{
+ create_response_msg, parse_received_query,
+ ObliviousDoHConfigContents, ObliviousDoHKeyPair,
+ ObliviousDoHQueryBody,
+};
+
+use futures::executor;
+use hex;
+use wasm_bindgen::prelude::*;
+
+pub type Kem = X25519HkdfSha256;
+
+// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
+// allocator.
+#[cfg(feature = "wee_alloc")]
+#[global_allocator]
+static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
+
+pub const ODOH_VERSION: u16 = 0x0001;
+const KEM_ID: u16 = 0x0020;
+const KDF_ID: u16 = 0x0001;
+const AEAD_ID: u16 = 0x0001;
+
+// random bytes, should be 32 bytes for X25519 keys
+pub const IKM: &str = "871389a8727130974e3eb3ee528d440a871389a8727130974e3eb3ee528d440a";
+
+#[wasm_bindgen]
+extern "C" {
+ // Use `js_namespace` here to bind `console.log(..)` instead of just
+ // `log(..)`
+ #[wasm_bindgen(js_namespace = console)]
+ fn log(s: &str);
+
+ // The `console.log` is quite polymorphic, so we can bind it with multiple
+ // signatures. Note that we need to use `js_name` to ensure we always call
+ // `log` in JS.
+ #[wasm_bindgen(js_namespace = console, js_name = log)]
+ fn log_u32(a: u32);
+
+ // Multiple arguments too!
+ #[wasm_bindgen(js_namespace = console, js_name = log)]
+ fn log_many(a: &str, b: &str);
+}
+
+macro_rules! console_log {
+ // Note that this is using the `log` function imported above during
+ // `bare_bones`
+ ($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
+}
+
+fn generate_key_pair() -> ObliviousDoHKeyPair {
+ let ikm_bytes = hex::decode(IKM).unwrap();
+ let (secret_key, public_key) = Kem::derive_keypair(&ikm_bytes);
+ let public_key_bytes = public_key.to_bytes().to_vec();
+ let odoh_public_key = ObliviousDoHConfigContents {
+ kem_id: KEM_ID,
+ kdf_id: KDF_ID,
+ aead_id: AEAD_ID,
+ public_key: public_key_bytes,
+ };
+ ObliviousDoHKeyPair {
+ private_key: secret_key,
+ public_key: odoh_public_key,
+ }
+}
+
+#[wasm_bindgen]
+pub fn get_odoh_config() -> js_sys::Uint8Array {
+ let key_pair = generate_key_pair();
+ let public_key_bytes = key_pair.public_key.public_key;
+ let length_bytes = (public_key_bytes.len() as u16).to_be_bytes();
+ let odoh_config_length = 12 + public_key_bytes.len();
+ let version = ODOH_VERSION;
+ let odoh_contents_length = 8 + public_key_bytes.len();
+ let kem_id = KEM_ID; // DHKEM(X25519, HKDF-SHA256)
+ let kdf_id = KDF_ID; // KDF(SHA-256)
+ let aead_id = AEAD_ID; // AEAD(AES-GCM-128)
+ let mut result = vec![];
+ result.extend(&((odoh_config_length as u16).to_be_bytes()));
+ result.extend(&((version as u16).to_be_bytes()));
+ result.extend(&((odoh_contents_length as u16).to_be_bytes()));
+ result.extend(&((kem_id as u16).to_be_bytes()));
+ result.extend(&((kdf_id as u16).to_be_bytes()));
+ result.extend(&((aead_id as u16).to_be_bytes()));
+ result.extend(&length_bytes);
+ result.extend(&public_key_bytes);
+ return js_sys::Uint8Array::from(&result[..]);
+}
+
+static mut QUERY_BODY: Option<ObliviousDoHQueryBody> = None;
+static mut SERVER_SECRET: Option<Vec<u8>> = None;
+
+#[wasm_bindgen]
+pub fn decrypt_query(
+ odoh_encrypted_query_msg: &[u8],
+) -> js_sys::Uint8Array {
+ let mut result = vec![];
+ unsafe {
+ let key_pair = generate_key_pair();
+ let parsed_res =
+ executor::block_on(parse_received_query(&key_pair, &odoh_encrypted_query_msg));
+ let (parsed_query, secret) = match parsed_res {
+ Ok(t) => (t.0, t.1),
+ Err(_) => {
+ console_log!("parse_received_query failed!");
+ return js_sys::Uint8Array::new_with_length(0)
+ },
+ };
+
+ result.extend(&parsed_query.dns_msg);
+
+ QUERY_BODY = Some(parsed_query);
+ SERVER_SECRET = Some(secret);
+ }
+
+ return js_sys::Uint8Array::from(&result[..]);
+}
+
+#[wasm_bindgen]
+pub fn create_response(
+ response: &[u8],
+) -> js_sys::Uint8Array {
+ unsafe {
+ if let Some(body) = &QUERY_BODY {
+ if let Some(secret) = &SERVER_SECRET {
+ // random bytes
+ let nonce = vec![0x1b, 0xff, 0xfd, 0xff, 0x1a, 0xff, 0xff, 0xff,
+                0xff, 0xff, 0xcf, 0xff, 0xff, 0xff, 0xff, 0x0e];
+ let result = executor::block_on(create_response_msg(
+ &secret,
+ &response,
+ None,
+ Some(nonce),
+ &body,
+ ));
+ let generated_response = match result {
+ Ok(r) => r,
+ Err(_) => {
+ console_log!("create_response_msg failed!");
+ return js_sys::Uint8Array::new_with_length(0);
+ }
+ };
+
+ QUERY_BODY = None;
+ SERVER_SECRET = None;
+ return js_sys::Uint8Array::from(&generated_response[..]);
+ }
+ }
+ }
+
+ console_log!("create_response_msg failed!");
+ return js_sys::Uint8Array::new_with_length(0);
+}
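For readers following the byte layout that `get_odoh_config` assembles above, here is a small, hedged Python sketch (not part of this patch) of how those bytes could be unpacked on the receiving side. The field order mirrors the comments in the Rust code; the helper name `parse_odoh_config` is illustrative only.

```python
import struct

def parse_odoh_config(buf: bytes) -> dict:
    # Seven big-endian u16 fields precede the raw public key:
    # total length, version, contents length, kem_id, kdf_id, aead_id, key length.
    (total_len, version, contents_len,
     kem_id, kdf_id, aead_id, pk_len) = struct.unpack_from(">7H", buf)
    public_key = buf[14:14 + pk_len]
    # Sanity checks that match the lengths computed in get_odoh_config().
    assert total_len == 12 + pk_len and contents_len == 8 + pk_len
    return {"version": version, "kem_id": kem_id, "kdf_id": kdf_id,
            "aead_id": aead_id, "public_key": public_key}
```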
diff --git a/testing/xpcshell/remotexpcshelltests.py b/testing/xpcshell/remotexpcshelltests.py
new file mode 100644
index 0000000000..6dd40f15f9
--- /dev/null
+++ b/testing/xpcshell/remotexpcshelltests.py
@@ -0,0 +1,791 @@
+#!/usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import datetime
+import os
+import posixpath
+import shutil
+import sys
+import tempfile
+import time
+import uuid
+from argparse import Namespace
+from zipfile import ZipFile
+
+import mozcrash
+import mozdevice
+import mozfile
+import mozinfo
+import runxpcshelltests as xpcshell
+import six
+from mozdevice import ADBDevice, ADBDeviceFactory, ADBTimeoutError
+from mozlog import commandline
+from xpcshellcommandline import parser_remote
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+
+class RemoteProcessMonitor(object):
+ processStatus = []
+
+ def __init__(self, package, device, log, remoteLogFile):
+ self.package = package
+ self.device = device
+ self.log = log
+ self.remoteLogFile = remoteLogFile
+ self.selectedProcess = -1
+
+ @classmethod
+ def pickUnusedProcess(cls):
+ for i in range(len(cls.processStatus)):
+ if not cls.processStatus[i]:
+ cls.processStatus[i] = True
+ return i
+ # No more free processes :(
+ return -1
+
+ @classmethod
+ def freeProcess(cls, processId):
+ cls.processStatus[processId] = False
+
+ def kill(self):
+ self.device.pkill(self.process_name, sig=9, attempts=1)
+
+ def launch_service(self, extra_args, env, selectedProcess, test_name=None):
+ if not self.device.process_exist(self.package):
+            # Make sure the main app is running; this should help the tests
+            # get foreground priority scheduling.
+ self.device.launch_activity(
+ self.package,
+ intent="org.mozilla.geckoview.test_runner.XPCSHELL_TEST_MAIN",
+ activity_name="TestRunnerActivity",
+ e10s=True,
+ )
+ # Newer Androids require that background services originate from
+ # active apps, so wait here until the test runner is the top
+ # activity.
+ retries = 20
+ top = self.device.get_top_activity(timeout=60)
+ while top != self.package and retries > 0:
+ self.log.info(
+ "%s | Checking that %s is the top activity."
+ % (test_name, self.package)
+ )
+ top = self.device.get_top_activity(timeout=60)
+ time.sleep(1)
+ retries -= 1
+
+ self.process_name = self.package + (":xpcshell%d" % selectedProcess)
+
+ retries = 20
+ while retries > 0 and self.device.process_exist(self.process_name):
+ self.log.info(
+ "%s | %s | Killing left-over process %s"
+ % (test_name, self.pid, self.process_name)
+ )
+ self.kill()
+ time.sleep(1)
+ retries -= 1
+
+ if self.device.process_exist(self.process_name):
+ raise Exception(
+ "%s | %s | Could not kill left-over process" % (test_name, self.pid)
+ )
+
+ self.device.launch_service(
+ self.package,
+ activity_name=("XpcshellTestRunnerService$i%d" % selectedProcess),
+ e10s=True,
+ moz_env=env,
+ grant_runtime_permissions=False,
+ extra_args=extra_args,
+ out_file=self.remoteLogFile,
+ )
+ return self.pid
+
+ def wait(self, timeout, interval=0.1, test_name=None):
+ timer = 0
+ status = True
+
+ # wait for log creation on startup
+ retries = 0
+ while retries < 20 / interval and not self.device.is_file(self.remoteLogFile):
+ retries += 1
+ time.sleep(interval)
+ if not self.device.is_file(self.remoteLogFile):
+ self.log.warning(
+ "%s | Failed wait for remote log: %s missing?"
+ % (test_name, self.remoteLogFile)
+ )
+
+ while self.device.process_exist(self.process_name):
+ time.sleep(interval)
+ timer += interval
+ interval *= 1.5
+ if timeout and timer > timeout:
+ status = False
+ self.log.info(
+ "remotexpcshelltests.py | %s | %s | Timing out"
+ % (test_name, str(self.pid))
+ )
+ self.kill()
+ break
+ return status
+
+ @property
+ def pid(self):
+ """
+ Determine the pid of the remote process (or the first process with
+ the same name).
+ """
+ procs = self.device.get_process_list()
+        # Limit the comparison to the first 75 characters, due to the
+        # process name length limitation on Android.
+ pids = [proc[0] for proc in procs if proc[1] == self.process_name[:75]]
+ if pids is None or len(pids) < 1:
+ return 0
+ return pids[0]
+
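A quick illustration of the 75-character comparison in `pid` above. The values are hypothetical, and the row shape is assumed to resemble the (pid, name, user) tuples returned by `get_process_list()`:

```python
# Android truncates long process names, so both sides are cut to 75 characters
# before comparing; the first matching pid wins.
name = "org.mozilla.geckoview.test_runner:xpcshell3"
procs = [(4242, name[:75], "u0_a123")]  # assumed (pid, name, user) rows
pids = [proc[0] for proc in procs if proc[1] == name[:75]]
assert pids == [4242]
```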
+
+class RemoteXPCShellTestThread(xpcshell.XPCShellTestThread):
+ def __init__(self, *args, **kwargs):
+ xpcshell.XPCShellTestThread.__init__(self, *args, **kwargs)
+
+ self.shellReturnCode = None
+ # embed the mobile params from the harness into the TestThread
+ mobileArgs = kwargs.get("mobileArgs")
+ for key in mobileArgs:
+ setattr(self, key, mobileArgs[key])
+ self.remoteLogFile = posixpath.join(
+ mobileArgs["remoteLogFolder"], "xpcshell-%s.log" % str(uuid.uuid4())
+ )
+
+ def initDir(self, path, mask="777", timeout=None):
+ """Initialize a directory by removing it if it exists, creating it
+ and changing the permissions."""
+ self.device.rm(path, recursive=True, force=True, timeout=timeout)
+ self.device.mkdir(path, parents=True, timeout=timeout)
+
+ def updateTestPrefsFile(self):
+ # The base method will either be no-op (and return the existing
+ # remote path), or return a path to a new local file.
+ testPrefsFile = xpcshell.XPCShellTestThread.updateTestPrefsFile(self)
+ if testPrefsFile == self.rootPrefsFile:
+            # The pref file is the shared one, which has already been pushed to the
+            # device, so there is nothing more to do here.
+ return self.rootPrefsFile
+
+ # Push the per-test prefs file in the remote temp dir.
+ remoteTestPrefsFile = posixpath.join(self.remoteTmpDir, "user.js")
+ self.device.push(testPrefsFile, remoteTestPrefsFile)
+ self.device.chmod(remoteTestPrefsFile)
+ os.remove(testPrefsFile)
+ return remoteTestPrefsFile
+
+ def buildCmdTestFile(self, name):
+ remoteDir = self.remoteForLocal(os.path.dirname(name))
+ if remoteDir == self.remoteHere:
+ remoteName = os.path.basename(name)
+ else:
+ remoteName = posixpath.join(remoteDir, os.path.basename(name))
+ return [
+ "-e",
+ 'const _TEST_CWD = "%s";' % self.remoteHere,
+ "-e",
+ 'const _TEST_FILE = ["%s"];' % remoteName.replace("\\", "/"),
+ ]
+
+ def remoteForLocal(self, local):
+ for mapping in self.pathMapping:
+ if os.path.abspath(mapping.local) == os.path.abspath(local):
+ return mapping.remote
+ return local
+
+ def setupTempDir(self):
+ self.remoteTmpDir = posixpath.join(self.remoteTmpDir, str(uuid.uuid4()))
+ # make sure the temp dir exists
+ self.initDir(self.remoteTmpDir)
+ # env var is set in buildEnvironment
+ self.env["XPCSHELL_TEST_TEMP_DIR"] = self.remoteTmpDir
+ return self.remoteTmpDir
+
+ def setupProfileDir(self):
+ profileId = str(uuid.uuid4())
+ self.profileDir = posixpath.join(self.profileDir, profileId)
+ self.initDir(self.profileDir)
+ if self.interactive or self.singleFile:
+ self.log.info("profile dir is %s" % self.profileDir)
+ self.env["XPCSHELL_TEST_PROFILE_DIR"] = self.profileDir
+ self.env["TMPDIR"] = self.profileDir
+ self.remoteMinidumpDir = posixpath.join(self.remoteMinidumpRootDir, profileId)
+ self.initDir(self.remoteMinidumpDir)
+ self.env["XPCSHELL_MINIDUMP_DIR"] = self.remoteMinidumpDir
+ return self.profileDir
+
+ def clean_temp_dirs(self, name):
+ self.log.info("Cleaning up profile for %s folder: %s" % (name, self.profileDir))
+ self.device.rm(self.profileDir, force=True, recursive=True)
+ self.device.rm(self.remoteTmpDir, force=True, recursive=True)
+ self.device.rm(self.remoteMinidumpDir, force=True, recursive=True)
+
+ def setupMozinfoJS(self):
+ local = tempfile.mktemp()
+ mozinfo.output_to_file(local)
+ mozInfoJSPath = posixpath.join(self.profileDir, "mozinfo.json")
+ self.device.push(local, mozInfoJSPath)
+ self.device.chmod(mozInfoJSPath)
+ os.remove(local)
+ return mozInfoJSPath
+
+ def logCommand(self, name, completeCmd, testdir):
+ self.log.info("%s | full command: %r" % (name, completeCmd))
+ self.log.info("%s | current directory: %r" % (name, self.remoteHere))
+ self.log.info("%s | environment: %s" % (name, self.env))
+
+ def getHeadFiles(self, test):
+ """Override parent method to find files on remote device.
+
+        Obtains the list of head files and returns it as remote paths.
+ """
+
+ def sanitize_list(s, kind):
+ for f in s.strip().split(" "):
+ f = f.strip()
+ if len(f) < 1:
+ continue
+
+ path = posixpath.join(self.remoteHere, f)
+
+ # skip check for file existence: the convenience of discovering
+ # a missing file does not justify the time cost of the round trip
+ # to the device
+ yield path
+
+ self.remoteHere = self.remoteForLocal(test["here"])
+
+ headlist = test.get("head", "")
+ return list(sanitize_list(headlist, "head"))
+
+ def buildXpcsCmd(self):
+ # change base class' paths to remote paths and use base class to build command
+ self.xpcshell = posixpath.join(self.remoteBinDir, "xpcw")
+ self.headJSPath = posixpath.join(self.remoteScriptsDir, "head.js")
+ self.httpdJSPath = posixpath.join(self.remoteComponentsDir, "httpd.js")
+ self.testingModulesDir = self.remoteModulesDir
+ self.testharnessdir = self.remoteScriptsDir
+ xpcsCmd = xpcshell.XPCShellTestThread.buildXpcsCmd(self)
+ # remove "-g <dir> -a <dir>" and replace with remote alternatives
+ del xpcsCmd[1:5]
+ if self.options["localAPK"]:
+ xpcsCmd.insert(1, "--greomni")
+ xpcsCmd.insert(2, self.remoteAPK)
+ xpcsCmd.insert(1, "-g")
+ xpcsCmd.insert(2, self.remoteBinDir)
+
+ if self.remoteDebugger:
+ # for example, "/data/local/gdbserver" "localhost:12345"
+ xpcsCmd = [self.remoteDebugger, self.remoteDebuggerArgs] + xpcsCmd
+ return xpcsCmd
+
+ def killTimeout(self, proc):
+ self.kill(proc)
+
+ def launchProcess(
+ self, cmd, stdout, stderr, env, cwd, timeout=None, test_name=None
+ ):
+ rpm = RemoteProcessMonitor(
+ "org.mozilla.geckoview.test_runner",
+ self.device,
+ self.log,
+ self.remoteLogFile,
+ )
+
+ startTime = datetime.datetime.now()
+
+ try:
+ pid = rpm.launch_service(
+ cmd[1:], self.env, self.selectedProcess, test_name=test_name
+ )
+ except Exception as e:
+ self.log.info(
+ "remotexpcshelltests.py | Failed to start process: %s" % str(e)
+ )
+ self.shellReturnCode = 1
+ return ""
+
+ self.log.info(
+ "remotexpcshelltests.py | %s | %s | Launched Test App"
+ % (test_name, str(pid))
+ )
+
+ if rpm.wait(timeout, test_name=test_name):
+ self.shellReturnCode = 0
+ else:
+ self.shellReturnCode = 1
+ self.log.info(
+ "remotexpcshelltests.py | %s | %s | Application ran for: %s"
+ % (test_name, str(pid), str(datetime.datetime.now() - startTime))
+ )
+
+ try:
+ return self.device.get_file(self.remoteLogFile)
+ except mozdevice.ADBTimeoutError:
+ raise
+ except Exception as e:
+ self.log.info(
+ "remotexpcshelltests.py | %s | %s | Could not read log file: %s"
+ % (test_name, str(pid), str(e))
+ )
+ self.shellReturnCode = 1
+ return ""
+
+ def checkForCrashes(self, dump_directory, symbols_path, test_name=None):
+ with mozfile.TemporaryDirectory() as dumpDir:
+ self.device.pull(self.remoteMinidumpDir, dumpDir)
+ crashed = mozcrash.log_crashes(
+ self.log, dumpDir, symbols_path, test=test_name
+ )
+ return crashed
+
+ def communicate(self, proc):
+ return proc, ""
+
+ def poll(self, proc):
+ if not self.device.process_exist("xpcshell"):
+ return self.getReturnCode(proc)
+ # Process is still running
+ return None
+
+ def kill(self, proc):
+ return self.device.pkill("xpcshell")
+
+ def getReturnCode(self, proc):
+ if self.shellReturnCode is not None:
+ return self.shellReturnCode
+ else:
+ return -1
+
+ def removeDir(self, dirname):
+ try:
+ self.device.rm(dirname, recursive=True)
+ except ADBTimeoutError:
+ raise
+ except Exception as e:
+ self.log.warning(str(e))
+
+ def createLogFile(self, test, stdout):
+ filename = test.replace("\\", "/").split("/")[-1] + ".log"
+ with open(filename, "wb") as f:
+ f.write(stdout)
+
+
+# A specialization of XPCShellTests that runs tests on an Android device.
+class XPCShellRemote(xpcshell.XPCShellTests, object):
+ def __init__(self, options, log):
+ xpcshell.XPCShellTests.__init__(self, log)
+
+ options["threadCount"] = min(options["threadCount"] or 4, 4)
+
+ self.options = options
+ verbose = False
+ if options["log_tbpl_level"] == "debug" or options["log_mach_level"] == "debug":
+ verbose = True
+ self.device = ADBDeviceFactory(
+ adb=options["adbPath"] or "adb",
+ device=options["deviceSerial"],
+ test_root=options["remoteTestRoot"],
+ verbose=verbose,
+ )
+ self.remoteTestRoot = posixpath.join(self.device.test_root, "xpc")
+ self.remoteLogFolder = posixpath.join(self.remoteTestRoot, "logs")
+ # Add Android version (SDK level) to mozinfo so that manifest entries
+ # can be conditional on android_version.
+ mozinfo.info["android_version"] = str(self.device.version)
+ mozinfo.info["is_emulator"] = self.device._device_serial.startswith("emulator-")
+
+ self.localBin = options["localBin"]
+ self.pathMapping = []
+ # remoteBinDir contains xpcshell and its wrapper script, both of which must
+ # be executable. Since +x permissions cannot usually be set on /mnt/sdcard,
+ # and the test root may be on /mnt/sdcard, remoteBinDir is set to be on
+ # /data/local, always.
+ self.remoteBinDir = posixpath.join(self.device.test_root, "xpcb")
+ # Terse directory names are used here ("c" for the components directory)
+ # to minimize the length of the command line used to execute
+ # xpcshell on the remote device. adb has a limit to the number
+ # of characters used in a shell command, and the xpcshell command
+ # line can be quite complex.
+ self.remoteTmpDir = posixpath.join(self.remoteTestRoot, "tmp")
+ self.remoteScriptsDir = self.remoteTestRoot
+ self.remoteComponentsDir = posixpath.join(self.remoteTestRoot, "c")
+ self.remoteModulesDir = posixpath.join(self.remoteTestRoot, "m")
+ self.remoteMinidumpRootDir = posixpath.join(self.remoteTestRoot, "minidumps")
+ self.profileDir = posixpath.join(self.remoteTestRoot, "p")
+ self.remoteDebugger = options["debugger"]
+ self.remoteDebuggerArgs = options["debuggerArgs"]
+ self.testingModulesDir = options["testingModulesDir"]
+
+ self.initDir(self.remoteTmpDir)
+ self.initDir(self.profileDir)
+
+ # Make sure we get a fresh start
+ self.device.stop_application("org.mozilla.geckoview.test_runner")
+
+ for i in range(options["threadCount"]):
+ RemoteProcessMonitor.processStatus += [False]
+
+ self.env = {}
+
+ if options["objdir"]:
+ self.xpcDir = os.path.join(options["objdir"], "_tests/xpcshell")
+ elif os.path.isdir(os.path.join(here, "tests")):
+ self.xpcDir = os.path.join(here, "tests")
+ else:
+ print("Couldn't find local xpcshell test directory", file=sys.stderr)
+ sys.exit(1)
+
+ self.remoteAPK = None
+ if options["localAPK"]:
+ self.localAPKContents = ZipFile(options["localAPK"])
+ self.remoteAPK = posixpath.join(
+ self.remoteBinDir, os.path.basename(options["localAPK"])
+ )
+ else:
+ self.localAPKContents = None
+ if options["setup"]:
+ self.setupTestDir()
+ self.setupUtilities()
+ self.setupModules()
+ self.initDir(self.remoteMinidumpRootDir)
+ self.initDir(self.remoteLogFolder)
+
+ eprefs = options.get("extraPrefs") or []
+ if options.get("disableFission"):
+ eprefs.append("fission.autostart=false")
+ else:
+ # should be by default, just in case
+ eprefs.append("fission.autostart=true")
+ options["extraPrefs"] = eprefs
+
+ # data that needs to be passed to the RemoteXPCShellTestThread
+ self.mobileArgs = {
+ "device": self.device,
+ "remoteBinDir": self.remoteBinDir,
+ "remoteScriptsDir": self.remoteScriptsDir,
+ "remoteComponentsDir": self.remoteComponentsDir,
+ "remoteModulesDir": self.remoteModulesDir,
+ "options": self.options,
+ "remoteDebugger": self.remoteDebugger,
+ "remoteDebuggerArgs": self.remoteDebuggerArgs,
+ "pathMapping": self.pathMapping,
+ "profileDir": self.profileDir,
+ "remoteLogFolder": self.remoteLogFolder,
+ "remoteTmpDir": self.remoteTmpDir,
+ "remoteMinidumpRootDir": self.remoteMinidumpRootDir,
+ }
+ if self.remoteAPK:
+ self.mobileArgs["remoteAPK"] = self.remoteAPK
+
+ def initDir(self, path, mask="777", timeout=None):
+ """Initialize a directory by removing it if it exists, creating it
+ and changing the permissions."""
+ self.device.rm(path, recursive=True, force=True, timeout=timeout)
+ self.device.mkdir(path, parents=True, timeout=timeout)
+
+ def setLD_LIBRARY_PATH(self):
+ self.env["LD_LIBRARY_PATH"] = self.remoteBinDir
+
+ def pushWrapper(self):
+ # Rather than executing xpcshell directly, this wrapper script is
+ # used. By setting environment variables and the cwd in the script,
+ # the length of the per-test command line is shortened. This is
+ # often important when using ADB, as there is a limit to the length
+ # of the ADB command line.
+ localWrapper = tempfile.mktemp()
+ with open(localWrapper, "w") as f:
+ f.write("#!/system/bin/sh\n")
+ for envkey, envval in six.iteritems(self.env):
+ f.write("export %s=%s\n" % (envkey, envval))
+ f.writelines(
+ [
+ "cd $1\n",
+ "echo xpcw: cd $1\n",
+ "shift\n",
+ 'echo xpcw: xpcshell "$@"\n',
+ '%s/xpcshell "$@"\n' % self.remoteBinDir,
+ ]
+ )
+ remoteWrapper = posixpath.join(self.remoteBinDir, "xpcw")
+ self.device.push(localWrapper, remoteWrapper)
+ self.device.chmod(remoteWrapper)
+ os.remove(localWrapper)
+
+ def start_test(self, test):
+ test.selectedProcess = RemoteProcessMonitor.pickUnusedProcess()
+ if test.selectedProcess == -1:
+ self.log.error(
+ "TEST-UNEXPECTED-FAIL | remotexpcshelltests.py | "
+ "no more free processes"
+ )
+ test.start()
+
+ def test_ended(self, test):
+ RemoteProcessMonitor.freeProcess(test.selectedProcess)
+
+ def buildPrefsFile(self, extraPrefs):
+ prefs = super(XPCShellRemote, self).buildPrefsFile(extraPrefs)
+ remotePrefsFile = posixpath.join(self.remoteTestRoot, "user.js")
+ self.device.push(self.prefsFile, remotePrefsFile)
+ self.device.chmod(remotePrefsFile)
+ # os.remove(self.prefsFile) is not called despite having pushed the
+ # file to the device, because the local file is relied upon by the
+ # updateTestPrefsFile method
+ self.prefsFile = remotePrefsFile
+ return prefs
+
+ def buildEnvironment(self):
+ self.buildCoreEnvironment()
+ self.setLD_LIBRARY_PATH()
+ self.env["MOZ_LINKER_CACHE"] = self.remoteBinDir
+ self.env["GRE_HOME"] = self.remoteBinDir
+ self.env["XPCSHELL_TEST_PROFILE_DIR"] = self.profileDir
+ self.env["HOME"] = self.profileDir
+ self.env["XPCSHELL_TEST_TEMP_DIR"] = self.remoteTmpDir
+ self.env["MOZ_ANDROID_DATA_DIR"] = self.remoteBinDir
+ self.env["MOZ_IN_AUTOMATION"] = "1"
+
+ # Guard against intermittent failures to retrieve abi property;
+ # without an abi, xpcshell cannot find greprefs.js and crashes.
+ abilistprop = None
+ abi = None
+ retries = 0
+ while not abi and retries < 3:
+ abi = self.device.get_prop("ro.product.cpu.abi")
+ retries += 1
+ if not abi:
+ raise Exception("failed to get ro.product.cpu.abi from device")
+ self.log.info("ro.product.cpu.abi %s" % abi)
+ if self.localAPKContents:
+ abilist = [abi]
+ retries = 0
+ while not abilistprop and retries < 3:
+ abilistprop = self.device.get_prop("ro.product.cpu.abilist")
+ retries += 1
+ self.log.info("ro.product.cpu.abilist %s" % abilistprop)
+ abi_found = False
+ names = [
+ n for n in self.localAPKContents.namelist() if n.startswith("lib/")
+ ]
+ self.log.debug("apk names: %s" % names)
+ if abilistprop and len(abilistprop) > 0:
+ abilist.extend(abilistprop.split(","))
+ for abicand in abilist:
+ abi_found = (
+ len([n for n in names if n.startswith("lib/%s" % abicand)]) > 0
+ )
+ if abi_found:
+ abi = abicand
+ break
+ if not abi_found:
+ self.log.info("failed to get matching abi from apk.")
+ if len(names) > 0:
+ self.log.info(
+ "device cpu abi not found in apk. Using abi from apk."
+ )
+ abi = names[0].split("/")[1]
+ self.log.info("Using abi %s." % abi)
+ self.env["MOZ_ANDROID_CPU_ABI"] = abi
+ self.log.info("Using env %r" % (self.env,))
+
+ def setupUtilities(self):
+ self.initDir(self.remoteTmpDir)
+ self.initDir(self.remoteBinDir)
+ remotePrefDir = posixpath.join(self.remoteBinDir, "defaults", "pref")
+ self.initDir(posixpath.join(remotePrefDir, "extra"))
+ self.initDir(self.remoteComponentsDir)
+
+ local = os.path.join(os.path.dirname(os.path.abspath(__file__)), "head.js")
+ remoteFile = posixpath.join(self.remoteScriptsDir, "head.js")
+ self.device.push(local, remoteFile)
+ self.device.chmod(remoteFile)
+
+ # Additional binaries are required for some tests. This list should be
+ # similar to TEST_HARNESS_BINS in testing/mochitest/Makefile.in.
+ binaries = [
+ "ssltunnel",
+ "certutil",
+ "pk12util",
+ "BadCertAndPinningServer",
+ "DelegatedCredentialsServer",
+ "EncryptedClientHelloServer",
+ "FaultyServer",
+ "OCSPStaplingServer",
+ "GenerateOCSPResponse",
+ "SanctionsTestServer",
+ ]
+ for fname in binaries:
+ local = os.path.join(self.localBin, fname)
+ if os.path.isfile(local):
+ print("Pushing %s.." % fname, file=sys.stderr)
+ remoteFile = posixpath.join(self.remoteBinDir, fname)
+ self.device.push(local, remoteFile)
+ self.device.chmod(remoteFile)
+ else:
+ print(
+ "*** Expected binary %s not found in %s!" % (fname, self.localBin),
+ file=sys.stderr,
+ )
+
+ local = os.path.join(self.localBin, "components/httpd.js")
+ remoteFile = posixpath.join(self.remoteComponentsDir, "httpd.js")
+ self.device.push(local, remoteFile)
+ self.device.chmod(remoteFile)
+
+ if self.options["localAPK"]:
+ remoteFile = posixpath.join(
+ self.remoteBinDir, os.path.basename(self.options["localAPK"])
+ )
+ self.device.push(self.options["localAPK"], remoteFile)
+ self.device.chmod(remoteFile)
+
+ self.pushLibs()
+ else:
+ localB2G = os.path.join(self.options["objdir"], "dist", "b2g")
+ if os.path.exists(localB2G):
+ self.device.push(localB2G, self.remoteBinDir)
+ self.device.chmod(self.remoteBinDir)
+ else:
+ raise Exception("unable to install gre: no APK and not b2g")
+
+ def pushLibs(self):
+ pushed_libs_count = 0
+ try:
+ dir = tempfile.mkdtemp()
+ for info in self.localAPKContents.infolist():
+ if info.filename.endswith(".so"):
+ print("Pushing %s.." % info.filename, file=sys.stderr)
+ remoteFile = posixpath.join(
+ self.remoteBinDir, os.path.basename(info.filename)
+ )
+ self.localAPKContents.extract(info, dir)
+ localFile = os.path.join(dir, info.filename)
+ self.device.push(localFile, remoteFile)
+ pushed_libs_count += 1
+ self.device.chmod(remoteFile)
+ finally:
+ shutil.rmtree(dir)
+ return pushed_libs_count
+
+ def setupModules(self):
+ if self.testingModulesDir:
+ self.device.push(self.testingModulesDir, self.remoteModulesDir)
+ self.device.chmod(self.remoteModulesDir)
+
+ def setupTestDir(self):
+ print("pushing %s" % self.xpcDir)
+ # The tests directory can be quite large: 5000 files and growing!
+ # Sometimes - like on a low-end aws instance running an emulator - the push
+ # may exceed the default 5 minute timeout, so we increase it here to 10 minutes.
+ self.device.rm(self.remoteScriptsDir, recursive=True, force=True, timeout=None)
+ self.device.push(self.xpcDir, self.remoteScriptsDir, timeout=600)
+ self.device.chmod(self.remoteScriptsDir, recursive=True)
+
+ def setupSocketConnections(self):
+ # make node host ports visible to device
+ if "MOZHTTP2_PORT" in self.env:
+ port = "tcp:{}".format(self.env["MOZHTTP2_PORT"])
+ self.device.create_socket_connection(
+ ADBDevice.SOCKET_DIRECTION_REVERSE, port, port
+ )
+ self.log.info("reversed MOZHTTP2_PORT connection for port " + port)
+ if "MOZNODE_EXEC_PORT" in self.env:
+ port = "tcp:{}".format(self.env["MOZNODE_EXEC_PORT"])
+ self.device.create_socket_connection(
+ ADBDevice.SOCKET_DIRECTION_REVERSE, port, port
+ )
+ self.log.info("reversed MOZNODE_EXEC_PORT connection for port " + port)
+
+ def buildTestList(self, test_tags=None, test_paths=None, verify=False):
+ xpcshell.XPCShellTests.buildTestList(
+ self, test_tags=test_tags, test_paths=test_paths, verify=verify
+ )
+ uniqueTestPaths = set([])
+ for test in self.alltests:
+ uniqueTestPaths.add(test["here"])
+ for testdir in uniqueTestPaths:
+ abbrevTestDir = os.path.relpath(testdir, self.xpcDir)
+ remoteScriptDir = posixpath.join(self.remoteScriptsDir, abbrevTestDir)
+ self.pathMapping.append(PathMapping(testdir, remoteScriptDir))
+ # This is not related to building the test list, but since this is called late
+ # in the test suite run, this is a convenient place to finalize preparations;
+ # in particular, these operations cannot be executed much earlier because
+ # self.env may not be finalized.
+ self.setupSocketConnections()
+ if self.options["setup"]:
+ self.pushWrapper()
+
+
+def verifyRemoteOptions(parser, options):
+ if isinstance(options, Namespace):
+ options = vars(options)
+
+ if options["localBin"] is None:
+ if options["objdir"]:
+ options["localBin"] = os.path.join(options["objdir"], "dist", "bin")
+ if not os.path.isdir(options["localBin"]):
+ parser.error("Couldn't find local binary dir, specify --local-bin-dir")
+ elif os.path.isfile(os.path.join(here, "..", "bin", "xpcshell")):
+ # assume tests are being run from a tests archive
+ options["localBin"] = os.path.abspath(os.path.join(here, "..", "bin"))
+ else:
+ parser.error("Couldn't find local binary dir, specify --local-bin-dir")
+ return options
+
+
+class PathMapping:
+ def __init__(self, localDir, remoteDir):
+ self.local = localDir
+ self.remote = remoteDir
+
+
+def main():
+ if sys.version_info < (2, 7):
+ print(
+ "Error: You must use python version 2.7 or newer but less than 3.0",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ parser = parser_remote()
+ options = parser.parse_args()
+
+ options = verifyRemoteOptions(parser, options)
+ log = commandline.setup_logging("Remote XPCShell", options, {"tbpl": sys.stdout})
+
+ if options["interactive"] and not options["testPath"]:
+ print(
+ "Error: You must specify a test filename in interactive mode!",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ if options["xpcshell"] is None:
+ options["xpcshell"] = "xpcshell"
+
+ # The threadCount depends on the emulator rather than the host machine and
+ # empirically 10 seems to yield the best performance.
+ options["threadCount"] = min(options["threadCount"], 10)
+
+ xpcsh = XPCShellRemote(options, log)
+
+ if not xpcsh.runTests(
+ options, testClass=RemoteXPCShellTestThread, mobileArgs=xpcsh.mobileArgs
+ ):
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testing/xpcshell/runxpcshelltests.py b/testing/xpcshell/runxpcshelltests.py
new file mode 100755
index 0000000000..8a5378e240
--- /dev/null
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -0,0 +1,2250 @@
+#!/usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import copy
+import json
+import os
+import pipes
+import random
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+from argparse import Namespace
+from collections import defaultdict, deque, namedtuple
+from contextlib import contextmanager
+from datetime import datetime, timedelta
+from functools import partial
+from multiprocessing import cpu_count
+from subprocess import PIPE, STDOUT, Popen
+from tempfile import gettempdir, mkdtemp
+from threading import Event, Thread, Timer, current_thread
+
+import mozdebug
+import six
+
+try:
+ import psutil
+
+ HAVE_PSUTIL = True
+except Exception:
+ HAVE_PSUTIL = False
+
+from xpcshellcommandline import parser_desktop
+
+SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
+
+try:
+ from mozbuild.base import MozbuildObject
+
+ build = MozbuildObject.from_environment(cwd=SCRIPT_DIR)
+except ImportError:
+ build = None
+
+HARNESS_TIMEOUT = 5 * 60
+
+# benchmarking on tbpl revealed that this works best for now
+NUM_THREADS = int(cpu_count() * 4)
+if sys.platform == "win32":
+    NUM_THREADS = NUM_THREADS // 2  # integer division; avoid a float thread count on Python 3
+
+EXPECTED_LOG_ACTIONS = set(
+ [
+ "test_status",
+ "log",
+ ]
+)
+
+# --------------------------------------------------------------
+# TODO: this is a hack for mozbase without virtualenv, remove with bug 849900
+#
+here = os.path.dirname(__file__)
+mozbase = os.path.realpath(os.path.join(os.path.dirname(here), "mozbase"))
+
+if os.path.isdir(mozbase):
+ for package in os.listdir(mozbase):
+ sys.path.append(os.path.join(mozbase, package))
+
+import mozcrash
+import mozfile
+import mozinfo
+from manifestparser import TestManifest
+from manifestparser.filters import chunk_by_slice, failures, pathprefix, tags
+from manifestparser.util import normsep
+from mozlog import commandline
+from mozprofile import Profile
+from mozprofile.cli import parse_preferences
+from mozrunner.utils import get_stack_fixer_function
+
+# --------------------------------------------------------------
+
+# TODO: perhaps this should be in a more generally shared location?
+# This regex matches all of the C0 and C1 control characters
+# (U+0000 through U+001F; U+007F; U+0080 through U+009F)
+# except TAB (U+0009), CR (U+000D) and LF (U+000A), and it also
+# matches backslash (U+005C) so that escaped output stays unambiguous.
+# A raw string is deliberately not used.
+_cleanup_encoding_re = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f\\\\]")
+
+
+def _cleanup_encoding_repl(m):
+ c = m.group(0)
+ return "\\\\" if c == "\\" else "\\x{0:02X}".format(ord(c))
+
+
+def cleanup_encoding(s):
+ """S is either a byte or unicode string. Either way it may
+ contain control characters, unpaired surrogates, reserved code
+ points, etc. If it is a byte string, it is assumed to be
+ UTF-8, but it may not be *correct* UTF-8. Return a
+ sanitized unicode object."""
+ if not isinstance(s, six.string_types):
+ if isinstance(s, six.binary_type):
+ return six.ensure_str(s)
+ else:
+ return six.text_type(s)
+ if isinstance(s, six.binary_type):
+ s = s.decode("utf-8", "replace")
+ # Replace all C0 and C1 control characters with \xNN escapes.
+ return _cleanup_encoding_re.sub(_cleanup_encoding_repl, s)
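A brief, hedged illustration of the function defined just above (not part of the harness): control characters become `\xNN` escapes and backslashes are doubled, so logged test output stays printable.

```python
raw = "PASS\x07 C:\\tests\\unit"  # a str containing a BEL control char and backslashes
print(cleanup_encoding(raw))       # PASS\x07 C:\\tests\\unit  (all printable ASCII)
```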
+
+
+@contextmanager
+def popenCleanupHack():
+ """
+ Hack to work around https://bugs.python.org/issue37380
+ The basic idea is that on old versions of Python on Windows,
+ we need to clear subprocess._cleanup before we call Popen(),
+ then restore it afterwards.
+ """
+ savedCleanup = None
+ if mozinfo.isWin and sys.version_info[0] == 3 and sys.version_info < (3, 7, 5):
+ savedCleanup = subprocess._cleanup
+ subprocess._cleanup = lambda: None
+ try:
+ yield
+ finally:
+ if savedCleanup:
+ subprocess._cleanup = savedCleanup
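A minimal usage sketch of the context manager above, following the same pattern `launchProcess` uses further down; the command here is only a placeholder for illustration.

```python
from subprocess import PIPE, Popen

cmd = ["xpcshell", "-e", "quit(0);"]  # placeholder command for illustration
with popenCleanupHack():
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
```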
+
+
+""" Control-C handling """
+gotSIGINT = False
+
+
+def markGotSIGINT(signum, stackFrame):
+ global gotSIGINT
+ gotSIGINT = True
+
+
+class XPCShellTestThread(Thread):
+ def __init__(
+ self, test_object, retry=True, verbose=False, usingTSan=False, **kwargs
+ ):
+ Thread.__init__(self)
+ self.daemon = True
+
+ self.test_object = test_object
+ self.retry = retry
+ self.verbose = verbose
+ self.usingTSan = usingTSan
+
+ self.appPath = kwargs.get("appPath")
+ self.xrePath = kwargs.get("xrePath")
+ self.utility_path = kwargs.get("utility_path")
+ self.testingModulesDir = kwargs.get("testingModulesDir")
+ self.debuggerInfo = kwargs.get("debuggerInfo")
+ self.jsDebuggerInfo = kwargs.get("jsDebuggerInfo")
+ self.httpdJSPath = kwargs.get("httpdJSPath")
+ self.headJSPath = kwargs.get("headJSPath")
+ self.testharnessdir = kwargs.get("testharnessdir")
+ self.profileName = kwargs.get("profileName")
+ self.singleFile = kwargs.get("singleFile")
+ self.env = copy.deepcopy(kwargs.get("env"))
+ self.symbolsPath = kwargs.get("symbolsPath")
+ self.logfiles = kwargs.get("logfiles")
+ self.app_binary = kwargs.get("app_binary")
+ self.xpcshell = kwargs.get("xpcshell")
+ self.xpcsRunArgs = kwargs.get("xpcsRunArgs")
+ self.failureManifest = kwargs.get("failureManifest")
+ self.jscovdir = kwargs.get("jscovdir")
+ self.stack_fixer_function = kwargs.get("stack_fixer_function")
+ self._rootTempDir = kwargs.get("tempDir")
+ self.cleanup_dir_list = kwargs.get("cleanup_dir_list")
+ self.pStdout = kwargs.get("pStdout")
+ self.pStderr = kwargs.get("pStderr")
+ self.keep_going = kwargs.get("keep_going")
+ self.log = kwargs.get("log")
+ self.app_dir_key = kwargs.get("app_dir_key")
+ self.interactive = kwargs.get("interactive")
+ self.rootPrefsFile = kwargs.get("rootPrefsFile")
+ self.extraPrefs = kwargs.get("extraPrefs")
+ self.verboseIfFails = kwargs.get("verboseIfFails")
+ self.headless = kwargs.get("headless")
+ self.runFailures = kwargs.get("runFailures")
+ self.timeoutAsPass = kwargs.get("timeoutAsPass")
+ self.crashAsPass = kwargs.get("crashAsPass")
+ self.conditionedProfileDir = kwargs.get("conditionedProfileDir")
+ if self.runFailures:
+ self.retry = False
+
+ # Default the test prefsFile to the rootPrefsFile.
+ self.prefsFile = self.rootPrefsFile
+
+        # Only one of these will be set to 1; the harness adds them to its
+        # totals.
+ self.passCount = 0
+ self.todoCount = 0
+ self.failCount = 0
+
+ # Context for output processing
+ self.output_lines = []
+ self.has_failure_output = False
+ self.saw_proc_start = False
+ self.saw_proc_end = False
+ self.command = None
+ self.harness_timeout = kwargs.get("harness_timeout")
+ self.timedout = False
+
+ # event from main thread to signal work done
+ self.event = kwargs.get("event")
+ self.done = False # explicitly set flag so we don't rely on thread.isAlive
+
+ def run(self):
+ try:
+ self.run_test()
+ except Exception as e:
+ self.exception = e
+ self.traceback = traceback.format_exc()
+ else:
+ self.exception = None
+ self.traceback = None
+ if self.retry:
+ self.log.info(
+ "%s failed or timed out, will retry." % self.test_object["id"]
+ )
+ self.done = True
+ self.event.set()
+
+ def kill(self, proc):
+ """
+ Simple wrapper to kill a process.
+ On a remote system, this is overloaded to handle remote process communication.
+ """
+ return proc.kill()
+
+ def removeDir(self, dirname):
+ """
+ Simple wrapper to remove (recursively) a given directory.
+ On a remote system, we need to overload this to work on the remote filesystem.
+ """
+ mozfile.remove(dirname)
+
+ def poll(self, proc):
+ """
+ Simple wrapper to check if a process has terminated.
+ On a remote system, this is overloaded to handle remote process communication.
+ """
+ return proc.poll()
+
+ def createLogFile(self, test_file, stdout):
+ """
+ For a given test file and stdout buffer, create a log file.
+ On a remote system we have to fix the test name since it can contain directories.
+ """
+ with open(test_file + ".log", "w") as f:
+ f.write(stdout)
+
+ def getReturnCode(self, proc):
+ """
+ Simple wrapper to get the return code for a given process.
+ On a remote system we overload this to work with the remote process management.
+ """
+ if proc is not None and hasattr(proc, "returncode"):
+ return proc.returncode
+ return -1
+
+ def communicate(self, proc):
+ """
+ Simple wrapper to communicate with a process.
+ On a remote system, this is overloaded to handle remote process communication.
+ """
+        # Incremental output is processed here to sidestep issues on remote
+        # platforms, where what we know as proc is a file pulled off of a
+        # device.
+ if proc.stdout:
+ while True:
+ line = proc.stdout.readline()
+ if not line:
+ break
+ self.process_line(line)
+
+ if self.saw_proc_start and not self.saw_proc_end:
+ self.has_failure_output = True
+
+ return proc.communicate()
+
+ def launchProcess(
+ self, cmd, stdout, stderr, env, cwd, timeout=None, test_name=None
+ ):
+ """
+ Simple wrapper to launch a process.
+ On a remote system, this is more complex and we need to overload this function.
+ """
+ # timeout is needed by remote xpcshell to extend the
+ # remote device timeout. It is not used in this function.
+ if six.PY3:
+ cwd = six.ensure_str(cwd)
+ for i in range(len(cmd)):
+ cmd[i] = six.ensure_str(cmd[i])
+
+ if HAVE_PSUTIL:
+ popen_func = psutil.Popen
+ else:
+ popen_func = Popen
+
+ with popenCleanupHack():
+ proc = popen_func(cmd, stdout=stdout, stderr=stderr, env=env, cwd=cwd)
+
+ return proc
+
+ def checkForCrashes(self, dump_directory, symbols_path, test_name=None):
+ """
+ Simple wrapper to check for crashes.
+ On a remote system, this is more complex and we need to overload this function.
+ """
+ quiet = False
+ if self.crashAsPass:
+ quiet = True
+
+ return mozcrash.log_crashes(
+ self.log, dump_directory, symbols_path, test=test_name, quiet=quiet
+ )
+
+ def logCommand(self, name, completeCmd, testdir):
+ self.log.info("%s | full command: %r" % (name, completeCmd))
+ self.log.info("%s | current directory: %r" % (name, testdir))
+ # Show only those environment variables that are changed from
+ # the ambient environment.
+ changedEnv = set("%s=%s" % i for i in six.iteritems(self.env)) - set(
+ "%s=%s" % i for i in six.iteritems(os.environ)
+ )
+ self.log.info("%s | environment: %s" % (name, list(changedEnv)))
+ shell_command_tokens = [
+ pipes.quote(tok) for tok in list(changedEnv) + completeCmd
+ ]
+ self.log.info(
+ "%s | as shell command: (cd %s; %s)"
+ % (name, pipes.quote(testdir), " ".join(shell_command_tokens))
+ )
+
+ def killTimeout(self, proc):
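+        # Capture a minidump from the hung process (and kill it) so the
+        # timeout can be diagnosed afterwards.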
+ if proc is not None and hasattr(proc, "pid"):
+ mozcrash.kill_and_get_minidump(
+ proc.pid, self.tempDir, utility_path=self.utility_path
+ )
+ else:
+ self.log.info("not killing -- proc or pid unknown")
+
+ def postCheck(self, proc):
+ """Checks for a still-running test process, kills it and fails the test if found.
+ We can sometimes get here before the process has terminated, which would
+ cause removeDir() to fail - so check for the process and kill it if needed.
+ """
+ if proc and self.poll(proc) is None:
+ if HAVE_PSUTIL:
+ try:
+ self.kill(proc)
+ except psutil.NoSuchProcess:
+ pass
+ else:
+ self.kill(proc)
+ message = "%s | Process still running after test!" % self.test_object["id"]
+ if self.retry:
+ self.log.info(message)
+ return
+
+ self.log.error(message)
+ self.log_full_output()
+ self.failCount = 1
+
+ def testTimeout(self, proc):
+ if self.test_object["expected"] == "pass":
+ expected = "PASS"
+ else:
+ expected = "FAIL"
+
+ if self.retry:
+ self.log.test_end(
+ self.test_object["id"],
+ "TIMEOUT",
+ expected="TIMEOUT",
+ message="Test timed out",
+ )
+ else:
+ result = "TIMEOUT"
+ if self.timeoutAsPass:
+ expected = "FAIL"
+ result = "FAIL"
+ self.failCount = 1
+ self.log.test_end(
+ self.test_object["id"],
+ result,
+ expected=expected,
+ message="Test timed out",
+ )
+ self.log_full_output()
+
+ self.done = True
+ self.timedout = True
+ self.killTimeout(proc)
+ self.log.info("xpcshell return code: %s" % self.getReturnCode(proc))
+ self.postCheck(proc)
+ self.clean_temp_dirs(self.test_object["path"])
+
+ def updateTestPrefsFile(self):
+        # If the manifest file specifies additional prefs, merge the prefs
+        # set in the user.js file stored in _rootTempDir with the prefs from
+        # the manifest and the prefs specified in the extraPrefs option.
+ if "prefs" in self.test_object:
+ # Merge the user preferences in a fake profile dir in a
+ # local temporary dir (self.tempDir is the remoteTmpDir
+ # for the RemoteXPCShellTestThread subclass and so we
+ # can't use that tempDir here).
+ localTempDir = mkdtemp(prefix="xpc-other-", dir=self._rootTempDir)
+
+ filename = "user.js"
+ interpolation = {"server": "dummyserver"}
+ profile = Profile(profile=localTempDir, restore=False)
+ # _rootTempDir contains a user.js file, generated by buildPrefsFile
+ profile.merge(self._rootTempDir, interpolation=interpolation)
+
+ prefs = self.test_object["prefs"].strip().split()
+ name = self.test_object["id"]
+ if self.verbose:
+                self.log.info(
+                    "{}: Per-test extra prefs will be set:\n {}".format(
+                        name, "\n ".join(prefs)
+                    )
+                )
+
+ profile.set_preferences(parse_preferences(prefs), filename=filename)
+            # Make sure that the extra prefs from the command line override
+ # any prefs inherited from the shared profile data or the manifest prefs.
+ profile.set_preferences(
+ parse_preferences(self.extraPrefs), filename=filename
+ )
+ return os.path.join(profile.profile, filename)
+
+        # Return the root prefsFile if there are no other prefs to merge.
+ # This is the path set by buildPrefsFile.
+ return self.rootPrefsFile
+
+ @property
+ def conditioned_profile_copy(self):
+ """Returns a copy of the original conditioned profile that was created."""
+
+ condprof_copy = os.path.join(tempfile.mkdtemp(), "profile")
+ shutil.copytree(
+ self.conditionedProfileDir,
+ condprof_copy,
+ ignore=shutil.ignore_patterns("lock"),
+ )
+ self.log.info("Created a conditioned-profile copy: %s" % condprof_copy)
+ return condprof_copy
+
+ def buildCmdTestFile(self, name):
+ """
+ Build the command line arguments for the test file.
+ On a remote system, this may be overloaded to use a remote path structure.
+ """
+ return ["-e", 'const _TEST_FILE = ["%s"];' % name.replace("\\", "/")]
+
+ def setupTempDir(self):
+ tempDir = mkdtemp(prefix="xpc-other-", dir=self._rootTempDir)
+ self.env["XPCSHELL_TEST_TEMP_DIR"] = tempDir
+ if self.interactive:
+ self.log.info("temp dir is %s" % tempDir)
+ return tempDir
+
+ def setupProfileDir(self):
+ """
+ Create a temporary folder for the profile and set appropriate environment variables.
+ When running check-interactive and check-one, the directory is well-defined and
+ retained for inspection once the tests complete.
+
+ On a remote system, this may be overloaded to use a remote path structure.
+ """
+ if self.conditionedProfileDir:
+ profileDir = self.conditioned_profile_copy
+ elif self.interactive or self.singleFile:
+ profileDir = os.path.join(gettempdir(), self.profileName, "xpcshellprofile")
+ try:
+ # This could be left over from previous runs
+ self.removeDir(profileDir)
+ except Exception:
+ pass
+ os.makedirs(profileDir)
+ else:
+ profileDir = mkdtemp(prefix="xpc-profile-", dir=self._rootTempDir)
+ self.env["XPCSHELL_TEST_PROFILE_DIR"] = profileDir
+ if self.interactive or self.singleFile:
+ self.log.info("profile dir is %s" % profileDir)
+ return profileDir
+
+ def setupMozinfoJS(self):
+ mozInfoJSPath = os.path.join(self.profileDir, "mozinfo.json")
+ mozInfoJSPath = mozInfoJSPath.replace("\\", "\\\\")
+ mozinfo.output_to_file(mozInfoJSPath)
+ return mozInfoJSPath
+
+ def buildCmdHead(self):
+ """
+ Build the command line arguments for the head files,
+ along with the address of the webserver which some tests require.
+
+ On a remote system, this is overloaded to resolve quoting issues over a
+ secondary command line.
+ """
+ headfiles = self.getHeadFiles(self.test_object)
+ cmdH = ", ".join(['"' + f.replace("\\", "/") + '"' for f in headfiles])
+
+ dbgport = 0 if self.jsDebuggerInfo is None else self.jsDebuggerInfo.port
+
+ return [
+ "-e",
+ "const _HEAD_FILES = [%s];" % cmdH,
+ "-e",
+ "const _JSDEBUGGER_PORT = %d;" % dbgport,
+ ]
+
+ def getHeadFiles(self, test):
+ """Obtain lists of head- files. Returns a list of head files."""
+
+ def sanitize_list(s, kind):
+ for f in s.strip().split(" "):
+ f = f.strip()
+ if len(f) < 1:
+ continue
+
+ path = os.path.normpath(os.path.join(test["here"], f))
+ if not os.path.exists(path):
+ raise Exception("%s file does not exist: %s" % (kind, path))
+
+ if not os.path.isfile(path):
+ raise Exception("%s file is not a file: %s" % (kind, path))
+
+ yield path
+
+ headlist = test.get("head", "")
+ return list(sanitize_list(headlist, "head"))
+
+ def buildXpcsCmd(self):
+ """
+        Build the xpcshell command, loading the root head.js file as the first file in
+        our test path, before other head files and test files. On a remote system, this
+        is overloaded to add additional command line arguments.
+ """
+ # - NOTE: if you rename/add any of the constants set here, update
+ # do_load_child_test_harness() in head.js
+ if not self.appPath:
+ self.appPath = self.xrePath
+
+ if self.app_binary:
+ xpcsCmd = [
+ self.app_binary,
+ "--xpcshell",
+ ]
+ else:
+ xpcsCmd = [
+ self.xpcshell,
+ ]
+
+ xpcsCmd += [
+ "-g",
+ self.xrePath,
+ "-a",
+ self.appPath,
+ "-m",
+ "-e",
+ 'const _HEAD_JS_PATH = "%s";' % self.headJSPath,
+ "-e",
+ 'const _MOZINFO_JS_PATH = "%s";' % self.mozInfoJSPath,
+ "-e",
+ 'const _PREFS_FILE = "%s";' % self.prefsFile.replace("\\", "\\\\"),
+ ]
+
+ if self.testingModulesDir:
+ # Escape backslashes in string literal.
+ sanitized = self.testingModulesDir.replace("\\", "\\\\")
+ xpcsCmd.extend(["-e", 'const _TESTING_MODULES_DIR = "%s";' % sanitized])
+
+ xpcsCmd.extend(["-f", os.path.join(self.testharnessdir, "head.js")])
+
+ if self.debuggerInfo:
+ xpcsCmd = [self.debuggerInfo.path] + self.debuggerInfo.args + xpcsCmd
+
+ return xpcsCmd
+
+ def cleanupDir(self, directory, name):
+ if not os.path.exists(directory):
+ return
+
+ # up to TRY_LIMIT attempts (one every second), because
+ # the Windows filesystem is slow to react to the changes
+ TRY_LIMIT = 25
+ try_count = 0
+ while try_count < TRY_LIMIT:
+ try:
+ self.removeDir(directory)
+ except OSError:
+ self.log.info("Failed to remove directory: %s. Waiting." % directory)
+ # We suspect the filesystem may still be making changes. Wait a
+ # little bit and try again.
+ time.sleep(1)
+ try_count += 1
+ else:
+ # removed fine
+ return
+
+ # we try cleaning up again later at the end of the run
+ self.cleanup_dir_list.append(directory)
+
+ def clean_temp_dirs(self, name):
+ # We don't want to delete the profile when running check-interactive
+ # or check-one.
+ if self.profileDir and not self.interactive and not self.singleFile:
+ self.cleanupDir(self.profileDir, name)
+
+ self.cleanupDir(self.tempDir, name)
+
+ def parse_output(self, output):
+ """Parses process output for structured messages and saves output as it is
+ read. Sets self.has_failure_output in case of evidence of a failure"""
+ for line_string in output.splitlines():
+ self.process_line(line_string)
+
+ if self.saw_proc_start and not self.saw_proc_end:
+ self.has_failure_output = True
+
+ def fix_text_output(self, line):
+ line = cleanup_encoding(line)
+ if self.stack_fixer_function is not None:
+ line = self.stack_fixer_function(line)
+
+ if isinstance(line, bytes):
+ line = line.decode("utf-8")
+ return line
+
+ def log_line(self, line):
+ """Log a line of output (either a parser json object or text output from
+ the test process"""
+ if isinstance(line, six.string_types) or isinstance(line, bytes):
+ line = self.fix_text_output(line).rstrip("\r\n")
+ self.log.process_output(self.proc_ident, line, command=self.command)
+ else:
+ if "message" in line:
+ line["message"] = self.fix_text_output(line["message"])
+ if "xpcshell_process" in line:
+ line["thread"] = " ".join(
+ [current_thread().name, line["xpcshell_process"]]
+ )
+ else:
+ line["thread"] = current_thread().name
+ self.log.log_raw(line)
+
+ def log_full_output(self):
+ """Logs any buffered output from the test process, and clears the buffer."""
+ if not self.output_lines:
+ return
+ self.log.info(">>>>>>>")
+ for line in self.output_lines:
+ self.log_line(line)
+ self.log.info("<<<<<<<")
+ self.output_lines = []
+
+ def report_message(self, message):
+ """Stores or logs a json log message in mozlog format."""
+ if self.verbose:
+ self.log_line(message)
+ else:
+ self.output_lines.append(message)
+
+ def process_line(self, line_string):
+ """Parses a single line of output, determining its significance and
+ reporting a message.
+ """
+ if isinstance(line_string, bytes):
+ # Transform binary to string representation
+ line_string = line_string.decode(sys.stdout.encoding, errors="replace")
+
+ if not line_string.strip():
+ return
+
+ try:
+ line_object = json.loads(line_string)
+ if not isinstance(line_object, dict):
+ self.report_message(line_string)
+ return
+ except ValueError:
+ self.report_message(line_string)
+ return
+
+ if (
+ "action" not in line_object
+ or line_object["action"] not in EXPECTED_LOG_ACTIONS
+ ):
+            # The test process printed JSON that is not a structured log message.
+ self.report_message(line_string)
+ return
+
+ action = line_object["action"]
+
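+        # Treat the line as failure evidence if it carries an "expected" field
+        # or is an ERROR-level log message ("and" binds tighter than "or" below).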
+ self.has_failure_output = (
+ self.has_failure_output
+ or "expected" in line_object
+ or action == "log"
+ and line_object["level"] == "ERROR"
+ )
+
+ self.report_message(line_object)
+
+ if action == "log" and line_object["message"] == "CHILD-TEST-STARTED":
+ self.saw_proc_start = True
+ elif action == "log" and line_object["message"] == "CHILD-TEST-COMPLETED":
+ self.saw_proc_end = True
+
+ def run_test(self):
+ """Run an individual xpcshell test."""
+ global gotSIGINT
+
+ name = self.test_object["id"]
+ path = self.test_object["path"]
+
+ # Check for skipped tests
+ if "disabled" in self.test_object:
+ message = self.test_object["disabled"]
+ if not message:
+ message = "disabled from xpcshell manifest"
+ self.log.test_start(name)
+ self.log.test_end(name, "SKIP", message=message)
+
+ self.retry = False
+ self.keep_going = True
+ return
+
+ # Check for known-fail tests
+ expect_pass = self.test_object["expected"] == "pass"
+
+ # By default self.appPath will equal the gre dir. If specified in the
+ # xpcshell.ini file, set a different app dir for this test.
+ if self.app_dir_key and self.app_dir_key in self.test_object:
+ rel_app_dir = self.test_object[self.app_dir_key]
+ rel_app_dir = os.path.join(self.xrePath, rel_app_dir)
+ self.appPath = os.path.abspath(rel_app_dir)
+ else:
+ self.appPath = None
+
+ test_dir = os.path.dirname(path)
+
+        # Create a profile dir and a temp dir that the JS harness can put
+        # its profile and temporary data in.
+ self.profileDir = self.setupProfileDir()
+ self.tempDir = self.setupTempDir()
+ self.mozInfoJSPath = self.setupMozinfoJS()
+
+ # Setup per-manifest prefs and write them into the tempdir.
+ self.prefsFile = self.updateTestPrefsFile()
+
+ # The order of the command line is important:
+ # 1) Arguments for xpcshell itself
+ self.command = self.buildXpcsCmd()
+
+ # 2) Arguments for the head files
+ self.command.extend(self.buildCmdHead())
+
+ # 3) Arguments for the test file
+ self.command.extend(self.buildCmdTestFile(path))
+ self.command.extend(["-e", 'const _TEST_NAME = "%s";' % name])
+
+ # 4) Arguments for code coverage
+ if self.jscovdir:
+ self.command.extend(
+ ["-e", 'const _JSCOV_DIR = "%s";' % self.jscovdir.replace("\\", "/")]
+ )
+
+ # 5) Runtime arguments
+ if "debug" in self.test_object:
+ self.command.append("-d")
+
+ self.command.extend(self.xpcsRunArgs)
+
+ if self.test_object.get("dmd") == "true":
+ self.env["PYTHON"] = sys.executable
+ self.env["BREAKPAD_SYMBOLS_PATH"] = self.symbolsPath
+
+ if self.test_object.get("subprocess") == "true":
+ self.env["PYTHON"] = sys.executable
+
+ if (
+ self.test_object.get("headless", "true" if self.headless else None)
+ == "true"
+ ):
+ self.env["MOZ_HEADLESS"] = "1"
+ self.env["DISPLAY"] = "77" # Set a fake display.
+
+ testTimeoutInterval = self.harness_timeout
+        # Allow a test to request a multiple of the timeout if it is expected to run long.
+ if "requesttimeoutfactor" in self.test_object:
+ testTimeoutInterval *= int(self.test_object["requesttimeoutfactor"])
+
+ testTimer = None
+ if not self.interactive and not self.debuggerInfo and not self.jsDebuggerInfo:
+ testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(proc))
+ testTimer.start()
+
+ proc = None
+ process_output = None
+
+ try:
+ self.log.test_start(name)
+ if self.verbose:
+ self.logCommand(name, self.command, test_dir)
+
+ proc = self.launchProcess(
+ self.command,
+ stdout=self.pStdout,
+ stderr=self.pStderr,
+ env=self.env,
+ cwd=test_dir,
+ timeout=testTimeoutInterval,
+ test_name=name,
+ )
+
+ if hasattr(proc, "pid"):
+ self.proc_ident = proc.pid
+ else:
+ # On mobile, "proc" is just a file.
+ self.proc_ident = name
+
+ if self.interactive:
+ self.log.info("%s | Process ID: %d" % (name, self.proc_ident))
+
+ # Communicate returns a tuple of (stdout, stderr), however we always
+ # redirect stderr to stdout, so the second element is ignored.
+ process_output, _ = self.communicate(proc)
+
+ if self.interactive:
+ # Not sure what else to do here...
+ self.keep_going = True
+ return
+
+ if testTimer:
+ testTimer.cancel()
+
+ if process_output:
+ # For the remote case, stdout is not yet depleted, so we parse
+ # it here all at once.
+ self.parse_output(process_output)
+
+ return_code = self.getReturnCode(proc)
+
+ # TSan'd processes return 66 if races are detected. This isn't
+ # good in the sense that there's no way to distinguish between
+ # a process that would normally have returned zero but has races,
+ # and a race-free process that returns 66. But I don't see how
+ # to do better. This ambiguity is at least constrained to the
+ # with-TSan case. It doesn't affect normal builds.
+ #
+ # This also assumes that the magic value 66 isn't overridden by
+ # a TSAN_OPTIONS=exitcode=<number> environment variable setting.
+ #
+ TSAN_EXIT_CODE_WITH_RACES = 66
+
+ return_code_ok = return_code == 0 or (
+ self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES
+ )
+ passed = (not self.has_failure_output) and return_code_ok
+
+ status = "PASS" if passed else "FAIL"
+ expected = "PASS" if expect_pass else "FAIL"
+ message = "xpcshell return code: %d" % return_code
+
+ if self.timedout:
+ return
+
+ if status != expected:
+ if self.retry:
+ self.log.test_end(
+ name,
+ status,
+ expected=status,
+ message="Test failed or timed out, will retry",
+ )
+ self.clean_temp_dirs(path)
+ if self.verboseIfFails and not self.verbose:
+ self.log_full_output()
+ return
+
+ self.log.test_end(name, status, expected=expected, message=message)
+ self.log_full_output()
+
+ self.failCount += 1
+
+ if self.failureManifest:
+ with open(self.failureManifest, "a") as f:
+ f.write("[%s]\n" % self.test_object["path"])
+ for k, v in self.test_object.items():
+ f.write("%s = %s\n" % (k, v))
+
+ else:
+ # If TSan reports a race, dump the output, else we can't
+ # diagnose what the problem was. See comments above about
+ # the significance of TSAN_EXIT_CODE_WITH_RACES.
+ if self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES:
+ self.log_full_output()
+
+ self.log.test_end(name, status, expected=expected, message=message)
+ if self.verbose:
+ self.log_full_output()
+
+ self.retry = False
+
+ if expect_pass:
+ self.passCount = 1
+ else:
+ self.todoCount = 1
+
+ if self.checkForCrashes(self.tempDir, self.symbolsPath, test_name=name):
+ if self.retry:
+ self.clean_temp_dirs(path)
+ return
+
+ # If we assert during shutdown there's a chance the test has passed
+ # but we haven't logged full output, so do so here.
+ self.log_full_output()
+ self.failCount = 1
+
+ if self.logfiles and process_output:
+ self.createLogFile(name, process_output)
+
+ finally:
+ self.postCheck(proc)
+ self.clean_temp_dirs(path)
+
+ if gotSIGINT:
+ self.log.error("Received SIGINT (control-C) during test execution")
+ if self.keep_going:
+ gotSIGINT = False
+ else:
+ self.keep_going = False
+ return
+
+ self.keep_going = True
+
+
+class XPCShellTests(object):
+ def __init__(self, log=None):
+ """Initializes node status and logger."""
+ self.log = log
+ self.harness_timeout = HARNESS_TIMEOUT
+ self.nodeProc = {}
+ self.http3ServerProc = {}
+ self.conditioned_profile_dir = None
+
+ def getTestManifest(self, manifest):
+ if isinstance(manifest, TestManifest):
+ return manifest
+ elif manifest is not None:
+ manifest = os.path.normpath(os.path.abspath(manifest))
+ if os.path.isfile(manifest):
+ return TestManifest([manifest], strict=True)
+ else:
+ ini_path = os.path.join(manifest, "xpcshell.ini")
+ else:
+ ini_path = os.path.join(SCRIPT_DIR, "tests", "xpcshell.ini")
+
+ if os.path.exists(ini_path):
+ return TestManifest([ini_path], strict=True)
+ else:
+ self.log.error(
+ "Failed to find manifest at %s; use --manifest "
+ "to set path explicitly." % ini_path
+ )
+ sys.exit(1)
+
+ def normalizeTest(self, root, test_object):
+ path = test_object.get("file_relpath", test_object["relpath"])
+ if "dupe-manifest" in test_object and "ancestor_manifest" in test_object:
+ test_object["id"] = "%s:%s" % (
+ os.path.basename(test_object["ancestor_manifest"]),
+ path,
+ )
+ else:
+ test_object["id"] = path
+
+ if root:
+ test_object["manifest"] = os.path.relpath(test_object["manifest"], root)
+
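+        # Use forward slashes so test ids and manifest paths are consistent across platforms.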
+ if os.sep != "/":
+ for key in ("id", "manifest"):
+ test_object[key] = test_object[key].replace(os.sep, "/")
+
+ return test_object
+
+ def buildTestList(self, test_tags=None, test_paths=None, verify=False):
+ """Reads the xpcshell.ini manifest and set self.alltests to an array.
+
+ Given the parameters, this method compiles a list of tests to be run
+ that matches the criteria set by parameters.
+
+        If any chunking of tests is to occur, it is also done in this method.
+
+        If no tests are added to the list of tests to be run, an error
+        is logged and sys.exit() is called.
+
+ Args:
+ test_tags (list, optional): list of strings.
+ test_paths (list, optional): list of strings derived from the command
+ line argument provided by user, specifying
+ tests to be run.
+            verify (bool, optional): whether tests are being run in --verify mode.
+ """
+ if test_paths is None:
+ test_paths = []
+
+ mp = self.getTestManifest(self.manifest)
+
+ root = mp.rootdir
+ if build and not root:
+ root = build.topsrcdir
+ normalize = partial(self.normalizeTest, root)
+
+ filters = []
+ if test_tags:
+ filters.append(tags(test_tags))
+
+ path_filter = None
+ if test_paths:
+ path_filter = pathprefix(test_paths)
+ filters.append(path_filter)
+
+ noDefaultFilters = False
+ if self.runFailures:
+ filters.append(failures(self.runFailures))
+ noDefaultFilters = True
+
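+        # When chunking is requested, keep only the slice of tests assigned to this chunk.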
+ if self.totalChunks > 1:
+ filters.append(chunk_by_slice(self.thisChunk, self.totalChunks))
+ try:
+ self.alltests = list(
+ map(
+ normalize,
+ mp.active_tests(
+ filters=filters,
+ noDefaultFilters=noDefaultFilters,
+ **mozinfo.info,
+ ),
+ )
+ )
+ except TypeError:
+ sys.stderr.write("*** offending mozinfo.info: %s\n" % repr(mozinfo.info))
+ raise
+
+ if path_filter and path_filter.missing:
+ self.log.warning(
+ "The following path(s) didn't resolve any tests:\n {}".format(
+ " \n".join(sorted(path_filter.missing))
+ )
+ )
+
+ if len(self.alltests) == 0:
+ if (
+ test_paths
+ and path_filter.missing == set(test_paths)
+ and os.environ.get("MOZ_AUTOMATION") == "1"
+ ):
+ # This can happen in CI when a manifest doesn't exist due to a
+ # build config variable in moz.build traversal. Don't generate
+ # an error in this case. Adding a todo count avoids mozharness
+ # raising an error.
+ self.todoCount += len(path_filter.missing)
+ else:
+ self.log.error(
+ "no tests to run using specified "
+ "combination of filters: {}".format(mp.fmt_filters())
+ )
+ sys.exit(1)
+
+ if len(self.alltests) == 1 and not verify:
+ self.singleFile = os.path.basename(self.alltests[0]["path"])
+ else:
+ self.singleFile = None
+
+ if self.dump_tests:
+ self.dump_tests = os.path.expanduser(self.dump_tests)
+ assert os.path.exists(os.path.dirname(self.dump_tests))
+ with open(self.dump_tests, "w") as dumpFile:
+ dumpFile.write(json.dumps({"active_tests": self.alltests}))
+
+ self.log.info("Dumping active_tests to %s file." % self.dump_tests)
+ sys.exit()
+
+ def setAbsPath(self):
+ """
+ Set the absolute path for xpcshell, httpdjspath and xrepath. These 3 variables
+ depend on input from the command line and we need to allow for absolute paths.
+ This function is overloaded for a remote solution as os.path* won't work remotely.
+ """
+ self.testharnessdir = os.path.dirname(os.path.abspath(__file__))
+ self.headJSPath = self.testharnessdir.replace("\\", "/") + "/head.js"
+ if self.xpcshell is not None:
+ self.xpcshell = os.path.abspath(self.xpcshell)
+
+ if self.app_binary is not None:
+ self.app_binary = os.path.abspath(self.app_binary)
+
+ if self.xrePath is None:
+ binary_path = self.app_binary or self.xpcshell
+ self.xrePath = os.path.dirname(binary_path)
+ if mozinfo.isMac:
+ # Check if we're run from an OSX app bundle and override
+ # self.xrePath if we are.
+ appBundlePath = os.path.join(
+ os.path.dirname(os.path.dirname(self.xpcshell)), "Resources"
+ )
+ if os.path.exists(os.path.join(appBundlePath, "application.ini")):
+ self.xrePath = appBundlePath
+ else:
+ self.xrePath = os.path.abspath(self.xrePath)
+
+ # httpd.js belongs in xrePath/components, which is Contents/Resources on mac
+ self.httpdJSPath = os.path.join(self.xrePath, "components", "httpd.js")
+ self.httpdJSPath = self.httpdJSPath.replace("\\", "/")
+
+ if self.mozInfo is None:
+ self.mozInfo = os.path.join(self.testharnessdir, "mozinfo.json")
+
+ def buildPrefsFile(self, extraPrefs):
+        # Create the user.js prefs file
+
+ # In test packages used in CI, the profile_data directory is installed
+ # in the SCRIPT_DIR.
+ profile_data_dir = os.path.join(SCRIPT_DIR, "profile_data")
+ # If possible, read profile data from topsrcdir. This prevents us from
+ # requiring a re-build to pick up newly added extensions in the
+ # <profile>/extensions directory.
+ if build:
+ path = os.path.join(build.topsrcdir, "testing", "profiles")
+ if os.path.isdir(path):
+ profile_data_dir = path
+ # Still not found? Look for testing/profiles relative to testing/xpcshell.
+ if not os.path.isdir(profile_data_dir):
+ path = os.path.abspath(os.path.join(SCRIPT_DIR, "..", "profiles"))
+ if os.path.isdir(path):
+ profile_data_dir = path
+
+ with open(os.path.join(profile_data_dir, "profiles.json"), "r") as fh:
+ base_profiles = json.load(fh)["xpcshell"]
+
+ # values to use when interpolating preferences
+ interpolation = {
+ "server": "dummyserver",
+ }
+
+ profile = Profile(profile=self.tempDir, restore=False)
+ prefsFile = os.path.join(profile.profile, "user.js")
+
+ # Empty the user.js file in case the file existed before.
+ with open(prefsFile, "w"):
+ pass
+
+ for name in base_profiles:
+ path = os.path.join(profile_data_dir, name)
+ profile.merge(path, interpolation=interpolation)
+
+ # add command line prefs
+ prefs = parse_preferences(extraPrefs)
+ profile.set_preferences(prefs)
+
+ self.prefsFile = prefsFile
+ return prefs
+
+ def buildCoreEnvironment(self):
+ """
+ Add environment variables likely to be used across all platforms, including
+ remote systems.
+ """
+ # Make assertions fatal
+ self.env["XPCOM_DEBUG_BREAK"] = "stack-and-abort"
+        # Crash reporting interferes with debugging, so only enable it when not debugging.
+ if not self.debuggerInfo:
+ self.env["MOZ_CRASHREPORTER"] = "1"
+ # Don't launch the crash reporter client
+ self.env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
+ # Don't permit remote connections by default.
+ # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
+ # enable non-local connections for the purposes of local testing.
+ # Don't override the user's choice here. See bug 1049688.
+ self.env.setdefault("MOZ_DISABLE_NONLOCAL_CONNECTIONS", "1")
+ if self.mozInfo.get("topsrcdir") is not None:
+ self.env["MOZ_DEVELOPER_REPO_DIR"] = self.mozInfo["topsrcdir"]
+ if self.mozInfo.get("topobjdir") is not None:
+ self.env["MOZ_DEVELOPER_OBJ_DIR"] = self.mozInfo["topobjdir"]
+
+ # Disable the content process sandbox for the xpcshell tests. They
+ # currently attempt to do things like bind() sockets, which is not
+ # compatible with the sandbox.
+ self.env["MOZ_DISABLE_CONTENT_SANDBOX"] = "1"
+ if os.getenv("MOZ_FETCHES_DIR", None):
+ self.env["MOZ_FETCHES_DIR"] = os.getenv("MOZ_FETCHES_DIR", None)
+
+ if self.mozInfo.get("socketprocess_networking"):
+ self.env["MOZ_FORCE_USE_SOCKET_PROCESS"] = "1"
+ else:
+ self.env["MOZ_DISABLE_SOCKET_PROCESS"] = "1"
+
+ def buildEnvironment(self):
+ """
+        Create and return self.env as a dictionary that includes all the appropriate env
+        variables and values. On a remote system, we overload this to set different
+        values, since things like os.environ and PATH are missing there.
+ """
+ self.env = dict(os.environ)
+ self.buildCoreEnvironment()
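+        # Make sure the dynamic loader can find the libraries in xrePath on each platform.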
+ if sys.platform == "win32":
+ self.env["PATH"] = self.env["PATH"] + ";" + self.xrePath
+ elif sys.platform in ("os2emx", "os2knix"):
+ os.environ["BEGINLIBPATH"] = self.xrePath + ";" + self.env["BEGINLIBPATH"]
+ os.environ["LIBPATHSTRICT"] = "T"
+ elif sys.platform == "osx" or sys.platform == "darwin":
+ self.env["DYLD_LIBRARY_PATH"] = os.path.join(
+ os.path.dirname(self.xrePath), "MacOS"
+ )
+ else: # unix or linux?
+ if "LD_LIBRARY_PATH" not in self.env or self.env["LD_LIBRARY_PATH"] is None:
+ self.env["LD_LIBRARY_PATH"] = self.xrePath
+ else:
+ self.env["LD_LIBRARY_PATH"] = ":".join(
+ [self.xrePath, self.env["LD_LIBRARY_PATH"]]
+ )
+
+ usingASan = "asan" in self.mozInfo and self.mozInfo["asan"]
+ usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]
+ if usingASan or usingTSan:
+ # symbolizer support
+ if "ASAN_SYMBOLIZER_PATH" in self.env and os.path.isfile(
+ self.env["ASAN_SYMBOLIZER_PATH"]
+ ):
+ llvmsym = self.env["ASAN_SYMBOLIZER_PATH"]
+ else:
+ llvmsym = os.path.join(
+ self.xrePath, "llvm-symbolizer" + self.mozInfo["bin_suffix"]
+ )
+ if os.path.isfile(llvmsym):
+ if usingASan:
+ self.env["ASAN_SYMBOLIZER_PATH"] = llvmsym
+ else:
+ oldTSanOptions = self.env.get("TSAN_OPTIONS", "")
+ self.env["TSAN_OPTIONS"] = "external_symbolizer_path={} {}".format(
+ llvmsym, oldTSanOptions
+ )
+ self.log.info("runxpcshelltests.py | using symbolizer at %s" % llvmsym)
+ else:
+ self.log.error(
+ "TEST-UNEXPECTED-FAIL | runxpcshelltests.py | "
+ "Failed to find symbolizer at %s" % llvmsym
+ )
+
+ return self.env
+
+ def getPipes(self):
+ """
+        Determine the value of stdout and stderr for the test.
+        Return value is a tuple (pStdout, pStderr).
+ """
+ if self.interactive:
+ pStdout = None
+ pStderr = None
+ else:
+ if self.debuggerInfo and self.debuggerInfo.interactive:
+ pStdout = None
+ pStderr = None
+ else:
+ if sys.platform == "os2emx":
+ pStdout = None
+ else:
+ pStdout = PIPE
+ pStderr = STDOUT
+ return pStdout, pStderr
+
+ def verifyDirPath(self, dirname):
+ """
+ Simple wrapper to get the absolute path for a given directory name.
+ On a remote system, we need to overload this to work on the remote filesystem.
+ """
+ return os.path.abspath(dirname)
+
+ def trySetupNode(self):
+ """
+        Run node for HTTP/2 tests, if available, and update the environment as appropriate.
+ """
+ if os.getenv("MOZ_ASSUME_NODE_RUNNING", None):
+ self.log.info("Assuming required node servers are already running")
+ if not os.getenv("MOZHTTP2_PORT", None):
+ self.log.warning(
+ "MOZHTTP2_PORT environment variable not set. "
+ "Tests requiring http/2 will fail."
+ )
+ return
+
+ # We try to find the node executable in the path given to us by the user in
+ # the MOZ_NODE_PATH environment variable
+ nodeBin = os.getenv("MOZ_NODE_PATH", None)
+ if not nodeBin and build:
+ nodeBin = build.substs.get("NODEJS")
+ if not nodeBin:
+ self.log.warning(
+ "MOZ_NODE_PATH environment variable not set. "
+ "Tests requiring http/2 will fail."
+ )
+ return
+
+ if not os.path.exists(nodeBin) or not os.path.isfile(nodeBin):
+ error = "node not found at MOZ_NODE_PATH %s" % (nodeBin)
+ self.log.error(error)
+ raise IOError(error)
+
+ self.log.info("Found node at %s" % (nodeBin,))
+
+ def startServer(name, serverJs):
+ if not os.path.exists(serverJs):
+ error = "%s not found at %s" % (name, serverJs)
+ self.log.error(error)
+ raise IOError(error)
+
+ # OK, we found our server, let's try to get it running
+ self.log.info("Found %s at %s" % (name, serverJs))
+ try:
+ # We pipe stdin to node because the server will exit when its
+ # stdin reaches EOF
+ with popenCleanupHack():
+ process = Popen(
+ [nodeBin, serverJs],
+ stdin=PIPE,
+ stdout=PIPE,
+ stderr=PIPE,
+ env=self.env,
+ cwd=os.getcwd(),
+ universal_newlines=True,
+ )
+ self.nodeProc[name] = process
+
+ # Check to make sure the server starts properly by waiting for it to
+ # tell us it's started
+ msg = process.stdout.readline()
+ if "server listening" in msg:
+ searchObj = re.search(
+ r"HTTP2 server listening on ports ([0-9]+),([0-9]+)", msg, 0
+ )
+ if searchObj:
+ self.env["MOZHTTP2_PORT"] = searchObj.group(1)
+ self.env["MOZNODE_EXEC_PORT"] = searchObj.group(2)
+ except OSError as e:
+ # This occurs if the subprocess couldn't be started
+ self.log.error("Could not run %s server: %s" % (name, str(e)))
+ raise
+
+ myDir = os.path.split(os.path.abspath(__file__))[0]
+ startServer("moz-http2", os.path.join(myDir, "moz-http2", "moz-http2.js"))
+
+ def shutdownNode(self):
+ """
+ Shut down our node process, if it exists
+ """
+ for name, proc in six.iteritems(self.nodeProc):
+ self.log.info("Node %s server shutting down ..." % name)
+ if proc.poll() is not None:
+ self.log.info("Node server %s already dead %s" % (name, proc.poll()))
+ else:
+ proc.terminate()
+
+ def dumpOutput(fd, label):
+ firstTime = True
+ for msg in fd:
+ if firstTime:
+ firstTime = False
+ self.log.info("Process %s" % label)
+ self.log.info(msg)
+
+ dumpOutput(proc.stdout, "stdout")
+ dumpOutput(proc.stderr, "stderr")
+ self.nodeProc = {}
+
+ def startHttp3Server(self):
+ """
+        Start an HTTP/3 test server.
+ """
+ binSuffix = ""
+ if sys.platform == "win32":
+ binSuffix = ".exe"
+
+ http3ServerPath = self.http3server
+ if not http3ServerPath:
+ http3ServerPath = os.path.join(
+ SCRIPT_DIR, "http3server", "http3server" + binSuffix
+ )
+ if build:
+ http3ServerPath = os.path.join(
+ build.topobjdir, "dist", "bin", "http3server" + binSuffix
+ )
+
+ if not os.path.exists(http3ServerPath):
+ self.log.warning(
+ "Http3 server not found at "
+ + http3ServerPath
+ + ". Tests requiring http/3 will fail."
+ )
+ return
+
+ # OK, we found our server, let's try to get it running
+ self.log.info("Found %s" % (http3ServerPath))
+ try:
+ dbPath = os.path.join(SCRIPT_DIR, "http3server", "http3serverDB")
+ if build:
+ dbPath = os.path.join(
+ build.topsrcdir, "netwerk", "test", "http3serverDB"
+ )
+ self.log.info("Using %s" % (dbPath))
+ # We pipe stdin to the server because it will exit when its stdin
+ # reaches EOF
+ with popenCleanupHack():
+ process = Popen(
+ [http3ServerPath, dbPath],
+ stdin=PIPE,
+ stdout=PIPE,
+ stderr=PIPE,
+ env=self.env,
+ cwd=os.getcwd(),
+ universal_newlines=True,
+ )
+ self.http3ServerProc["http3Server"] = process
+
+ # Check to make sure the server starts properly by waiting for it to
+ # tell us it's started
+ msg = process.stdout.readline()
+ if "server listening" in msg:
+ searchObj = re.search(
+ r"HTTP3 server listening on ports ([0-9]+), ([0-9]+), ([0-9]+) and ([0-9]+)."
+ " EchConfig is @([\x00-\x7F]+)@",
+ msg,
+ 0,
+ )
+ if searchObj:
+ self.env["MOZHTTP3_PORT"] = searchObj.group(1)
+ self.env["MOZHTTP3_PORT_FAILED"] = searchObj.group(2)
+ self.env["MOZHTTP3_PORT_ECH"] = searchObj.group(3)
+ self.env["MOZHTTP3_PORT_NO_RESPONSE"] = searchObj.group(4)
+ self.env["MOZHTTP3_ECH"] = searchObj.group(5)
+ except OSError as e:
+ # This occurs if the subprocess couldn't be started
+ self.log.error("Could not run the http3 server: %s" % (str(e)))
+
+ def shutdownHttp3Server(self):
+ """
+        Shut down our http3Server process, if it exists
+ """
+ for name, proc in six.iteritems(self.http3ServerProc):
+ self.log.info("%s server shutting down ..." % name)
+ if proc.poll() is not None:
+ self.log.info("Http3 server %s already dead %s" % (name, proc.poll()))
+ else:
+ proc.terminate()
+ retries = 0
+ while proc.poll() is None:
+ time.sleep(0.1)
+ retries += 1
+ if retries > 40:
+ self.log.info("Killing proc")
+ proc.kill()
+ break
+
+ def dumpOutput(fd, label):
+ firstTime = True
+ for msg in fd:
+ if firstTime:
+ firstTime = False
+ self.log.info("Process %s" % label)
+ self.log.info(msg)
+
+ dumpOutput(proc.stdout, "stdout")
+ dumpOutput(proc.stderr, "stderr")
+ self.http3ServerProc = {}
+
+ def buildXpcsRunArgs(self):
+ """
+ Add arguments to run the test or make it interactive.
+ """
+ if self.interactive:
+ self.xpcsRunArgs = [
+ "-e",
+ 'print("To start the test, type |_execute_test();|.");',
+ "-i",
+ ]
+ else:
+ self.xpcsRunArgs = ["-e", "_execute_test(); quit(0);"]
+
+ def addTestResults(self, test):
+ self.passCount += test.passCount
+ self.failCount += test.failCount
+ self.todoCount += test.todoCount
+
+ def updateMozinfo(self, prefs, options):
+ # Handle filenames in mozInfo
+ if not isinstance(self.mozInfo, dict):
+ mozInfoFile = self.mozInfo
+ if not os.path.isfile(mozInfoFile):
+ self.log.error(
+ "Error: couldn't find mozinfo.json at '%s'. Perhaps you "
+ "need to use --build-info-json?" % mozInfoFile
+ )
+ return False
+ self.mozInfo = json.load(open(mozInfoFile))
+
+ # mozinfo.info is used as kwargs. Some builds are done with
+ # an older Python that can't handle Unicode keys in kwargs.
+ # All of the keys in question should be ASCII.
+ fixedInfo = {}
+ for k, v in self.mozInfo.items():
+ if isinstance(k, bytes):
+ k = k.decode("utf-8")
+ fixedInfo[k] = v
+ self.mozInfo = fixedInfo
+
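+        # Expose a few derived values so manifest conditions (skip-if, run-if)
+        # can key off of them.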
+ self.mozInfo["fission"] = prefs.get("fission.autostart", True)
+
+ self.mozInfo["serviceworker_e10s"] = True
+
+ self.mozInfo["verify"] = options.get("verify", False)
+
+ self.mozInfo["socketprocess_networking"] = prefs.get(
+ "network.http.network_access_on_socket_process.enabled", False
+ )
+
+ self.mozInfo["condprof"] = options.get("conditionedProfile", False)
+
+ self.mozInfo["msix"] = options.get(
+ "app_binary"
+ ) is not None and "WindowsApps" in options.get("app_binary", "")
+
+ mozinfo.update(self.mozInfo)
+
+ return True
+
+ @property
+ def conditioned_profile_copy(self):
+ """Returns a copy of the original conditioned profile that was created."""
+ condprof_copy = os.path.join(tempfile.mkdtemp(), "profile")
+ shutil.copytree(
+ self.conditioned_profile_dir,
+ condprof_copy,
+ ignore=shutil.ignore_patterns("lock"),
+ )
+ self.log.info("Created a conditioned-profile copy: %s" % condprof_copy)
+ return condprof_copy
+
+ def downloadConditionedProfile(self, profile_scenario, app):
+ from condprof.client import get_profile
+ from condprof.util import get_current_platform, get_version
+
+ if self.conditioned_profile_dir:
+ # We already have a directory, so provide a copy that
+ # will get deleted after it's done with
+ return self.conditioned_profile_dir
+
+        # create a temp dir to help ensure uniqueness
+ temp_download_dir = tempfile.mkdtemp()
+ self.log.info(
+ "Making temp_download_dir from inside get_conditioned_profile {}".format(
+ temp_download_dir
+ )
+ )
+ # call condprof's client API to yield our platform-specific
+ # conditioned-profile binary
+ platform = get_current_platform()
+ version = None
+ if isinstance(app, str):
+ version = get_version(app)
+
+ if not profile_scenario:
+ profile_scenario = "settled"
+ try:
+ cond_prof_target_dir = get_profile(
+ temp_download_dir,
+ platform,
+ profile_scenario,
+ repo="mozilla-central",
+ version=version,
+ retries=2,
+ )
+ except Exception:
+ if version is None:
+ # any other error is a showstopper
+ self.log.critical("Could not get the conditioned profile")
+ traceback.print_exc()
+ raise
+ version = None
+ try:
+ self.log.info("Retrying a profile with no version specified")
+ cond_prof_target_dir = get_profile(
+ temp_download_dir,
+ platform,
+ profile_scenario,
+ repo="mozilla-central",
+ version=version,
+ )
+ except Exception:
+ self.log.critical("Could not get the conditioned profile")
+ traceback.print_exc()
+ raise
+
+ # now get the full directory path to our fetched conditioned profile
+ self.conditioned_profile_dir = os.path.join(
+ temp_download_dir, cond_prof_target_dir
+ )
+ if not os.path.exists(cond_prof_target_dir):
+ self.log.critical(
+ "Can't find target_dir {}, from get_profile()"
+ "temp_download_dir {}, platform {}, scenario {}".format(
+ cond_prof_target_dir, temp_download_dir, platform, profile_scenario
+ )
+ )
+ raise OSError
+
+ self.log.info(
+ "Original self.conditioned_profile_dir is now set: {}".format(
+ self.conditioned_profile_dir
+ )
+ )
+ return self.conditioned_profile_copy
+
+ def runSelfTest(self):
+ import unittest
+
+ import selftest
+
+ this = self
+
+ class XPCShellTestsTests(selftest.XPCShellTestsTests):
+ def __init__(self, name):
+ unittest.TestCase.__init__(self, name)
+ self.testing_modules = this.testingModulesDir
+ self.xpcshellBin = this.xpcshell
+ self.app_binary = this.app_binary
+ self.utility_path = this.utility_path
+ self.symbols_path = this.symbolsPath
+
+ old_info = dict(mozinfo.info)
+ try:
+ suite = unittest.TestLoader().loadTestsFromTestCase(XPCShellTestsTests)
+ return unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful()
+ finally:
+ # The self tests modify mozinfo, so we need to reset it.
+ mozinfo.info.clear()
+ mozinfo.update(old_info)
+
+ def runTests(self, options, testClass=XPCShellTestThread, mobileArgs=None):
+ """
+ Run xpcshell tests.
+ """
+ global gotSIGINT
+
+ # Number of times to repeat test(s) in --verify mode
+ VERIFY_REPEAT = 10
+
+ if isinstance(options, Namespace):
+ options = vars(options)
+
+ # Try to guess modules directory.
+ # This somewhat grotesque hack allows the buildbot machines to find the
+ # modules directory without having to configure the buildbot hosts. This
+ # code path should never be executed in local runs because the build system
+ # should always set this argument.
+ if not options.get("testingModulesDir"):
+ possible = os.path.join(here, os.path.pardir, "modules")
+
+ if os.path.isdir(possible):
+ testingModulesDir = possible
+
+ if options.get("rerun_failures"):
+ if os.path.exists(options.get("failure_manifest")):
+ rerun_manifest = os.path.join(
+ os.path.dirname(options["failure_manifest"]), "rerun.ini"
+ )
+ shutil.copyfile(options["failure_manifest"], rerun_manifest)
+ os.remove(options["failure_manifest"])
+ else:
+ self.log.error("No failures were found to re-run.")
+ sys.exit(1)
+
+ if options.get("testingModulesDir"):
+ # The resource loader expects native paths. Depending on how we were
+ # invoked, a UNIX style path may sneak in on Windows. We try to
+ # normalize that.
+ testingModulesDir = os.path.normpath(options["testingModulesDir"])
+
+ if not os.path.isabs(testingModulesDir):
+ testingModulesDir = os.path.abspath(testingModulesDir)
+
+ if not testingModulesDir.endswith(os.path.sep):
+ testingModulesDir += os.path.sep
+
+ self.debuggerInfo = None
+
+ if options.get("debugger"):
+ self.debuggerInfo = mozdebug.get_debugger_info(
+ options.get("debugger"),
+ options.get("debuggerArgs"),
+ options.get("debuggerInteractive"),
+ )
+
+ self.jsDebuggerInfo = None
+ if options.get("jsDebugger"):
+            # A namedtuple lets us keep .port instead of ['port']
+ JSDebuggerInfo = namedtuple("JSDebuggerInfo", ["port"])
+ self.jsDebuggerInfo = JSDebuggerInfo(port=options["jsDebuggerPort"])
+
+ self.app_binary = options.get("app_binary")
+ self.xpcshell = options.get("xpcshell")
+ self.http3server = options.get("http3server")
+ self.xrePath = options.get("xrePath")
+ self.utility_path = options.get("utility_path")
+ self.appPath = options.get("appPath")
+ self.symbolsPath = options.get("symbolsPath")
+ self.tempDir = os.path.normpath(options.get("tempDir") or tempfile.gettempdir())
+ self.manifest = options.get("manifest")
+ self.dump_tests = options.get("dump_tests")
+ self.interactive = options.get("interactive")
+ self.verbose = options.get("verbose")
+ self.verboseIfFails = options.get("verboseIfFails")
+ self.keepGoing = options.get("keepGoing")
+ self.logfiles = options.get("logfiles")
+ self.totalChunks = options.get("totalChunks", 1)
+ self.thisChunk = options.get("thisChunk")
+ self.profileName = options.get("profileName") or "xpcshell"
+ self.mozInfo = options.get("mozInfo")
+ self.testingModulesDir = testingModulesDir
+ self.sequential = options.get("sequential")
+ self.failure_manifest = options.get("failure_manifest")
+ self.threadCount = options.get("threadCount") or NUM_THREADS
+ self.jscovdir = options.get("jscovdir")
+ self.headless = options.get("headless")
+ self.runFailures = options.get("runFailures")
+ self.timeoutAsPass = options.get("timeoutAsPass")
+ self.crashAsPass = options.get("crashAsPass")
+ self.conditionedProfile = options.get("conditionedProfile")
+
+ self.testCount = 0
+ self.passCount = 0
+ self.failCount = 0
+ self.todoCount = 0
+
+ if self.conditionedProfile:
+ self.conditioned_profile_dir = self.downloadConditionedProfile(
+ "full", self.appPath
+ )
+ options["self_test"] = False
+ if not options["test_tags"]:
+ options["test_tags"] = []
+ options["test_tags"].append("condprof")
+
+ self.setAbsPath()
+
+ eprefs = options.get("extraPrefs") or []
+ # enable fission by default
+ if options.get("disableFission"):
+ eprefs.append("fission.autostart=false")
+ else:
+            # Fission should already be on by default; set it explicitly just in case.
+ eprefs.append("fission.autostart=true")
+
+ prefs = self.buildPrefsFile(eprefs)
+ self.buildXpcsRunArgs()
+
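+        # Worker threads set this event when a test finishes so the scheduling loop can wake up.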
+ self.event = Event()
+
+ if not self.updateMozinfo(prefs, options):
+ return False
+
+ self.log.info(
+ "These variables are available in the mozinfo environment and "
+ "can be used to skip tests conditionally:"
+ )
+ for info in sorted(self.mozInfo.items(), key=lambda item: item[0]):
+ self.log.info(" {key}: {value}".format(key=info[0], value=info[1]))
+
+ if options.get("self_test"):
+ if not self.runSelfTest():
+ return False
+
+ if (
+ "tsan" in self.mozInfo
+ and self.mozInfo["tsan"]
+ and not options.get("threadCount")
+ ):
+ # TSan requires significantly more memory, so reduce the amount of parallel
+ # tests we run to avoid OOMs and timeouts.
+ # pylint --py3k W1619
+ self.threadCount = self.threadCount / 2
+
+ self.stack_fixer_function = None
+ if self.utility_path and os.path.exists(self.utility_path):
+ self.stack_fixer_function = get_stack_fixer_function(
+ self.utility_path, self.symbolsPath
+ )
+
+ # buildEnvironment() needs mozInfo, so we call it after mozInfo is initialized.
+ self.buildEnvironment()
+
+        # The appDirKey is an optional entry in either the default or individual test
+        # sections that defines a relative application directory for test runs. If
+        # defined, we pass 'grePath/$appDirKey' for the -a parameter of the xpcshell
+ # test harness.
+ appDirKey = None
+ if "appname" in self.mozInfo:
+ appDirKey = self.mozInfo["appname"] + "-appdir"
+
+ # We have to do this before we run tests that depend on having the node
+ # http/2 server.
+ self.trySetupNode()
+
+ self.startHttp3Server()
+
+ pStdout, pStderr = self.getPipes()
+
+ self.buildTestList(
+ options.get("test_tags"), options.get("testPaths"), options.get("verify")
+ )
+ if self.singleFile:
+ self.sequential = True
+
+ if options.get("shuffle"):
+ random.shuffle(self.alltests)
+
+ self.cleanup_dir_list = []
+
+ kwargs = {
+ "appPath": self.appPath,
+ "xrePath": self.xrePath,
+ "utility_path": self.utility_path,
+ "testingModulesDir": self.testingModulesDir,
+ "debuggerInfo": self.debuggerInfo,
+ "jsDebuggerInfo": self.jsDebuggerInfo,
+ "httpdJSPath": self.httpdJSPath,
+ "headJSPath": self.headJSPath,
+ "tempDir": self.tempDir,
+ "testharnessdir": self.testharnessdir,
+ "profileName": self.profileName,
+ "singleFile": self.singleFile,
+ "env": self.env, # making a copy of this in the testthreads
+ "symbolsPath": self.symbolsPath,
+ "logfiles": self.logfiles,
+ "app_binary": self.app_binary,
+ "xpcshell": self.xpcshell,
+ "xpcsRunArgs": self.xpcsRunArgs,
+ "failureManifest": self.failure_manifest,
+ "jscovdir": self.jscovdir,
+ "harness_timeout": self.harness_timeout,
+ "stack_fixer_function": self.stack_fixer_function,
+ "event": self.event,
+ "cleanup_dir_list": self.cleanup_dir_list,
+ "pStdout": pStdout,
+ "pStderr": pStderr,
+ "keep_going": self.keepGoing,
+ "log": self.log,
+ "interactive": self.interactive,
+ "app_dir_key": appDirKey,
+ "rootPrefsFile": self.prefsFile,
+ "extraPrefs": options.get("extraPrefs") or [],
+ "verboseIfFails": self.verboseIfFails,
+ "headless": self.headless,
+ "runFailures": self.runFailures,
+ "timeoutAsPass": self.timeoutAsPass,
+ "crashAsPass": self.crashAsPass,
+ "conditionedProfileDir": self.conditioned_profile_dir,
+ }
+
+ if self.sequential:
+ # Allow user to kill hung xpcshell subprocess with SIGINT
+ # when we are only running tests sequentially.
+ signal.signal(signal.SIGINT, markGotSIGINT)
+
+ if self.debuggerInfo:
+ # Force a sequential run
+ self.sequential = True
+
+ # If we have an interactive debugger, disable SIGINT entirely.
+ if self.debuggerInfo.interactive:
+ signal.signal(signal.SIGINT, lambda signum, frame: None)
+
+ if "lldb" in self.debuggerInfo.path:
+ # Ask people to start debugging using 'process launch', see bug 952211.
+ self.log.info(
+ "It appears that you're using LLDB to debug this test. "
+ + "Please use the 'process launch' command instead of "
+ "the 'run' command to start xpcshell."
+ )
+
+ if self.jsDebuggerInfo:
+ # The js debugger magic needs more work to do the right thing
+ # if debugging multiple files.
+ if len(self.alltests) != 1:
+ self.log.error(
+ "Error: --jsdebugger can only be used with a single test!"
+ )
+ return False
+
+ # The test itself needs to know whether it is a tsan build, since
+ # that has an effect on interpretation of the process return value.
+ usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]
+
+ # create a queue of all tests that will run
+ tests_queue = deque()
+ # also a list for the tests that need to be run sequentially
+ sequential_tests = []
+ status = None
+ if not options.get("verify"):
+ for test_object in self.alltests:
+ # Test identifiers are provided for the convenience of logging. These
+ # start as path names but are rewritten in case tests from the same path
+ # are re-run.
+
+ path = test_object["path"]
+
+ if self.singleFile and not path.endswith(self.singleFile):
+ continue
+
+ self.testCount += 1
+
+ test = testClass(
+ test_object,
+ verbose=self.verbose or test_object.get("verbose") == "true",
+ usingTSan=usingTSan,
+ mobileArgs=mobileArgs,
+ **kwargs,
+ )
+ if "run-sequentially" in test_object or self.sequential:
+ sequential_tests.append(test)
+ else:
+ tests_queue.append(test)
+
+ status = self.runTestList(
+ tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
+ )
+ else:
+ #
+ # Test verification: Run each test many times, in various configurations,
+ # in hopes of finding intermittent failures.
+ #
+
+ def step1():
+ # Run tests sequentially. Parallel mode would also work, except that
+ # the logging system gets confused when 2 or more tests with the same
+ # name run at the same time.
+ sequential_tests = []
+ for i in range(VERIFY_REPEAT):
+ self.testCount += 1
+ test = testClass(
+ test_object, retry=False, mobileArgs=mobileArgs, **kwargs
+ )
+ sequential_tests.append(test)
+ status = self.runTestList(
+ tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
+ )
+ return status
+
+ def step2():
+ # Run tests sequentially, with MOZ_CHAOSMODE enabled.
+ sequential_tests = []
+ self.env["MOZ_CHAOSMODE"] = "0xfb"
+                # Chaos mode runs really slowly, so allow tests extra time to pass.
+ self.harness_timeout = self.harness_timeout * 2
+ for i in range(VERIFY_REPEAT):
+ self.testCount += 1
+ test = testClass(
+ test_object, retry=False, mobileArgs=mobileArgs, **kwargs
+ )
+ sequential_tests.append(test)
+ status = self.runTestList(
+ tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
+ )
+ self.harness_timeout = self.harness_timeout / 2
+ return status
+
+ steps = [
+ ("1. Run each test %d times, sequentially." % VERIFY_REPEAT, step1),
+ (
+ "2. Run each test %d times, sequentially, in chaos mode."
+ % VERIFY_REPEAT,
+ step2,
+ ),
+ ]
+ startTime = datetime.now()
+ maxTime = timedelta(seconds=options["verifyMaxTime"])
+ for test_object in self.alltests:
+ stepResults = {}
+ for (descr, step) in steps:
+ stepResults[descr] = "not run / incomplete"
+ finalResult = "PASSED"
+ for (descr, step) in steps:
+ if (datetime.now() - startTime) > maxTime:
+ self.log.info(
+ "::: Test verification is taking too long: Giving up!"
+ )
+ self.log.info(
+ "::: So far, all checks passed, but not "
+ "all checks were run."
+ )
+ break
+ self.log.info(":::")
+ self.log.info('::: Running test verification step "%s"...' % descr)
+ self.log.info(":::")
+ status = step()
+ if status is not True:
+ stepResults[descr] = "FAIL"
+ finalResult = "FAILED!"
+ break
+ stepResults[descr] = "Pass"
+ self.log.info(":::")
+ self.log.info(
+ "::: Test verification summary for: %s" % test_object["path"]
+ )
+ self.log.info(":::")
+ for descr in sorted(stepResults.keys()):
+ self.log.info("::: %s : %s" % (descr, stepResults[descr]))
+ self.log.info(":::")
+ self.log.info("::: Test verification %s" % finalResult)
+ self.log.info(":::")
+
+ self.shutdownNode()
+ self.shutdownHttp3Server()
+
+ return status
+
+ def start_test(self, test):
+ test.start()
+
+ def test_ended(self, test):
+ pass
+
+ def runTestList(
+ self, tests_queue, sequential_tests, testClass, mobileArgs, **kwargs
+ ):
+ if self.sequential:
+ self.log.info("Running tests sequentially.")
+ else:
+ self.log.info("Using at most %d threads." % self.threadCount)
+
+        # Keep a set of currently running tests and start tests from the
+        # queue, running at most threadCount at a time.
+ running_tests = set()
+ keep_going = True
+ exceptions = []
+ tracebacks = []
+ self.try_again_list = []
+
+ tests_by_manifest = defaultdict(list)
+ for test in self.alltests:
+ group = test["manifest"]
+ if "ancestor_manifest" in test:
+ ancestor_manifest = normsep(test["ancestor_manifest"])
+ # Only change the group id if ancestor is not the generated root manifest.
+ if "/" in ancestor_manifest:
+ group = "{}:{}".format(ancestor_manifest, group)
+ tests_by_manifest[group].append(test["id"])
+
+ self.log.suite_start(tests_by_manifest, name="xpcshell")
+
+ while tests_queue or running_tests:
+ # if we're not supposed to continue and all of the running tests
+ # are done, stop
+ if not keep_going and not running_tests:
+ break
+
+ # if there's room to run more tests, start running them
+ while (
+ keep_going and tests_queue and (len(running_tests) < self.threadCount)
+ ):
+ test = tests_queue.popleft()
+ running_tests.add(test)
+ self.start_test(test)
+
+ # queue is full (for now) or no more new tests,
+ # process the finished tests so far
+
+ # wait for at least one of the tests to finish
+ self.event.wait(1)
+ self.event.clear()
+
+ # find what tests are done (might be more than 1)
+ done_tests = set()
+ for test in running_tests:
+ if test.done:
+ self.test_ended(test)
+ done_tests.add(test)
+ test.join(
+ 1
+ ) # join with timeout so we don't hang on blocked threads
+ # if the test had trouble, we will try running it again
+ # at the end of the run
+ if test.retry or test.is_alive():
+ # if the join call timed out, test.is_alive => True
+ self.try_again_list.append(test.test_object)
+ continue
+ # did the test encounter any exception?
+ if test.exception:
+ exceptions.append(test.exception)
+ tracebacks.append(test.traceback)
+ # we won't add any more tests, will just wait for
+ # the currently running ones to finish
+ keep_going = False
+ keep_going = keep_going and test.keep_going
+ self.addTestResults(test)
+
+ # make room for new tests to run
+ running_tests.difference_update(done_tests)
+
+ if keep_going:
+ # run the other tests sequentially
+ for test in sequential_tests:
+ if not keep_going:
+ self.log.error(
+ "TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so "
+ "stopped run. (Use --keep-going to keep running tests "
+ "after killing one with SIGINT)"
+ )
+ break
+ self.start_test(test)
+ test.join()
+ self.test_ended(test)
+ if (test.failCount > 0 or test.passCount <= 0) and os.environ.get(
+ "MOZ_AUTOMATION", 0
+ ) != 0:
+ self.try_again_list.append(test.test_object)
+ continue
+ self.addTestResults(test)
+ # did the test encounter any exception?
+ if test.exception:
+ exceptions.append(test.exception)
+ tracebacks.append(test.traceback)
+ break
+ keep_going = test.keep_going
+
+ # retry tests that failed when run in parallel
+ if self.try_again_list:
+ self.log.info("Retrying tests that failed when run in parallel.")
+ for test_object in self.try_again_list:
+ test = testClass(
+ test_object,
+ retry=False,
+ verbose=self.verbose,
+ mobileArgs=mobileArgs,
+ **kwargs,
+ )
+ self.start_test(test)
+ test.join()
+ self.test_ended(test)
+ self.addTestResults(test)
+ # did the test encounter any exception?
+ if test.exception:
+ exceptions.append(test.exception)
+ tracebacks.append(test.traceback)
+ break
+ keep_going = test.keep_going
+
+ # restore default SIGINT behaviour
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+        # Clean up any leftover directories that might be lying around.
+        # Some removals might fail because Windows takes too long to unlock them.
+ # We don't do anything if this fails because the test machines will have
+ # their $TEMP dirs cleaned up on reboot anyway.
+ for directory in self.cleanup_dir_list:
+ try:
+ shutil.rmtree(directory)
+ except Exception:
+ self.log.info("%s could not be cleaned up." % directory)
+
+ if exceptions:
+ self.log.info("Following exceptions were raised:")
+ for t in tracebacks:
+ self.log.error(t)
+ raise exceptions[0]
+
+ if self.testCount == 0 and os.environ.get("MOZ_AUTOMATION") != "1":
+ self.log.error("No tests run. Did you pass an invalid --test-path?")
+ self.failCount = 1
+
+        # Swapping the counts like this lets us satisfy the mozharness parsers,
+        # which report an orange job for failCount > 0.
+ if self.runFailures:
+ passed = self.passCount
+ self.passCount = self.failCount
+ self.failCount = passed
+
+ self.log.info("INFO | Result summary:")
+ self.log.info("INFO | Passed: %d" % self.passCount)
+ self.log.info("INFO | Failed: %d" % self.failCount)
+ self.log.info("INFO | Todo: %d" % self.todoCount)
+ self.log.info("INFO | Retried: %d" % len(self.try_again_list))
+
+ if gotSIGINT and not keep_going:
+ self.log.error(
+ "TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so stopped run. "
+ "(Use --keep-going to keep running tests after "
+ "killing one with SIGINT)"
+ )
+ return False
+
+ self.log.suite_end()
+ return self.runFailures or self.failCount == 0
+
+
+def main():
+ parser = parser_desktop()
+ options = parser.parse_args()
+
+ log = commandline.setup_logging("XPCShell", options, {"tbpl": sys.stdout})
+
+ if options.xpcshell is None and options.app_binary is None:
+ log.error(
+ "Must provide path to xpcshell using --xpcshell or Firefox using --app-binary"
+ )
+ sys.exit(1)
+
+ if options.xpcshell is not None and options.app_binary is not None:
+ log.error(
+ "Cannot provide --xpcshell and --app-binary - they are mutually exclusive options. Choose one."
+ )
+ sys.exit(1)
+
+ xpcsh = XPCShellTests(log)
+
+ if options.interactive and not options.testPath:
+ log.error("Error: You must specify a test filename in interactive mode!")
+ sys.exit(1)
+
+ if not xpcsh.runTests(options):
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/testing/xpcshell/selftest.py b/testing/xpcshell/selftest.py
new file mode 100755
index 0000000000..513567530d
--- /dev/null
+++ b/testing/xpcshell/selftest.py
@@ -0,0 +1,1474 @@
+#!/usr/bin/env python
+#
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+#
+
+import os
+import pprint
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+import mozinfo
+import six
+from mozlog import structured
+from runxpcshelltests import XPCShellTests
+
+TEST_PASS_STRING = "TEST-PASS"
+TEST_FAIL_STRING = "TEST-UNEXPECTED-FAIL"
+
+SIMPLE_PASSING_TEST = "function run_test() { Assert.ok(true); }"
+SIMPLE_FAILING_TEST = "function run_test() { Assert.ok(false); }"
+SIMPLE_PREFCHECK_TEST = """
+function run_test() {
+ Assert.ok(Services.prefs.getBoolPref("fake.pref.to.test"));
+}
+"""
+
+SIMPLE_UNCAUGHT_REJECTION_TEST = """
+function run_test() {
+ Promise.reject(new Error("Test rejection."));
+ Assert.ok(true);
+}
+"""
+
+ADD_TEST_SIMPLE = """
+function run_test() { run_next_test(); }
+
+add_test(function test_simple() {
+ Assert.ok(true);
+ run_next_test();
+});
+"""
+
+ADD_TEST_FAILING = """
+function run_test() { run_next_test(); }
+
+add_test(function test_failing() {
+ Assert.ok(false);
+ run_next_test();
+});
+"""
+
+ADD_TEST_UNCAUGHT_REJECTION = """
+function run_test() { run_next_test(); }
+
+add_test(function test_uncaught_rejection() {
+ Promise.reject(new Error("Test rejection."));
+ run_next_test();
+});
+"""
+
+CHILD_TEST_PASSING = """
+function run_test () { run_next_test(); }
+
+add_test(function test_child_simple () {
+ run_test_in_child("test_pass.js");
+ run_next_test();
+});
+"""
+
+CHILD_TEST_FAILING = """
+function run_test () { run_next_test(); }
+
+add_test(function test_child_simple () {
+ run_test_in_child("test_fail.js");
+ run_next_test();
+});
+"""
+
+CHILD_HARNESS_SIMPLE = """
+function run_test () { run_next_test(); }
+
+add_test(function test_child_assert () {
+ do_load_child_test_harness();
+ do_test_pending("test child assertion");
+ sendCommand("Assert.ok(true);", do_test_finished);
+ run_next_test();
+});
+"""
+
+CHILD_TEST_HANG = """
+function run_test () { run_next_test(); }
+
+add_test(function test_child_simple () {
+ do_test_pending("hang test");
+ do_load_child_test_harness();
+ sendCommand("_testLogger.info('CHILD-TEST-STARTED'); " +
+ + "const _TEST_FILE=['test_pass.js']; _execute_test(); ",
+ do_test_finished);
+ run_next_test();
+});
+"""
+
+SIMPLE_LOOPING_TEST = """
+function run_test () { run_next_test(); }
+
+add_test(function test_loop () {
+ do_test_pending()
+});
+"""
+
+PASSING_TEST_UNICODE = b"""
+function run_test () { run_next_test(); }
+
+add_test(function test_unicode_print () {
+ Assert.equal("\u201c\u201d", "\u201c\u201d");
+ run_next_test();
+});
+"""
+
+ADD_TASK_SINGLE = """
+function run_test() { run_next_test(); }
+
+add_task(async function test_task() {
+ await Promise.resolve(true);
+ await Promise.resolve(false);
+});
+"""
+
+ADD_TASK_MULTIPLE = """
+function run_test() { run_next_test(); }
+
+add_task(async function test_task() {
+ await Promise.resolve(true);
+});
+
+add_task(async function test_2() {
+ await Promise.resolve(true);
+});
+"""
+
+ADD_TASK_REJECTED = """
+function run_test() { run_next_test(); }
+
+add_task(async function test_failing() {
+ await Promise.reject(new Error("I fail."));
+});
+"""
+
+ADD_TASK_FAILURE_INSIDE = """
+function run_test() { run_next_test(); }
+
+add_task(async function test() {
+ let result = await Promise.resolve(false);
+
+ Assert.ok(result);
+});
+"""
+
+ADD_TASK_RUN_NEXT_TEST = """
+function run_test() { run_next_test(); }
+
+add_task(function () {
+ Assert.ok(true);
+
+ run_next_test();
+});
+"""
+
+ADD_TASK_STACK_TRACE = """
+function run_test() { run_next_test(); }
+
+add_task(async function this_test_will_fail() {
+ for (let i = 0; i < 10; ++i) {
+ await Promise.resolve();
+ }
+ Assert.ok(false);
+});
+"""
+
+ADD_TASK_SKIP = """
+add_task(async function skipMeNot1() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMe1() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
+
+add_task(async function skipMeNot2() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMeNot3() {
+ Assert.ok(true, "Well well well.");
+});
+
+add_task(async function skipMe2() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
+"""
+
+ADD_TASK_SKIPALL = """
+add_task(async function skipMe1() {
+ Assert.ok(false, "Not skipped after all.");
+});
+
+add_task(async function skipMe2() {
+ Assert.ok(false, "Not skipped after all.");
+}).skip();
+
+add_task(async function skipMe3() {
+ Assert.ok(false, "Not skipped after all.");
+}).only();
+
+add_task(async function skipMeNot() {
+ Assert.ok(true, "Well well well.");
+}).only();
+
+add_task(async function skipMe4() {
+ Assert.ok(false, "Not skipped after all.");
+});
+"""
+
+ADD_TEST_THROW_STRING = """
+function run_test() {do_throw("Passing a string to do_throw")};
+"""
+
+ADD_TEST_THROW_OBJECT = """
+let error = {
+ message: "Error object",
+ fileName: "failure.js",
+ stack: "ERROR STACK",
+ toString: function() {return this.message;}
+};
+function run_test() {do_throw(error)};
+"""
+
+ADD_TEST_REPORT_OBJECT = """
+let error = {
+ message: "Error object",
+ fileName: "failure.js",
+ stack: "ERROR STACK",
+ toString: function() {return this.message;}
+};
+function run_test() {do_report_unexpected_exception(error)};
+"""
+
+ADD_TEST_VERBOSE = """
+function run_test() {info("a message from info")};
+"""
+
+# A test for genuine JS-generated Error objects
+ADD_TEST_REPORT_REF_ERROR = """
+function run_test() {
+ let obj = {blah: 0};
+ try {
+ obj.noSuchFunction();
+ }
+ catch (error) {
+ do_report_unexpected_exception(error);
+ }
+};
+"""
+
+# A test for failure to load a test due to a syntax error
+LOAD_ERROR_SYNTAX_ERROR = """
+function run_test(
+"""
+
+# A test for failure to load a test due to an error other than a syntax error
+LOAD_ERROR_OTHER_ERROR = """
+"use strict";
+no_such_var = "foo"; // assignment to undeclared variable
+"""
+
+# A test that crashes outright.
+TEST_CRASHING = """
+function run_test () {
+ const { ctypes } = ChromeUtils.import("resource://gre/modules/ctypes.jsm");
+ let zero = new ctypes.intptr_t(8);
+ let badptr = ctypes.cast(zero, ctypes.PointerType(ctypes.int32_t));
+ badptr.contents;
+}
+"""
+
+# A test for asynchronous cleanup functions
+ASYNC_CLEANUP = """
+function run_test() {
+ let { PromiseUtils } = ChromeUtils.importESModule(
+ "resource://gre/modules/PromiseUtils.sys.mjs"
+ );
+
+ // The list of checkpoints in the order we encounter them.
+ let checkpoints = [];
+
+ // Cleanup tasks, in reverse order
+ registerCleanupFunction(function cleanup_checkout() {
+ Assert.equal(checkpoints.join(""), "123456");
+ info("At this stage, the test has succeeded");
+ do_throw("Throwing an error to force displaying the log");
+ });
+
+ registerCleanupFunction(function sync_cleanup_2() {
+ checkpoints.push(6);
+ });
+
+ registerCleanupFunction(async function async_cleanup_4() {
+ await undefined;
+ checkpoints.push(5);
+ });
+
+ registerCleanupFunction(async function async_cleanup_3() {
+ await undefined;
+ checkpoints.push(4);
+ });
+
+ registerCleanupFunction(function async_cleanup_2() {
+ let deferred = PromiseUtils.defer();
+ executeSoon(deferred.resolve);
+ return deferred.promise.then(function() {
+ checkpoints.push(3);
+ });
+ });
+
+ registerCleanupFunction(function sync_cleanup() {
+ checkpoints.push(2);
+ });
+
+ registerCleanupFunction(function async_cleanup() {
+ let deferred = PromiseUtils.defer();
+ executeSoon(deferred.resolve);
+ return deferred.promise.then(function() {
+ checkpoints.push(1);
+ });
+ });
+
+}
+"""
+
+# A test to check that add_test() tests run without run_test()
+NO_RUN_TEST_ADD_TEST = """
+add_test(function no_run_test_add_test() {
+ Assert.ok(true);
+ run_next_test();
+});
+"""
+
+# A test to check that add_task() tests run without run_test()
+NO_RUN_TEST_ADD_TASK = """
+add_task(function no_run_test_add_task() {
+ Assert.ok(true);
+});
+"""
+
+# A test to check that both add_task() and add_test() work without run_test()
+NO_RUN_TEST_ADD_TEST_ADD_TASK = """
+add_test(function no_run_test_add_test() {
+ Assert.ok(true);
+ run_next_test();
+});
+
+add_task(function no_run_test_add_task() {
+ Assert.ok(true);
+});
+"""
+
+# A test to check that an empty test file without run_test(),
+# add_test() or add_task() works.
+NO_RUN_TEST_EMPTY_TEST = """
+// This is an empty test file.
+"""
+
+NO_RUN_TEST_ADD_TEST_FAIL = """
+add_test(function no_run_test_add_test_fail() {
+ Assert.ok(false);
+ run_next_test();
+});
+"""
+
+NO_RUN_TEST_ADD_TASK_FAIL = """
+add_task(function no_run_test_add_task_fail() {
+ Assert.ok(false);
+});
+"""
+
+NO_RUN_TEST_ADD_TASK_MULTIPLE = """
+add_task(async function test_task() {
+ await Promise.resolve(true);
+});
+
+add_task(async function test_2() {
+ await Promise.resolve(true);
+});
+"""
+
+LOAD_MOZINFO = """
+function run_test() {
+  Assert.notEqual(typeof mozinfo, "undefined");
+  Assert.notEqual(typeof mozinfo.os, "undefined");
+}
+"""
+
+CHILD_MOZINFO = """
+function run_test () { run_next_test(); }
+
+add_test(function test_child_mozinfo () {
+ run_test_in_child("test_mozinfo.js");
+ run_next_test();
+});
+"""
+
+HEADLESS_TRUE = """
+add_task(function headless_true() {
+ Assert.equal(Services.env.get("MOZ_HEADLESS"), "1", "Check MOZ_HEADLESS");
+ Assert.equal(Services.env.get("DISPLAY"), "77", "Check DISPLAY");
+});
+"""
+
+HEADLESS_FALSE = """
+add_task(function headless_false() {
+ Assert.notEqual(Services.env.get("MOZ_HEADLESS"), "1", "Check MOZ_HEADLESS");
+ Assert.notEqual(Services.env.get("DISPLAY"), "77", "Check DISPLAY");
+});
+"""
+
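Each constant above is a self-contained miniature xpcshell test file. The self-tests below write one of them to a temporary directory, generate an xpcshell.ini that references it, and then assert on the harness outcome; a minimal sketch of that pattern (hypothetical method name, mirroring testPass further down):

    def testSketch(self):
        # Orientation example only, not part of the patch.
        self.writeFile("test_sketch.js", SIMPLE_PASSING_TEST)  # write the JS snippet to the temp dir
        self.writeManifest(["test_sketch.js"])  # generate an xpcshell.ini pointing at it
        self.assertTestResult(True)  # run the harness and expect overall success
        self.assertEqual(1, self.x.passCount)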
+
+class XPCShellTestsTests(unittest.TestCase):
+ """
+ Yes, these are unit tests for a unit test harness.
+ """
+
+ def __init__(self, name):
+ super(XPCShellTestsTests, self).__init__(name)
+ from buildconfig import substs
+ from mozbuild.base import MozbuildObject
+
+ os.environ.pop("MOZ_OBJDIR", None)
+ self.build_obj = MozbuildObject.from_environment()
+
+ objdir = self.build_obj.topobjdir
+ self.testing_modules = os.path.join(objdir, "_tests", "modules")
+
+ if mozinfo.isMac:
+ self.xpcshellBin = os.path.join(
+ objdir,
+ "dist",
+ substs["MOZ_MACBUNDLE_NAME"],
+ "Contents",
+ "MacOS",
+ "xpcshell",
+ )
+ else:
+ self.xpcshellBin = os.path.join(objdir, "dist", "bin", "xpcshell")
+
+ if sys.platform == "win32":
+ self.xpcshellBin += ".exe"
+ self.utility_path = os.path.join(objdir, "dist", "bin")
+ self.symbols_path = None
+ candidate_path = os.path.join(self.build_obj.distdir, "crashreporter-symbols")
+ if os.path.isdir(candidate_path):
+ self.symbols_path = candidate_path
+
+ def setUp(self):
+ self.log = six.StringIO()
+ self.tempdir = tempfile.mkdtemp()
+ logger = structured.commandline.setup_logging(
+ "selftest%s" % id(self), {}, {"tbpl": self.log}
+ )
+ self.x = XPCShellTests(logger)
+ self.x.harness_timeout = 30 if not mozinfo.info["ccov"] else 60
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+ self.x.shutdownNode()
+
+ def writeFile(self, name, contents, mode="w"):
+ """
+ Write |contents| to a file named |name| in the temp directory,
+ and return the full path to the file.
+ """
+ fullpath = os.path.join(self.tempdir, name)
+ with open(fullpath, mode) as f:
+ f.write(contents)
+ return fullpath
+
+ def writeManifest(self, tests, prefs=[]):
+ """
+ Write an xpcshell.ini in the temp directory and set
+ self.manifest to its pathname. |tests| is a list containing
+ either strings (for test names), or tuples with a test name
+ as the first element and manifest conditions as the following
+ elements. |prefs| is an optional list of prefs in the form of
+ "prefname=prefvalue" strings.
+ """
+ testlines = []
+ for t in tests:
+ testlines.append("[%s]" % (t if isinstance(t, six.string_types) else t[0]))
+ if isinstance(t, tuple):
+ testlines.extend(t[1:])
+ prefslines = []
+ for p in prefs:
+ # Append prefs lines as indented inside "prefs=" manifest option.
+ prefslines.append(" %s" % p)
+
+ self.manifest = self.writeFile(
+ "xpcshell.ini",
+ """
+[DEFAULT]
+head =
+tail =
+prefs =
+"""
+ + "\n".join(prefslines)
+ + "\n"
+ + "\n".join(testlines),
+ )
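For reference, a call such as writeManifest(["test_a.js", ("test_b.js", "skip-if = true")], prefs=["fake.pref.to.test=true"]) (hypothetical file names) produces an xpcshell.ini roughly like the following, with each pref indented under the prefs = option:

    [DEFAULT]
    head =
    tail =
    prefs =
      fake.pref.to.test=true
    [test_a.js]
    [test_b.js]
    skip-if = true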
+
+ def assertTestResult(self, expected, shuffle=False, verbose=False, headless=False):
+ """
+ Assert that self.x.runTests with manifest=self.manifest
+ returns |expected|.
+ """
+ kwargs = {}
+ kwargs["app_binary"] = self.app_binary
+ kwargs["xpcshell"] = self.xpcshellBin
+ kwargs["symbolsPath"] = self.symbols_path
+ kwargs["manifest"] = self.manifest
+ kwargs["mozInfo"] = mozinfo.info
+ kwargs["shuffle"] = shuffle
+ kwargs["verbose"] = verbose
+ kwargs["headless"] = headless
+ kwargs["sequential"] = True
+ kwargs["testingModulesDir"] = self.testing_modules
+ kwargs["utility_path"] = self.utility_path
+ self.assertEqual(
+ expected,
+ self.x.runTests(kwargs),
+ msg="""Tests should have %s, log:
+========
+%s
+========
+"""
+ % ("passed" if expected else "failed", self.log.getvalue()),
+ )
+
+ def _assertLog(self, s, expected):
+ l = self.log.getvalue()
+ self.assertEqual(
+ expected,
+ s in l,
+ msg="""Value %s %s in log:
+========
+%s
+========"""
+ % (s, "expected" if expected else "not expected", l),
+ )
+
+ def assertInLog(self, s):
+ """
+ Assert that the string |s| is contained in self.log.
+ """
+ self._assertLog(s, True)
+
+ def assertNotInLog(self, s):
+ """
+ Assert that the string |s| is not contained in self.log.
+ """
+ self._assertLog(s, False)
+
+ def testPass(self):
+ """
+ Check that a simple test without any manifest conditions passes.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest(["test_basic.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testFail(self):
+ """
+ Check that a simple failing test without any manifest conditions fails.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest(["test_basic.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testPrefsInManifestVerbose(self):
+ """
+        Check that the prefs configuration option is supported in xpcshell manifests.
+ """
+ self.writeFile("test_prefs.js", SIMPLE_PREFCHECK_TEST)
+ self.writeManifest(tests=["test_prefs.js"], prefs=["fake.pref.to.test=true"])
+
+ self.assertTestResult(True, verbose=True)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertInLog("Per-test extra prefs will be set:")
+ self.assertInLog("fake.pref.to.test=true")
+
+ def testPrefsInManifestNonVerbose(self):
+ """
+        Check that prefs configuration is not logged in non-verbose mode.
+ """
+ self.writeFile("test_prefs.js", SIMPLE_PREFCHECK_TEST)
+ self.writeManifest(tests=["test_prefs.js"], prefs=["fake.pref.to.test=true"])
+
+ self.assertTestResult(True, verbose=False)
+ self.assertNotInLog("Per-test extra prefs will be set:")
+ self.assertNotInLog("fake.pref.to.test=true")
+
+ @unittest.skipIf(
+ mozinfo.isWin or not mozinfo.info.get("debug"),
+        "Requires a debug build; we don't have a stack fixer on hand for Windows.",
+ )
+ def testAssertStack(self):
+ """
+ When an assertion is hit, we should produce a useful stack.
+ """
+ self.writeFile(
+ "test_assert.js",
+ """
+ add_test(function test_asserts_immediately() {
+ Components.classes["@mozilla.org/xpcom/debug;1"]
+ .getService(Components.interfaces.nsIDebug2)
+ .assertion("foo", "assertion failed", "test.js", 1)
+ run_next_test();
+ });
+ """,
+ )
+
+ self.writeManifest(["test_assert.js"])
+ self.assertTestResult(False)
+
+ self.assertInLog("###!!! ASSERTION")
+ log_lines = self.log.getvalue().splitlines()
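+        # line_pat matches any stack frame marker ("#NN:"); unknown_pat matches
+        # frames without symbols ("#NN: ???[<module> +0x<offset>]").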
+        line_pat = r"#\d\d:"
+        unknown_pat = r"#\d\d\: \?\?\?\[.* \+0x[a-f0-9]+\]"
+ self.assertFalse(
+ any(re.search(unknown_pat, line) for line in log_lines),
+            "A stack frame without symbols was found in\n%s"
+ % pprint.pformat(log_lines),
+ )
+ self.assertTrue(
+ any(re.search(line_pat, line) for line in log_lines),
+ "No line resembling a stack frame was found in\n%s"
+ % pprint.pformat(log_lines),
+ )
+
+ def testChildPass(self):
+ """
+ Check that a simple test running in a child process passes.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_child_pass.js", CHILD_TEST_PASSING)
+ self.writeManifest(["test_child_pass.js"])
+
+ self.assertTestResult(True, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testChildFail(self):
+ """
+ Check that a simple failing test running in a child process fails.
+ """
+ self.writeFile("test_fail.js", SIMPLE_FAILING_TEST)
+ self.writeFile("test_child_fail.js", CHILD_TEST_FAILING)
+ self.writeManifest(["test_child_fail.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testChildHang(self):
+ """
+ Check that incomplete output from a child process results in a
+ test failure.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_child_hang.js", CHILD_TEST_HANG)
+ self.writeManifest(["test_child_hang.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertNotInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testChild(self):
+ """
+ Checks that calling do_load_child_test_harness without run_test_in_child
+ results in a usable test state. This test has a spurious failure when
+ run using |mach python-test|. See bug 1103226.
+ """
+ self.writeFile("test_child_assertions.js", CHILD_HARNESS_SIMPLE)
+ self.writeManifest(["test_child_assertions.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSkipForAddTest(self):
+ """
+ Check that add_test is skipped if |skip_if| condition is true
+ """
+ self.writeFile(
+ "test_skip.js",
+ """
+add_test({
+ skip_if: () => true,
+}, function test_should_be_skipped() {
+ Assert.ok(false);
+ run_next_test();
+});
+""",
+ )
+ self.writeManifest(["test_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNotSkipForAddTask(self):
+ """
+ Check that add_task is not skipped if |skip_if| condition is false
+ """
+ self.writeFile(
+ "test_not_skip.js",
+ """
+add_task({
+ skip_if: () => false,
+}, function test_should_not_be_skipped() {
+ Assert.ok(true);
+});
+""",
+ )
+ self.writeManifest(["test_not_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSkipForAddTask(self):
+ """
+ Check that add_task is skipped if |skip_if| condition is true
+ """
+ self.writeFile(
+ "test_skip.js",
+ """
+add_task({
+ skip_if: () => true,
+}, function test_should_be_skipped() {
+ Assert.ok(false);
+});
+""",
+ )
+ self.writeManifest(["test_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNotSkipForAddTest(self):
+ """
+ Check that add_test is not skipped if |skip_if| condition is false
+ """
+ self.writeFile(
+ "test_not_skip.js",
+ """
+add_test({
+ skip_if: () => false,
+}, function test_should_not_be_skipped() {
+ Assert.ok(true);
+ run_next_test();
+});
+""",
+ )
+ self.writeManifest(["test_not_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSyntaxError(self):
+ """
+ Check that running a test file containing a syntax error produces
+ a test failure and expected output.
+ """
+ self.writeFile("test_syntax_error.js", '"')
+ self.writeManifest(["test_syntax_error.js"])
+
+ self.assertTestResult(False, verbose=True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUnicodeInAssertMethods(self):
+ """
+ Check that passing unicode characters through an assertion method works.
+ """
+ self.writeFile("test_unicode_assert.js", PASSING_TEST_UNICODE, mode="wb")
+ self.writeManifest(["test_unicode_assert.js"])
+
+ self.assertTestResult(True, verbose=True)
+
+ @unittest.skipIf(
+ "MOZ_AUTOMATION" in os.environ,
+ "Timeout code path occasionally times out (bug 1098121)",
+ )
+ def testHangingTimeout(self):
+ """
+ Check that a test that never finishes results in the correct error log.
+ """
+ self.writeFile("test_loop.js", SIMPLE_LOOPING_TEST)
+ self.writeManifest(["test_loop.js"])
+
+ old_timeout = self.x.harness_timeout
+ self.x.harness_timeout = 1
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog("TEST-UNEXPECTED-TIMEOUT")
+
+ self.x.harness_timeout = old_timeout
+
+ def testPassFail(self):
+ """
+ Check that running more than one test works.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_fail.js", SIMPLE_FAILING_TEST)
+ self.writeManifest(["test_pass.js", "test_fail.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(2, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog(TEST_FAIL_STRING)
+
+ def testSkip(self):
+ """
+ Check that a simple failing test skipped in the manifest does
+ not cause failure.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest([("test_basic.js", "skip-if = true")])
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertNotInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testKnownFail(self):
+ """
+ Check that a simple failing test marked as known-fail in the manifest
+ does not cause failure.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest([("test_basic.js", "fail-if = true")])
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(1, self.x.todoCount)
+ self.assertInLog("TEST-FAIL")
+ # This should be suppressed because the harness doesn't include
+ # the full log from the xpcshell run when things pass.
+ self.assertNotInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUnexpectedPass(self):
+ """
+ Check that a simple failing test marked as known-fail in the manifest
+ that passes causes an unexpected pass.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest([("test_basic.js", "fail-if = true")])
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ # From the outer (Python) harness
+ self.assertInLog("TEST-UNEXPECTED-PASS")
+ self.assertNotInLog("TEST-KNOWN-FAIL")
+
+ def testReturnNonzero(self):
+ """
+ Check that a test where xpcshell returns nonzero fails.
+ """
+ self.writeFile("test_error.js", "throw 'foo'")
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUncaughtRejection(self):
+ """
+ Ensure a simple test with an uncaught rejection is reported.
+ """
+ self.writeFile(
+ "test_simple_uncaught_rejection.js", SIMPLE_UNCAUGHT_REJECTION_TEST
+ )
+ self.writeManifest(["test_simple_uncaught_rejection.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_simple_uncaught_rejection.js:3:18")
+ self.assertInLog("Test rejection.")
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTestSimple(self):
+ """
+ Ensure simple add_test() works.
+ """
+ self.writeFile("test_add_test_simple.js", ADD_TEST_SIMPLE)
+ self.writeManifest(["test_add_test_simple.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+
+ def testCrashLogging(self):
+ """
+ Test that a crashing test process logs a failure.
+ """
+ self.writeFile("test_crashes.js", TEST_CRASHING)
+ self.writeManifest(["test_crashes.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ if mozinfo.info.get("crashreporter"):
+ self.assertInLog("\nPROCESS-CRASH")
+
+ def testLogCorrectFileName(self):
+ """
+ Make sure a meaningful filename and line number is logged
+ by a passing test.
+ """
+ self.writeFile("test_add_test_simple.js", ADD_TEST_SIMPLE)
+ self.writeManifest(["test_add_test_simple.js"])
+
+ self.assertTestResult(True, verbose=True)
+ self.assertInLog("true == true")
+ self.assertNotInLog("[Assert.ok :")
+ self.assertInLog("[test_simple : 5]")
+
+ def testAddTestFailing(self):
+ """
+ Ensure add_test() with a failing test is reported.
+ """
+ self.writeFile("test_add_test_failing.js", ADD_TEST_FAILING)
+ self.writeManifest(["test_add_test_failing.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTestUncaughtRejection(self):
+ """
+ Ensure add_test() with an uncaught rejection is reported.
+ """
+ self.writeFile(
+ "test_add_test_uncaught_rejection.js", ADD_TEST_UNCAUGHT_REJECTION
+ )
+ self.writeManifest(["test_add_test_uncaught_rejection.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTaskTestSingle(self):
+ """
+        Ensure add_task() with a single passing test works.
+ """
+ self.writeFile("test_add_task_simple.js", ADD_TASK_SINGLE)
+ self.writeManifest(["test_add_task_simple.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+
+ def testAddTaskTestMultiple(self):
+ """
+        Ensure multiple calls to add_task() work as expected.
+ """
+ self.writeFile("test_add_task_multiple.js", ADD_TASK_MULTIPLE)
+ self.writeManifest(["test_add_task_multiple.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+
+ def testAddTaskTestRejected(self):
+ """
+        Ensure a rejected task is reported as a failure.
+ """
+ self.writeFile("test_add_task_rejected.js", ADD_TASK_REJECTED)
+ self.writeManifest(["test_add_task_rejected.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTaskTestFailureInside(self):
+ """
+        Ensure a failing assertion inside a task is reported as a failure.
+ """
+ self.writeFile("test_add_task_failure_inside.js", ADD_TASK_FAILURE_INSIDE)
+ self.writeManifest(["test_add_task_failure_inside.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTaskRunNextTest(self):
+ """
+ Calling run_next_test() from inside add_task() results in failure.
+ """
+ self.writeFile("test_add_task_run_next_test.js", ADD_TASK_RUN_NEXT_TEST)
+ self.writeManifest(["test_add_task_run_next_test.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+
+ def testAddTaskStackTrace(self):
+ """
+        Ensure that calling Assert.ok(false) from inside add_task()
+        results in a human-readable stack trace.
+ """
+ self.writeFile("test_add_task_stack_trace.js", ADD_TASK_STACK_TRACE)
+ self.writeManifest(["test_add_task_stack_trace.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog("this_test_will_fail")
+ self.assertInLog("run_next_test")
+ self.assertInLog("run_test")
+ self.assertNotInLog("Task.jsm")
+
+ def testAddTaskSkip(self):
+ self.writeFile("test_tasks_skip.js", ADD_TASK_SKIP)
+ self.writeManifest(["test_tasks_skip.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+
+ def testAddTaskSkipAll(self):
+ self.writeFile("test_tasks_skipall.js", ADD_TASK_SKIPALL)
+ self.writeManifest(["test_tasks_skipall.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+
+ def testMissingHeadFile(self):
+ """
+        Ensure that a missing head file results in a fatal error.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest([("test_basic.js", "head = missing.js")])
+
+ raised = False
+
+ try:
+ # The actual return value is never checked because we raise.
+ self.assertTestResult(True)
+ except Exception as ex:
+ raised = True
+ self.assertEqual(str(ex)[0:9], "head file")
+
+ self.assertTrue(raised)
+
+ def testRandomExecution(self):
+ """
+ Check that random execution doesn't break.
+ """
+ manifest = []
+ for i in range(0, 10):
+ filename = "test_pass_%d.js" % i
+ self.writeFile(filename, SIMPLE_PASSING_TEST)
+ manifest.append(filename)
+
+ self.writeManifest(manifest)
+ self.assertTestResult(True, shuffle=True)
+ self.assertEqual(10, self.x.testCount)
+ self.assertEqual(10, self.x.passCount)
+
+ def testDoThrowString(self):
+ """
+ Check that do_throw produces reasonable messages when the
+ input is a string instead of an object
+ """
+ self.writeFile("test_error.js", ADD_TEST_THROW_STRING)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("Passing a string to do_throw")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoThrowForeignObject(self):
+ """
+ Check that do_throw produces reasonable messages when the
+ input is a generic object with 'filename', 'message' and 'stack' attributes
+ but 'object instanceof Error' returns false
+ """
+ self.writeFile("test_error.js", ADD_TEST_THROW_OBJECT)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("failure.js")
+ self.assertInLog("Error object")
+ self.assertInLog("ERROR STACK")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportForeignObject(self):
+ """
+ Check that do_report_unexpected_exception produces reasonable messages when the
+ input is a generic object with 'filename', 'message' and 'stack' attributes
+ but 'object instanceof Error' returns false
+ """
+ self.writeFile("test_error.js", ADD_TEST_REPORT_OBJECT)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("failure.js")
+ self.assertInLog("Error object")
+ self.assertInLog("ERROR STACK")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportRefError(self):
+ """
+ Check that do_report_unexpected_exception produces reasonable messages when the
+ input is a JS-generated Error
+ """
+ self.writeFile("test_error.js", ADD_TEST_REPORT_REF_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_error.js")
+ self.assertInLog("obj.noSuchFunction is not a function")
+ self.assertInLog("run_test@")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportSyntaxError(self):
+ """
+ Check that attempting to load a test file containing a syntax error
+ generates details of the error in the log
+ """
+ self.writeFile("test_error.js", LOAD_ERROR_SYNTAX_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_error.js:3")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportNonSyntaxError(self):
+ """
+ Check that attempting to load a test file containing an error other
+ than a syntax error generates details of the error in the log
+ """
+ self.writeFile("test_error.js", LOAD_ERROR_OTHER_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("ReferenceError: assignment to undeclared variable")
+ self.assertInLog("test_error.js:3")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoPrintWhenVerboseNotExplicit(self):
+ """
+        Check that output from info() and similar calls is not shown
+        when not run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest(["test_verbose.js"])
+
+ self.assertTestResult(True)
+ self.assertNotInLog("a message from info")
+
+ def testDoPrintWhenVerboseExplicit(self):
+ """
+        Check that output from info() and similar calls is shown
+        when run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest(["test_verbose.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertInLog("a message from info")
+
+ def testDoPrintWhenVerboseInManifest(self):
+ """
+        Check that output from info() and similar calls is shown when
+        'verbose = true' is in the manifest, even when not run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest([("test_verbose.js", "verbose = true")])
+
+ self.assertTestResult(True)
+ self.assertInLog("a message from info")
+
+ def testAsyncCleanup(self):
+ """
+        Check that registerCleanupFunction handles async cleanup tasks nicely
+ """
+ self.writeFile("test_asyncCleanup.js", ASYNC_CLEANUP)
+ self.writeManifest(["test_asyncCleanup.js"])
+ self.assertTestResult(False)
+ self.assertInLog('"123456" == "123456"')
+ self.assertInLog("At this stage, the test has succeeded")
+ self.assertInLog("Throwing an error to force displaying the log")
+
+ def testNoRunTestAddTest(self):
+ """
+ Check that add_test() works fine without run_test() in the test file.
+ """
+ self.writeFile("test_noRunTestAddTest.js", NO_RUN_TEST_ADD_TEST)
+ self.writeManifest(["test_noRunTestAddTest.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTask(self):
+ """
+ Check that add_task() works fine without run_test() in the test file.
+ """
+ self.writeFile("test_noRunTestAddTask.js", NO_RUN_TEST_ADD_TASK)
+ self.writeManifest(["test_noRunTestAddTask.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTestAddTask(self):
+ """
+ Check that both add_test() and add_task() work without run_test()
+ in the test file.
+ """
+ self.writeFile("test_noRunTestAddTestAddTask.js", NO_RUN_TEST_ADD_TEST_ADD_TASK)
+ self.writeManifest(["test_noRunTestAddTestAddTask.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestEmptyTest(self):
+ """
+        Check that an empty test file that contains neither run_test(),
+        add_test(), nor add_task() still passes.
+ """
+ self.writeFile("test_noRunTestEmptyTest.js", NO_RUN_TEST_EMPTY_TEST)
+ self.writeManifest(["test_noRunTestEmptyTest.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTestFail(self):
+ """
+        Check that a failing add_test() still fails without run_test().
+ """
+ self.writeFile("test_noRunTestAddTestFail.js", NO_RUN_TEST_ADD_TEST_FAIL)
+ self.writeManifest(["test_noRunTestAddTestFail.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testNoRunTestAddTaskFail(self):
+ """
+        Check that a failing add_task() still fails without run_test().
+ """
+ self.writeFile("test_noRunTestAddTaskFail.js", NO_RUN_TEST_ADD_TASK_FAIL)
+ self.writeManifest(["test_noRunTestAddTaskFail.js"])
+
+ self.assertTestResult(False)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(0, self.x.passCount)
+ self.assertEqual(1, self.x.failCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testNoRunTestAddTaskMultiple(self):
+ """
+        Check that multiple add_task() tests work without run_test().
+ """
+ self.writeFile(
+ "test_noRunTestAddTaskMultiple.js", NO_RUN_TEST_ADD_TASK_MULTIPLE
+ )
+ self.writeManifest(["test_noRunTestAddTaskMultiple.js"])
+
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testMozinfo(self):
+ """
+ Check that mozinfo.json is loaded
+ """
+ self.writeFile("test_mozinfo.js", LOAD_MOZINFO)
+ self.writeManifest(["test_mozinfo.js"])
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testChildMozinfo(self):
+ """
+        Check that mozinfo.json is loaded in the child process
+ """
+ self.writeFile("test_mozinfo.js", LOAD_MOZINFO)
+ self.writeFile("test_child_mozinfo.js", CHILD_MOZINFO)
+ self.writeManifest(["test_child_mozinfo.js"])
+ self.assertTestResult(True)
+ self.assertEqual(1, self.x.testCount)
+ self.assertEqual(1, self.x.passCount)
+ self.assertEqual(0, self.x.failCount)
+ self.assertEqual(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNotHeadlessByDefault(self):
+ """
+ Check that the default is not headless.
+ """
+ self.writeFile("test_notHeadlessByDefault.js", HEADLESS_FALSE)
+ self.writeManifest(["test_notHeadlessByDefault.js"])
+ self.assertTestResult(True)
+
+ def testHeadlessWhenHeadlessExplicit(self):
+ """
+ Check that explicitly requesting headless works when the manifest doesn't override.
+ """
+ self.writeFile("test_headlessWhenExplicit.js", HEADLESS_TRUE)
+ self.writeManifest(["test_headlessWhenExplicit.js"])
+ self.assertTestResult(True, headless=True)
+
+ def testHeadlessWhenHeadlessTrueInManifest(self):
+ """
+ Check that enabling headless in the manifest alone works.
+ """
+ self.writeFile("test_headlessWhenTrueInManifest.js", HEADLESS_TRUE)
+ self.writeManifest([("test_headlessWhenTrueInManifest.js", "headless = true")])
+ self.assertTestResult(True)
+
+ def testNotHeadlessWhenHeadlessFalseInManifest(self):
+ """
+        Check that 'headless = false' in the manifest overrides the
+        explicitly requested headless mode.
+ """
+ self.writeFile("test_notHeadlessWhenFalseInManifest.js", HEADLESS_FALSE)
+ self.writeManifest(
+ [("test_notHeadlessWhenFalseInManifest.js", "headless = false")]
+ )
+ self.assertTestResult(True, headless=True)
+
+
+if __name__ == "__main__":
+ import mozunit
+
+ mozinfo.find_and_update_from_json()
+ mozunit.main()
diff --git a/testing/xpcshell/xpcshellcommandline.py b/testing/xpcshell/xpcshellcommandline.py
new file mode 100644
index 0000000000..66b1d5cbc9
--- /dev/null
+++ b/testing/xpcshell/xpcshellcommandline.py
@@ -0,0 +1,412 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+
+from mozlog import commandline
+
+
+def add_common_arguments(parser):
+ parser.add_argument(
+ "--app-binary",
+ type=str,
+ dest="app_binary",
+ default=None,
+        help=r"path to application binary (e.g. c:\program files\mozilla firefox\firefox.exe)",
+ )
+ parser.add_argument(
+ "--app-path",
+ type=str,
+ dest="appPath",
+ default=None,
+ help="application directory (as opposed to XRE directory)",
+ )
+ parser.add_argument(
+ "--interactive",
+ action="store_true",
+ dest="interactive",
+ default=False,
+ help="don't automatically run tests, drop to an xpcshell prompt",
+ )
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ dest="verbose",
+ default=False,
+ help="always print stdout and stderr from tests",
+ )
+ parser.add_argument(
+ "--verbose-if-fails",
+ action="store_true",
+ dest="verboseIfFails",
+ default=False,
+ help="Output the log if a test fails, even when run in parallel",
+ )
+ parser.add_argument(
+ "--keep-going",
+ action="store_true",
+ dest="keepGoing",
+ default=False,
+        help="continue running tests after a test is killed with control-C (SIGINT)",
+ )
+ parser.add_argument(
+ "--logfiles",
+ action="store_true",
+ dest="logfiles",
+ default=True,
+ help="create log files (default, only used to override --no-logfiles)",
+ )
+ parser.add_argument(
+ "--dump-tests",
+ type=str,
+ dest="dump_tests",
+ default=None,
+        help="Specify a file path to which to dump the list of tests that will be run",
+ )
+ parser.add_argument(
+ "--manifest",
+ type=str,
+ dest="manifest",
+ default=None,
+ help="Manifest of test directories to use",
+ )
+ parser.add_argument(
+ "--no-logfiles",
+ action="store_false",
+ dest="logfiles",
+ help="don't create log files",
+ )
+ parser.add_argument(
+ "--sequential",
+ action="store_true",
+ dest="sequential",
+ default=False,
+ help="Run all tests sequentially",
+ )
+ parser.add_argument(
+ "--temp-dir",
+ dest="tempDir",
+ default=None,
+ help="Directory to use for temporary files",
+ )
+ parser.add_argument(
+ "--testing-modules-dir",
+ dest="testingModulesDir",
+ default=None,
+ help="Directory where testing modules are located.",
+ )
+ parser.add_argument(
+ "--total-chunks",
+ type=int,
+ dest="totalChunks",
+ default=1,
+ help="how many chunks to split the tests up into",
+ )
+ parser.add_argument(
+ "--this-chunk",
+ type=int,
+ dest="thisChunk",
+ default=1,
+ help="which chunk to run between 1 and --total-chunks",
+ )
+ parser.add_argument(
+ "--profile-name",
+ type=str,
+ dest="profileName",
+ default=None,
+ help="name of application profile being tested",
+ )
+ parser.add_argument(
+ "--build-info-json",
+ type=str,
+ dest="mozInfo",
+ default=None,
+ help="path to a mozinfo.json including information about the build "
+        "configuration. Defaults to looking for mozinfo.json next to "
+ "the script.",
+ )
+ parser.add_argument(
+ "--shuffle",
+ action="store_true",
+ dest="shuffle",
+ default=False,
+ help="Execute tests in random order",
+ )
+ parser.add_argument(
+ "--xre-path",
+ action="store",
+ type=str,
+ dest="xrePath",
+ # individual scripts will set a sane default
+ default=None,
+ help="absolute path to directory containing XRE (probably xulrunner)",
+ )
+ parser.add_argument(
+ "--symbols-path",
+ action="store",
+ type=str,
+ dest="symbolsPath",
+ default=None,
+ help="absolute path to directory containing breakpad symbols, "
+ "or the URL of a zip file containing symbols",
+ )
+ parser.add_argument(
+ "--jscov-dir-prefix",
+ action="store",
+ type=str,
+ dest="jscovdir",
+ default=argparse.SUPPRESS,
+ help="Directory to store per-test javascript line coverage data as json.",
+ )
+ parser.add_argument(
+ "--debugger",
+ action="store",
+ dest="debugger",
+ help="use the given debugger to launch the application",
+ )
+ parser.add_argument(
+ "--debugger-args",
+ action="store",
+ dest="debuggerArgs",
+ help="pass the given args to the debugger _before_ "
+ "the application on the command line",
+ )
+ parser.add_argument(
+ "--debugger-interactive",
+ action="store_true",
+ dest="debuggerInteractive",
+ help="prevents the test harness from redirecting "
+ "stdout and stderr for interactive debuggers",
+ )
+ parser.add_argument(
+ "--jsdebugger",
+ dest="jsDebugger",
+ action="store_true",
+ help="Waits for a devtools JS debugger to connect before " "starting the test.",
+ )
+ parser.add_argument(
+ "--jsdebugger-port",
+ type=int,
+ dest="jsDebuggerPort",
+ default=6000,
+ help="The port to listen on for a debugger connection if "
+ "--jsdebugger is specified.",
+ )
+ parser.add_argument(
+ "--tag",
+ action="append",
+ dest="test_tags",
+ default=None,
+ help="filter out tests that don't have the given tag. Can be "
+ "used multiple times in which case the test must contain "
+ "at least one of the given tags.",
+ )
+ parser.add_argument(
+ "--utility-path",
+ action="store",
+ dest="utility_path",
+ default=None,
+ help="Path to a directory containing utility programs, such "
+ "as stack fixer scripts.",
+ )
+ parser.add_argument(
+ "--xpcshell",
+ action="store",
+ dest="xpcshell",
+ default=None,
+ help="Path to xpcshell binary",
+ )
+ parser.add_argument(
+ "--http3server",
+ action="store",
+ dest="http3server",
+ default=None,
+ help="Path to http3server binary",
+ )
+ # This argument can be just present, or the path to a manifest file. The
+ # just-present case is usually used for mach which can provide a default
+ # path to the failure file from the previous run
+ parser.add_argument(
+ "--rerun-failures",
+ action="store_true",
+ help="Rerun failures from the previous run, if any",
+ )
+ parser.add_argument(
+ "--failure-manifest",
+ action="store",
+ help="Path to a manifest file from which to rerun failures "
+ "(with --rerun-failure) or in which to record failed tests",
+ )
+ parser.add_argument(
+ "--threads",
+ type=int,
+ dest="threadCount",
+ default=0,
+ help="override the number of jobs (threads) when running tests "
+        "in parallel; the default is CPU x 1.5 when running via mach "
+ "and CPU x 4 when running in automation",
+ )
+ parser.add_argument(
+ "--setpref",
+ action="append",
+ dest="extraPrefs",
+ metavar="PREF=VALUE",
+        help="Defines an extra user preference (can be passed multiple times).",
+ )
+ parser.add_argument(
+ "testPaths", nargs="*", default=None, help="Paths of tests to run."
+ )
+ parser.add_argument(
+ "--verify",
+ action="store_true",
+ default=False,
+ help="Run tests in verification mode: Run many times in different "
+ "ways, to see if there are intermittent failures.",
+ )
+ parser.add_argument(
+ "--verify-max-time",
+ dest="verifyMaxTime",
+ type=int,
+ default=3600,
+ help="Maximum time, in seconds, to run in --verify mode.",
+ )
+ parser.add_argument(
+ "--headless",
+ action="store_true",
+ default=False,
+ dest="headless",
+ help="Enable headless mode by default for tests which don't specify "
+ "whether to use headless mode",
+ )
+ parser.add_argument(
+ "--conditioned-profile",
+ action="store_true",
+ default=False,
+ dest="conditionedProfile",
+ help="Run with conditioned profile instead of fresh blank profile",
+ )
+ parser.add_argument(
+ "--self-test",
+ action="store_true",
+ default=False,
+ dest="self_test",
+ help="Run self tests",
+ )
+ parser.add_argument(
+ "--run-failures",
+ action="store",
+ default="",
+ dest="runFailures",
+ help="Run failures matching keyword",
+ )
+ parser.add_argument(
+ "--timeout-as-pass",
+ action="store_true",
+ default=False,
+ dest="timeoutAsPass",
+ help="Harness level timeouts will be treated as passing",
+ )
+ parser.add_argument(
+ "--crash-as-pass",
+ action="store_true",
+ default=False,
+ dest="crashAsPass",
+ help="Harness level crashes will be treated as passing",
+ )
+ parser.add_argument(
+ "--disable-fission",
+ action="store_true",
+ default=False,
+ dest="disableFission",
+ help="disable fission mode (back to e10s || 1proc)",
+ )
+
+
+def add_remote_arguments(parser):
+ parser.add_argument(
+ "--objdir",
+ action="store",
+ type=str,
+ dest="objdir",
+ help="Local objdir, containing xpcshell binaries.",
+ )
+
+ parser.add_argument(
+ "--apk",
+ action="store",
+ type=str,
+ dest="localAPK",
+ help="Local path to Firefox for Android APK.",
+ )
+
+ parser.add_argument(
+ "--deviceSerial",
+ action="store",
+ type=str,
+ dest="deviceSerial",
+ help="adb serial number of remote device. This is required "
+ "when more than one device is connected to the host. "
+ "Use 'adb devices' to see connected devices.",
+ )
+
+ parser.add_argument(
+ "--adbPath",
+ action="store",
+ type=str,
+ dest="adbPath",
+ default=None,
+ help="Path to adb binary.",
+ )
+
+ parser.add_argument(
+ "--noSetup",
+ action="store_false",
+ dest="setup",
+ default=True,
+        help="Do not copy any files to the device (to be used only if "
+        "the device is already set up).",
+ )
+ parser.add_argument(
+ "--no-install",
+ action="store_false",
+ dest="setup",
+ default=True,
+ help="Don't install the app or any files to the device (to be used if "
+ "the device is already set up)",
+ )
+
+ parser.add_argument(
+ "--local-bin-dir",
+ action="store",
+ type=str,
+ dest="localBin",
+ help="Local path to bin directory.",
+ )
+
+ parser.add_argument(
+ "--remoteTestRoot",
+ action="store",
+ type=str,
+ dest="remoteTestRoot",
+        help="Remote directory to use as test root " "(e.g. /data/local/tmp/test_root).",
+ )
+
+
+def parser_desktop():
+ parser = argparse.ArgumentParser()
+ add_common_arguments(parser)
+ commandline.add_logging_group(parser)
+
+ return parser
+
+
+def parser_remote():
+ parser = argparse.ArgumentParser()
+ common = parser.add_argument_group("Common Options")
+ add_common_arguments(common)
+ remote = parser.add_argument_group("Remote Options")
+ add_remote_arguments(remote)
+ commandline.add_logging_group(parser)
+
+ return parser
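These parsers feed the harness entry points; a usage sketch (placeholder paths) that mirrors main() in runxpcshelltests.py above:

    import sys

    from mozlog import commandline
    from runxpcshelltests import XPCShellTests
    from xpcshellcommandline import parser_desktop

    # Parse an explicit argv (placeholder paths) instead of sys.argv.
    options = parser_desktop().parse_args(
        ["--xpcshell", "objdir/dist/bin/xpcshell", "--manifest", "tests/xpcshell.ini"]
    )
    log = commandline.setup_logging("XPCShell", options, {"tbpl": sys.stdout})
    # runTests() returns a truthy value on success, as in main() above.
    sys.exit(0 if XPCShellTests(log).runTests(options) else 1)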