Diffstat:

 -rw-r--r--  testing/xpcshell/dns-packet/.editorconfig        10
 -rw-r--r--  testing/xpcshell/dns-packet/.eslintrc              9
 -rw-r--r--  testing/xpcshell/dns-packet/.gitignore             4
 -rw-r--r--  testing/xpcshell/dns-packet/.travis.yml           11
 -rw-r--r--  testing/xpcshell/dns-packet/CHANGELOG.md          30
 -rw-r--r--  testing/xpcshell/dns-packet/LICENSE               21
 -rw-r--r--  testing/xpcshell/dns-packet/README.md            365
 -rw-r--r--  testing/xpcshell/dns-packet/classes.js            23
 -rw-r--r--  testing/xpcshell/dns-packet/examples/doh.js       52
 -rw-r--r--  testing/xpcshell/dns-packet/examples/tcp.js       52
 -rw-r--r--  testing/xpcshell/dns-packet/examples/tls.js       61
 -rw-r--r--  testing/xpcshell/dns-packet/examples/udp.js       28
 -rw-r--r--  testing/xpcshell/dns-packet/index.js            1841
 -rw-r--r--  testing/xpcshell/dns-packet/opcodes.js            50
 -rw-r--r--  testing/xpcshell/dns-packet/optioncodes.js        61
 -rw-r--r--  testing/xpcshell/dns-packet/package.json          48
 -rw-r--r--  testing/xpcshell/dns-packet/rcodes.js             50
 -rw-r--r--  testing/xpcshell/dns-packet/test.js              613
 -rw-r--r--  testing/xpcshell/dns-packet/types.js             105
 19 files changed, 3434 insertions, 0 deletions
diff --git a/testing/xpcshell/dns-packet/.editorconfig b/testing/xpcshell/dns-packet/.editorconfig
new file mode 100644
index 0000000000..aaaa7a4baa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.editorconfig
@@ -0,0 +1,10 @@
+root = true
+
+[*]
+indent_style = space
+indent_size = 2
+tab_width = 2
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
diff --git a/testing/xpcshell/dns-packet/.eslintrc b/testing/xpcshell/dns-packet/.eslintrc
new file mode 100644
index 0000000000..d3ed05cf86
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.eslintrc
@@ -0,0 +1,9 @@
+root: true
+
+parserOptions:
+ ecmaVersion: 2015
+
+env:
+ node: true
+
+extends: standard
diff --git a/testing/xpcshell/dns-packet/.gitignore b/testing/xpcshell/dns-packet/.gitignore
new file mode 100644
index 0000000000..cea4849cd9
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.gitignore
@@ -0,0 +1,4 @@
+node_modules/
+.nyc_output/
+coverage/
+package-lock.json
diff --git a/testing/xpcshell/dns-packet/.travis.yml b/testing/xpcshell/dns-packet/.travis.yml
new file mode 100644
index 0000000000..e0211604d3
--- /dev/null
+++ b/testing/xpcshell/dns-packet/.travis.yml
@@ -0,0 +1,11 @@
+language: node_js
+node_js:
+ - node
+ - lts/*
+install:
+- npm install
+- npm install coveralls
+script:
+- npm run coverage
+after_success:
+- npx nyc report --reporter=text-lcov | npx coveralls
diff --git a/testing/xpcshell/dns-packet/CHANGELOG.md b/testing/xpcshell/dns-packet/CHANGELOG.md
new file mode 100644
index 0000000000..6b714e04c9
--- /dev/null
+++ b/testing/xpcshell/dns-packet/CHANGELOG.md
@@ -0,0 +1,30 @@
+# Version 5.2.0 - 2019-02-21
+
+- Feature: Added support for de/encoding certain OPT options.
+
+# Version 5.1.0 - 2019-01-22
+
+- Feature: Added support for the RP record type.
+
+# Version 5.0.0 - 2018-06-01
+
+- Breaking: Node.js 6.0.0 or greater is now required.
+- Feature: Added support for DNSSEC record types.
+
+# Version 4.1.0 - 2018-02-11
+
+- Feature: Added support for the MX record type.
+
+# Version 4.0.0 - 2018-02-04
+
+- Feature: Added `streamEncode` and `streamDecode` methods for encoding TCP packets.
+- Breaking: Changed the decoded value of TXT records to an array of Buffers. This is to accommodate DNS-SD records, which rely on the individual strings being kept separate.
+- Breaking: Renamed `flag_trunc` and `flag_auth` to `flag_tc` and `flag_aa` to match the flag names used in the DNS standards.
+
+# Version 3.0.0 - 2018-01-12
+
+- Breaking: The `class` option has been changed from integer to string.
+
+# Version 2.0.0 - 2018-01-11
+
+- Breaking: Converted the module to ES2015; Node.js 4.0 or greater is now required.
diff --git a/testing/xpcshell/dns-packet/LICENSE b/testing/xpcshell/dns-packet/LICENSE
new file mode 100644
index 0000000000..bae9da7bfa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mathias Buus
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/testing/xpcshell/dns-packet/README.md b/testing/xpcshell/dns-packet/README.md
new file mode 100644
index 0000000000..2a729b3d10
--- /dev/null
+++ b/testing/xpcshell/dns-packet/README.md
@@ -0,0 +1,365 @@
+# dns-packet
+[![](https://img.shields.io/npm/v/dns-packet.svg?style=flat)](https://www.npmjs.org/package/dns-packet) [![](https://img.shields.io/npm/dm/dns-packet.svg)](https://www.npmjs.org/package/dns-packet) [![](https://api.travis-ci.org/mafintosh/dns-packet.svg?style=flat)](https://travis-ci.org/mafintosh/dns-packet) [![Coverage Status](https://coveralls.io/repos/github/mafintosh/dns-packet/badge.svg?branch=master)](https://coveralls.io/github/mafintosh/dns-packet?branch=master)
+
+An [abstract-encoding](https://github.com/mafintosh/abstract-encoding) compliant module for encoding / decoding DNS packets. Lifted out of [multicast-dns](https://github.com/mafintosh/multicast-dns) as a separate module.
+
+```
+npm install dns-packet
+```
+
+## UDP Usage
+
+``` js
+const dnsPacket = require('dns-packet')
+const dgram = require('dgram')
+
+const socket = dgram.createSocket('udp4')
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: 1,
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+socket.on('message', message => {
+ console.log(dnsPacket.decode(message)) // prints out a response from google dns
+})
+
+socket.send(buf, 0, buf.length, 53, '8.8.8.8')
+```
+
+Also see [the UDP example](examples/udp.js).
+
+## TCP, TLS, HTTPS
+
+While DNS has traditionally been carried over a datagram transport, it is increasingly carried over TCP for larger responses (commonly including DNSSEC responses) and over TLS or HTTPS for enhanced security. See the examples below for how to use `dns-packet` to wrap DNS packets in these protocols; a minimal framing sketch follows the list:
+
+- [TCP](examples/tcp.js)
+- [DNS over TLS](examples/tls.js)
+- [DNS over HTTPS](examples/doh.js)
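+
+As a minimal sketch (reusing the query shape from the UDP example above), the practical difference is the 16-bit big-endian length prefix that TCP-based transports require, which `streamEncode`/`streamDecode` handle for you:
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const query = {
+  type: 'query',
+  id: 1,
+  flags: dnsPacket.RECURSION_DESIRED,
+  questions: [{
+    type: 'A',
+    name: 'google.com'
+  }]
+}
+
+const udpBuf = dnsPacket.encode(query) // raw DNS message, ready for dgram
+const tcpBuf = dnsPacket.streamEncode(query) // same message with a 2-byte length prefix
+
+console.log(tcpBuf.readUInt16BE(0) === udpBuf.length) // true
+console.log(dnsPacket.streamDecode(tcpBuf).id) // 1
+```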
+
+## API
+
+#### `var buf = packets.encode(packet, [buf], [offset])`
+
+Encodes a DNS packet into a buffer containing a UDP payload.
+
+#### `var packet = packets.decode(buf, [offset])`
+
+Decodes a DNS packet from a buffer containing a UDP payload.
+
+#### `var buf = packets.streamEncode(packet, [buf], [offset])`
+
+Encodes a DNS packet into a buffer containing a TCP payload.
+
+#### `var packet = packets.streamDecode(buf, [offset])`
+
+Decodes a DNS packet from a buffer containing a TCP payload.
+
+#### `var len = packets.encodingLength(packet)`
+
+Returns how many bytes are needed to encode the DNS packet.
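+
+A short sketch (illustrative usage only, not additional API) of how these calls compose:
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const packet = {
+  type: 'query',
+  id: 2,
+  questions: [{
+    type: 'A',
+    name: 'example.com'
+  }]
+}
+
+// size a buffer up front, then encode into it at offset 0
+const buf = Buffer.alloc(dnsPacket.encodingLength(packet))
+dnsPacket.encode(packet, buf, 0)
+
+// decoding returns a packet object again
+const decoded = dnsPacket.decode(buf)
+console.log(decoded.id, decoded.questions[0].name) // 2 'example.com'
+```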
+
+## Packets
+
+Packets look like this
+
+``` js
+{
+ type: 'query|response',
+ id: optionalIdNumber,
+ flags: optionalBitFlags,
+ questions: [...],
+ answers: [...],
+ additionals: [...],
+ authorities: [...]
+}
+```
+
+The bit flags available are
+
+``` js
+packet.RECURSION_DESIRED
+packet.RECURSION_AVAILABLE
+packet.TRUNCATED_RESPONSE
+packet.AUTHORITATIVE_ANSWER
+packet.AUTHENTIC_DATA
+packet.CHECKING_DISABLED
+```
+
+To use more than one flag, bitwise-or them together
+
+``` js
+var flags = packet.RECURSION_DESIRED | packet.RECURSION_AVAILABLE
+```
+
+And to check for a flag, use bitwise-and
+
+``` js
+var isRecursive = message.flags & packet.RECURSION_DESIRED
+```
+
+A question looks like this
+
+``` js
+{
+ type: 'A', // or SRV, AAAA, etc
+ class: 'IN', // one of IN, CS, CH, HS, ANY. Default: IN
+ name: 'google.com' // which record are you looking for
+}
+```
+
+And an answer, additional, or authority looks like this
+
+``` js
+{
+ type: 'A', // or SRV, AAAA, etc
+ class: 'IN', // one of IN, CS, CH, HS
+ name: 'google.com', // which name is this record for
+ ttl: optionalTimeToLiveInSeconds,
+ (record specific data, see below)
+}
+```
+
+## Supported record types
+
+#### `A`
+
+``` js
+{
+ data: 'IPv4 address' // e.g. 127.0.0.1
+}
+```
+
+#### `AAAA`
+
+``` js
+{
+ data: 'IPv6 address' // e.g. fe80::1
+}
+```
+
+#### `CAA`
+
+``` js
+{
+ flags: 128, // octet
+ tag: 'issue|issuewild|iodef',
+ value: 'ca.example.net',
+ issuerCritical: false
+}
+```
+
+#### `CNAME`
+
+``` js
+{
+ data: 'cname.to.another.record'
+}
+```
+
+#### `DNAME`
+
+``` js
+{
+ data: 'dname.to.another.record'
+}
+```
+
+#### `DNSKEY`
+
+``` js
+{
+ flags: 257, // 16 bits
+ algorithm: 1, // octet
+ key: Buffer
+}
+```
+
+#### `DS`
+
+``` js
+{
+ keyTag: 12345,
+ algorithm: 8,
+ digestType: 1,
+ digest: Buffer
+}
+```
+
+#### `HINFO`
+
+``` js
+{
+ data: {
+ cpu: 'cpu info',
+ os: 'os info'
+ }
+}
+```
+
+#### `MX`
+
+``` js
+{
+ preference: 10,
+ exchange: 'mail.example.net'
+}
+```
+
+#### `NS`
+
+``` js
+{
+ data: nameServer
+}
+```
+
+#### `NSEC`
+
+``` js
+{
+ nextDomain: 'a.domain',
+ rrtypes: ['A', 'TXT', 'RRSIG']
+}
+```
+
+#### `NSEC3`
+
+``` js
+{
+ algorithm: 1,
+ flags: 0,
+ iterations: 2,
+ salt: Buffer,
+ nextDomain: Buffer, // Hashed per RFC5155
+ rrtypes: ['A', 'TXT', 'RRSIG']
+}
+```
+
+#### `NULL`
+
+``` js
+{
+ data: Buffer.from('any binary data')
+}
+```
+
+#### `OPT`
+
+[EDNS0](https://tools.ietf.org/html/rfc6891) options.
+
+``` js
+{
+ type: 'OPT',
+ name: '.',
+ udpPayloadSize: 4096,
+ flags: packet.DNSSEC_OK,
+ options: [{
+ // pass in any code/data for generic EDNS0 options
+ code: 12,
+ data: Buffer.alloc(31)
+ }, {
+ // Several EDNS0 options have enhanced support
+ code: 'PADDING',
+ length: 31,
+ }, {
+ code: 'CLIENT_SUBNET',
+ family: 2, // 1 for IPv4, 2 for IPv6
+ sourcePrefixLength: 64, // used to truncate IP address
+ scopePrefixLength: 0,
+ ip: 'fe80::',
+ }, {
+ code: 'TCP_KEEPALIVE',
+ timeout: 150 // increments of 100ms. This means 15s.
+ }, {
+ code: 'KEY_TAG',
+ tags: [1, 2, 3],
+ }]
+}
+```
+
+The options `PADDING`, `CLIENT_SUBNET`, `TCP_KEEPALIVE` and `KEY_TAG` support enhanced de/encoding. See [optioncodes.js](https://github.com/mafintosh/dns-packet/blob/master/optioncodes.js) for all supported option codes. If the `data` property is present on an option, it takes precedence. On decoding, `data` will always be defined.
+
+#### `PTR`
+
+``` js
+{
+ data: 'points.to.another.record'
+}
+```
+
+#### `RP`
+
+``` js
+{
+ mbox: 'admin.example.com',
+ txt: 'txt.example.com'
+}
+```
+
+#### `RRSIG`
+
+``` js
+{
+ typeCovered: 'A',
+ algorithm: 8,
+ labels: 1,
+ originalTTL: 3600,
+ expiration: timestamp,
+ inception: timestamp,
+ keyTag: 12345,
+ signersName: 'a.name',
+ signature: Buffer
+}
+```
+
+#### `SOA`
+
+``` js
+{
+ data:
+ {
+ mname: domainName,
+ rname: mailbox,
+ serial: zoneSerial,
+ refresh: refreshInterval,
+ retry: retryInterval,
+ expire: expireInterval,
+ minimum: minimumTTL
+ }
+}
+```
+
+#### `SRV`
+
+``` js
+{
+ data: {
+ port: servicePort,
+ target: serviceHostName,
+ priority: optionalServicePriority,
+ weight: optionalServiceWeight
+ }
+}
+```
+
+#### `TXT`
+
+``` js
+{
+ data: 'text' || Buffer || [ Buffer || 'text' ]
+}
+```
+
+When encoding, scalar values are converted to an array and strings are converted to UTF-8 encoded Buffers. When decoding, the return value will always be an array of Buffers.
+
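+For example (a small illustrative sketch), a mixed array of strings and Buffers is accepted when encoding, and decoding hands back an array of Buffers:
+
+``` js
+const dnsPacket = require('dns-packet')
+
+const buf = dnsPacket.encode({
+  type: 'response',
+  questions: [{ type: 'TXT', name: 'example.com' }],
+  answers: [{
+    type: 'TXT',
+    name: 'example.com',
+    data: ['hello', Buffer.from('world')]
+  }]
+})
+
+const txt = dnsPacket.decode(buf).answers[0].data
+console.log(txt.map(b => b.toString())) // [ 'hello', 'world' ]
+```
+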
+If you need another record type, open an issue and we'll try to add it.
+
+## License
+
+MIT
diff --git a/testing/xpcshell/dns-packet/classes.js b/testing/xpcshell/dns-packet/classes.js
new file mode 100644
index 0000000000..9a3d9b1e8c
--- /dev/null
+++ b/testing/xpcshell/dns-packet/classes.js
@@ -0,0 +1,23 @@
+'use strict'
+
+exports.toString = function (klass) {
+ switch (klass) {
+ case 1: return 'IN'
+ case 2: return 'CS'
+ case 3: return 'CH'
+ case 4: return 'HS'
+ case 255: return 'ANY'
+ }
+ return 'UNKNOWN_' + klass
+}
+
+exports.toClass = function (name) {
+ switch (name.toUpperCase()) {
+ case 'IN': return 1
+ case 'CS': return 2
+ case 'CH': return 3
+ case 'HS': return 4
+ case 'ANY': return 255
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/examples/doh.js b/testing/xpcshell/dns-packet/examples/doh.js
new file mode 100644
index 0000000000..37ef19fc35
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/doh.js
@@ -0,0 +1,52 @@
+
+'use strict'
+
+/*
+ * Sample code to make DNS over HTTPS request using POST
+ * AUTHOR: Tom Pusateri <pusateri@bangj.com>
+ * DATE: March 17, 2018
+ * LICENSE: MIT
+ */
+
+const dnsPacket = require('..')
+const https = require('https')
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const options = {
+ hostname: 'dns.google.com',
+ port: 443,
+ path: '/experimental',
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/dns-udpwireformat',
+ 'Content-Length': Buffer.byteLength(buf)
+ }
+}
+
+const request = https.request(options, (response) => {
+ console.log('statusCode:', response.statusCode)
+ console.log('headers:', response.headers)
+
+ response.on('data', (d) => {
+ console.log(dnsPacket.decode(d))
+ })
+})
+
+request.on('error', (e) => {
+ console.error(e)
+})
+request.write(buf)
+request.end()
diff --git a/testing/xpcshell/dns-packet/examples/tcp.js b/testing/xpcshell/dns-packet/examples/tcp.js
new file mode 100644
index 0000000000..b25c2c41cb
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/tcp.js
@@ -0,0 +1,52 @@
+'use strict'
+
+const dnsPacket = require('..')
+const net = require('net')
+
+var response = null
+var expectedLength = 0
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.streamEncode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const client = new net.Socket()
+client.connect(53, '8.8.8.8', function () {
+ console.log('Connected')
+ client.write(buf)
+})
+
+client.on('data', function (data) {
+ console.log('Received response: %d bytes', data.byteLength)
+ if (response == null) {
+ if (data.byteLength > 1) {
+ const plen = data.readUInt16BE(0)
+ expectedLength = plen
+ if (plen < 12) {
+ throw new Error('below DNS minimum packet length')
+ }
+ response = Buffer.from(data)
+ }
+ } else {
+ response = Buffer.concat([response, data])
+ }
+
+ if (response.byteLength >= expectedLength) {
+ console.log(dnsPacket.streamDecode(response))
+ client.destroy()
+ }
+})
+
+client.on('close', function () {
+ console.log('Connection closed')
+})
diff --git a/testing/xpcshell/dns-packet/examples/tls.js b/testing/xpcshell/dns-packet/examples/tls.js
new file mode 100644
index 0000000000..694a4fecfa
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/tls.js
@@ -0,0 +1,61 @@
+'use strict'
+
+const tls = require('tls')
+const dnsPacket = require('..')
+
+var response = null
+var expectedLength = 0
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.streamEncode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+const context = tls.createSecureContext({
+ secureProtocol: 'TLSv1_2_method'
+})
+
+const options = {
+ port: 853,
+ host: 'getdnsapi.net',
+ secureContext: context
+}
+
+const client = tls.connect(options, () => {
+ console.log('client connected')
+ client.write(buf)
+})
+
+client.on('data', function (data) {
+ console.log('Received response: %d bytes', data.byteLength)
+ if (response == null) {
+ if (data.byteLength > 1) {
+ const plen = data.readUInt16BE(0)
+ expectedLength = plen
+ if (plen < 12) {
+ throw new Error('below DNS minimum packet length')
+ }
+ response = Buffer.from(data)
+ }
+ } else {
+ response = Buffer.concat([response, data])
+ }
+
+ if (response.byteLength >= expectedLength) {
+ console.log(dnsPacket.streamDecode(response))
+ client.destroy()
+ }
+})
+
+client.on('end', () => {
+ console.log('Connection ended')
+})
diff --git a/testing/xpcshell/dns-packet/examples/udp.js b/testing/xpcshell/dns-packet/examples/udp.js
new file mode 100644
index 0000000000..0f9df9d794
--- /dev/null
+++ b/testing/xpcshell/dns-packet/examples/udp.js
@@ -0,0 +1,28 @@
+'use strict'
+
+const dnsPacket = require('..')
+const dgram = require('dgram')
+
+const socket = dgram.createSocket('udp4')
+
+function getRandomInt (min, max) {
+ return Math.floor(Math.random() * (max - min + 1)) + min
+}
+
+const buf = dnsPacket.encode({
+ type: 'query',
+ id: getRandomInt(1, 65534),
+ flags: dnsPacket.RECURSION_DESIRED,
+ questions: [{
+ type: 'A',
+ name: 'google.com'
+ }]
+})
+
+socket.on('message', function (message, rinfo) {
+ console.log(rinfo)
+ console.log(dnsPacket.decode(message)) // prints out a response from google dns
+ socket.close()
+})
+
+socket.send(buf, 0, buf.length, 53, '8.8.8.8')
diff --git a/testing/xpcshell/dns-packet/index.js b/testing/xpcshell/dns-packet/index.js
new file mode 100644
index 0000000000..f1b7352731
--- /dev/null
+++ b/testing/xpcshell/dns-packet/index.js
@@ -0,0 +1,1841 @@
+'use strict'
+
+const types = require('./types')
+const rcodes = require('./rcodes')
+exports.rcodes = rcodes;
+const opcodes = require('./opcodes')
+const classes = require('./classes')
+const optioncodes = require('./optioncodes')
+const ip = require('../node_ip')
+
+const QUERY_FLAG = 0
+const RESPONSE_FLAG = 1 << 15
+const FLUSH_MASK = 1 << 15
+const NOT_FLUSH_MASK = ~FLUSH_MASK
+const QU_MASK = 1 << 15
+const NOT_QU_MASK = ~QU_MASK
+
+const name = exports.txt = exports.name = {}
+
+name.encode = function (str, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(name.encodingLength(str))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ // strip leading and trailing .
+ const n = str.replace(/^\.|\.$/gm, '')
+ if (n.length) {
+ const list = n.split('.')
+
+ for (let i = 0; i < list.length; i++) {
+ const len = buf.write(list[i], offset + 1)
+ buf[offset] = len
+ offset += len + 1
+ }
+ }
+
+ buf[offset++] = 0
+
+ name.encode.bytes = offset - oldOffset
+ return buf
+}
+
+name.encode.bytes = 0
+
+name.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const list = []
+ const oldOffset = offset
+ let len = buf[offset++]
+
+ if (len === 0) {
+ name.decode.bytes = 1
+ return '.'
+ }
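+ // a length byte with the top two bits set (>= 0xc0) is a DNS compression
+ // pointer (RFC 1035 section 4.1.4); the remaining 14 bits give the offset
+ // of the rest of the name, hence the 0xc000 subtraction below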
+ if (len >= 0xc0) {
+ const res = name.decode(buf, buf.readUInt16BE(offset - 1) - 0xc000)
+ name.decode.bytes = 2
+ return res
+ }
+
+ while (len) {
+ if (len >= 0xc0) {
+ list.push(name.decode(buf, buf.readUInt16BE(offset - 1) - 0xc000))
+ offset++
+ break
+ }
+
+ list.push(buf.toString('utf-8', offset, offset + len))
+ offset += len
+ len = buf[offset++]
+ }
+
+ name.decode.bytes = offset - oldOffset
+ return list.join('.')
+}
+
+name.decode.bytes = 0
+
+name.encodingLength = function (n) {
+ if (n === '.') return 1
+ return Buffer.byteLength(n) + 2
+}
+
+const string = {}
+
+string.encode = function (s, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(string.encodingLength(s))
+ if (!offset) offset = 0
+
+ const len = buf.write(s, offset + 1)
+ buf[offset] = len
+ string.encode.bytes = len + 1
+ return buf
+}
+
+string.encode.bytes = 0
+
+string.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf[offset]
+ const s = buf.toString('utf-8', offset + 1, offset + 1 + len)
+ string.decode.bytes = len + 1
+ return s
+}
+
+string.decode.bytes = 0
+
+string.encodingLength = function (s) {
+ return Buffer.byteLength(s) + 1
+}
+
+const header = {}
+
+header.encode = function (h, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(header.encodingLength(h))
+ if (!offset) offset = 0
+
+ const flags = (h.flags || 0) & 32767
+ const type = h.type === 'response' ? RESPONSE_FLAG : QUERY_FLAG
+
+ buf.writeUInt16BE(h.id || 0, offset)
+ buf.writeUInt16BE(flags | type, offset + 2)
+ buf.writeUInt16BE(h.questions.length, offset + 4)
+ buf.writeUInt16BE(h.answers.length, offset + 6)
+ buf.writeUInt16BE(h.authorities.length, offset + 8)
+ buf.writeUInt16BE(h.additionals.length, offset + 10)
+
+ return buf
+}
+
+header.encode.bytes = 12
+
+header.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ if (buf.length < 12) throw new Error('Header must be 12 bytes')
+ const flags = buf.readUInt16BE(offset + 2)
+
+ return {
+ id: buf.readUInt16BE(offset),
+ type: flags & RESPONSE_FLAG ? 'response' : 'query',
+ flags: flags & 32767,
+ flag_qr: ((flags >> 15) & 0x1) === 1,
+ opcode: opcodes.toString((flags >> 11) & 0xf),
+ flag_aa: ((flags >> 10) & 0x1) === 1,
+ flag_tc: ((flags >> 9) & 0x1) === 1,
+ flag_rd: ((flags >> 8) & 0x1) === 1,
+ flag_ra: ((flags >> 7) & 0x1) === 1,
+ flag_z: ((flags >> 6) & 0x1) === 1,
+ flag_ad: ((flags >> 5) & 0x1) === 1,
+ flag_cd: ((flags >> 4) & 0x1) === 1,
+ rcode: rcodes.toString(flags & 0xf),
+ questions: new Array(buf.readUInt16BE(offset + 4)),
+ answers: new Array(buf.readUInt16BE(offset + 6)),
+ authorities: new Array(buf.readUInt16BE(offset + 8)),
+ additionals: new Array(buf.readUInt16BE(offset + 10))
+ }
+}
+
+header.decode.bytes = 12
+
+header.encodingLength = function () {
+ return 12
+}
+
+const runknown = exports.unknown = {}
+
+runknown.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(runknown.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(data.length, offset)
+ data.copy(buf, offset + 2)
+
+ runknown.encode.bytes = data.length + 2
+ return buf
+}
+
+runknown.encode.bytes = 0
+
+runknown.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ const data = buf.slice(offset + 2, offset + 2 + len)
+ runknown.decode.bytes = len + 2
+ return data
+}
+
+runknown.decode.bytes = 0
+
+runknown.encodingLength = function (data) {
+ return data.length + 2
+}
+
+const rns = exports.ns = {}
+
+rns.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rns.encodingLength(data))
+ if (!offset) offset = 0
+
+ name.encode(data, buf, offset + 2)
+ buf.writeUInt16BE(name.encode.bytes, offset)
+ rns.encode.bytes = name.encode.bytes + 2
+ return buf
+}
+
+rns.encode.bytes = 0
+
+rns.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ const dd = name.decode(buf, offset + 2)
+
+ rns.decode.bytes = len + 2
+ return dd
+}
+
+rns.decode.bytes = 0
+
+rns.encodingLength = function (data) {
+ return name.encodingLength(data) + 2
+}
+
+const rsoa = exports.soa = {}
+
+rsoa.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rsoa.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ name.encode(data.mname, buf, offset)
+ offset += name.encode.bytes
+ name.encode(data.rname, buf, offset)
+ offset += name.encode.bytes
+ buf.writeUInt32BE(data.serial || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.refresh || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.retry || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.expire || 0, offset)
+ offset += 4
+ buf.writeUInt32BE(data.minimum || 0, offset)
+ offset += 4
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rsoa.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rsoa.encode.bytes = 0
+
+rsoa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.mname = name.decode(buf, offset)
+ offset += name.decode.bytes
+ data.rname = name.decode(buf, offset)
+ offset += name.decode.bytes
+ data.serial = buf.readUInt32BE(offset)
+ offset += 4
+ data.refresh = buf.readUInt32BE(offset)
+ offset += 4
+ data.retry = buf.readUInt32BE(offset)
+ offset += 4
+ data.expire = buf.readUInt32BE(offset)
+ offset += 4
+ data.minimum = buf.readUInt32BE(offset)
+ offset += 4
+
+ rsoa.decode.bytes = offset - oldOffset
+ return data
+}
+
+rsoa.decode.bytes = 0
+
+rsoa.encodingLength = function (data) {
+ return 22 + name.encodingLength(data.mname) + name.encodingLength(data.rname)
+}
+
+const rtxt = exports.txt = {}
+
+rtxt.encode = function (data, buf, offset) {
+ if (!Array.isArray(data)) data = [data]
+ for (let i = 0; i < data.length; i++) {
+ if (typeof data[i] === 'string') {
+ data[i] = Buffer.from(data[i])
+ }
+ if (!Buffer.isBuffer(data[i])) {
+ throw new Error('Must be a Buffer')
+ }
+ }
+
+ if (!buf) buf = Buffer.allocUnsafe(rtxt.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+
+ data.forEach(function (d) {
+ buf[offset++] = d.length
+ d.copy(buf, offset, 0, d.length)
+ offset += d.length
+ })
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rtxt.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rtxt.encode.bytes = 0
+
+rtxt.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+ let remaining = buf.readUInt16BE(offset)
+ offset += 2
+
+ let data = []
+ while (remaining > 0) {
+ const len = buf[offset++]
+ --remaining
+ if (remaining < len) {
+ throw new Error('Buffer overflow')
+ }
+ data.push(buf.slice(offset, offset + len))
+ offset += len
+ remaining -= len
+ }
+
+ rtxt.decode.bytes = offset - oldOffset
+ return data
+}
+
+rtxt.decode.bytes = 0
+
+rtxt.encodingLength = function (data) {
+ if (!Array.isArray(data)) data = [data]
+ let length = 2
+ data.forEach(function (buf) {
+ if (typeof buf === 'string') {
+ length += Buffer.byteLength(buf) + 1
+ } else {
+ length += buf.length + 1
+ }
+ })
+ return length
+}
+
+const rnull = exports.null = {}
+
+rnull.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnull.encodingLength(data))
+ if (!offset) offset = 0
+
+ if (typeof data === 'string') data = Buffer.from(data)
+ if (!data) data = Buffer.allocUnsafe(0)
+
+ const oldOffset = offset
+ offset += 2
+
+ const len = data.length
+ data.copy(buf, offset, 0, len)
+ offset += len
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rnull.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rnull.encode.bytes = 0
+
+rnull.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+ const len = buf.readUInt16BE(offset)
+
+ offset += 2
+
+ const data = buf.slice(offset, offset + len)
+ offset += len
+
+ rnull.decode.bytes = offset - oldOffset
+ return data
+}
+
+rnull.decode.bytes = 0
+
+rnull.encodingLength = function (data) {
+ if (!data) return 2
+ return (Buffer.isBuffer(data) ? data.length : Buffer.byteLength(data)) + 2
+}
+
+const rhinfo = exports.hinfo = {}
+
+rhinfo.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rhinfo.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ string.encode(data.cpu, buf, offset)
+ offset += string.encode.bytes
+ string.encode(data.os, buf, offset)
+ offset += string.encode.bytes
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rhinfo.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rhinfo.encode.bytes = 0
+
+rhinfo.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.cpu = string.decode(buf, offset)
+ offset += string.decode.bytes
+ data.os = string.decode(buf, offset)
+ offset += string.decode.bytes
+ rhinfo.decode.bytes = offset - oldOffset
+ return data
+}
+
+rhinfo.decode.bytes = 0
+
+rhinfo.encodingLength = function (data) {
+ return string.encodingLength(data.cpu) + string.encodingLength(data.os) + 2
+}
+
+const rptr = exports.ptr = {}
+const rcname = exports.cname = rptr
+const rdname = exports.dname = rptr
+
+rptr.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rptr.encodingLength(data))
+ if (!offset) offset = 0
+
+ name.encode(data, buf, offset + 2)
+ buf.writeUInt16BE(name.encode.bytes, offset)
+ rptr.encode.bytes = name.encode.bytes + 2
+ return buf
+}
+
+rptr.encode.bytes = 0
+
+rptr.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const data = name.decode(buf, offset + 2)
+ rptr.decode.bytes = name.decode.bytes + 2
+ return data
+}
+
+rptr.decode.bytes = 0
+
+rptr.encodingLength = function (data) {
+ return name.encodingLength(data) + 2
+}
+
+const rsrv = exports.srv = {}
+
+rsrv.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rsrv.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(data.priority || 0, offset + 2)
+ buf.writeUInt16BE(data.weight || 0, offset + 4)
+ buf.writeUInt16BE(data.port || 0, offset + 6)
+ name.encode(data.target, buf, offset + 8)
+
+ const len = name.encode.bytes + 6
+ buf.writeUInt16BE(len, offset)
+
+ rsrv.encode.bytes = len + 2
+ return buf
+}
+
+rsrv.encode.bytes = 0
+
+rsrv.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+
+ const data = {}
+ data.priority = buf.readUInt16BE(offset + 2)
+ data.weight = buf.readUInt16BE(offset + 4)
+ data.port = buf.readUInt16BE(offset + 6)
+ data.target = name.decode(buf, offset + 8)
+
+ rsrv.decode.bytes = len + 2
+ return data
+}
+
+rsrv.decode.bytes = 0
+
+rsrv.encodingLength = function (data) {
+ return 8 + name.encodingLength(data.target)
+}
+
+const rcaa = exports.caa = {}
+
+rcaa.ISSUER_CRITICAL = 1 << 7
+
+rcaa.encode = function (data, buf, offset) {
+ const len = rcaa.encodingLength(data)
+
+ if (!buf) buf = Buffer.allocUnsafe(rcaa.encodingLength(data))
+ if (!offset) offset = 0
+
+ if (data.issuerCritical) {
+ data.flags = rcaa.ISSUER_CRITICAL
+ }
+
+ buf.writeUInt16BE(len - 2, offset)
+ offset += 2
+ buf.writeUInt8(data.flags || 0, offset)
+ offset += 1
+ string.encode(data.tag, buf, offset)
+ offset += string.encode.bytes
+ buf.write(data.value, offset)
+ offset += Buffer.byteLength(data.value)
+
+ rcaa.encode.bytes = len
+ return buf
+}
+
+rcaa.encode.bytes = 0
+
+rcaa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const len = buf.readUInt16BE(offset)
+ offset += 2
+
+ const oldOffset = offset
+ const data = {}
+ data.flags = buf.readUInt8(offset)
+ offset += 1
+ data.tag = string.decode(buf, offset)
+ offset += string.decode.bytes
+ data.value = buf.toString('utf-8', offset, oldOffset + len)
+
+ data.issuerCritical = !!(data.flags & rcaa.ISSUER_CRITICAL)
+
+ rcaa.decode.bytes = len + 2
+
+ return data
+}
+
+rcaa.decode.bytes = 0
+
+rcaa.encodingLength = function (data) {
+ return string.encodingLength(data.tag) + string.encodingLength(data.value) + 2
+}
+
+const rmx = exports.mx = {}
+
+rmx.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rmx.encodingLength(data))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ offset += 2
+ buf.writeUInt16BE(data.preference || 0, offset)
+ offset += 2
+ name.encode(data.exchange, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(offset - oldOffset - 2, oldOffset)
+ rmx.encode.bytes = offset - oldOffset
+ return buf
+}
+
+rmx.encode.bytes = 0
+
+rmx.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.preference = buf.readUInt16BE(offset)
+ offset += 2
+ data.exchange = name.decode(buf, offset)
+ offset += name.decode.bytes
+
+ rmx.decode.bytes = offset - oldOffset
+ return data
+}
+
+rmx.encodingLength = function (data) {
+ return 4 + name.encodingLength(data.exchange)
+}
+
+const ra = exports.a = {}
+
+ra.encode = function (host, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(ra.encodingLength(host))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(4, offset)
+ offset += 2
+ ip.toBuffer(host, buf, offset)
+ ra.encode.bytes = 6
+ return buf
+}
+
+ra.encode.bytes = 0
+
+ra.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ offset += 2
+ const host = ip.toString(buf, offset, 4)
+ ra.decode.bytes = 6
+ return host
+}
+ra.decode.bytes = 0
+
+ra.encodingLength = function () {
+ return 6
+}
+
+const raaaa = exports.aaaa = {}
+
+raaaa.encode = function (host, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(raaaa.encodingLength(host))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(16, offset)
+ offset += 2
+ ip.toBuffer(host, buf, offset)
+ raaaa.encode.bytes = 18
+ return buf
+}
+
+raaaa.encode.bytes = 0
+
+raaaa.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ offset += 2
+ const host = ip.toString(buf, offset, 16)
+ raaaa.decode.bytes = 18
+ return host
+}
+
+raaaa.decode.bytes = 0
+
+raaaa.encodingLength = function () {
+ return 18
+}
+
+const roption = exports.option = {}
+
+roption.encode = function (option, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(roption.encodingLength(option))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const code = optioncodes.toCode(option.code)
+ buf.writeUInt16BE(code, offset)
+ offset += 2
+ if (option.data) {
+ buf.writeUInt16BE(option.data.length, offset)
+ offset += 2
+ option.data.copy(buf, offset)
+ offset += option.data.length
+ } else {
+ switch (code) {
+ // case 3: NSID. No encode makes sense.
+ // case 5,6,7: Not implementable
+ case 8: // ECS
+ // note: do IP math before calling
+ const spl = option.sourcePrefixLength || 0
+ const fam = option.family || (ip.isV4Format(option.ip) ? 1 : 2)
+ const ipBuf = ip.toBuffer(option.ip)
+ const ipLen = Math.ceil(spl / 8)
+ buf.writeUInt16BE(ipLen + 4, offset)
+ offset += 2
+ buf.writeUInt16BE(fam, offset)
+ offset += 2
+ buf.writeUInt8(spl, offset++)
+ buf.writeUInt8(option.scopePrefixLength || 0, offset++)
+
+ ipBuf.copy(buf, offset, 0, ipLen)
+ offset += ipLen
+ break
+ // case 9: EXPIRE (experimental)
+ // case 10: COOKIE. No encode makes sense.
+ case 11: // KEEP-ALIVE
+ if (option.timeout) {
+ buf.writeUInt16BE(2, offset)
+ offset += 2
+ buf.writeUInt16BE(option.timeout, offset)
+ offset += 2
+ } else {
+ buf.writeUInt16BE(0, offset)
+ offset += 2
+ }
+ break
+ case 12: // PADDING
+ const len = option.length || 0
+ buf.writeUInt16BE(len, offset)
+ offset += 2
+ buf.fill(0, offset, offset + len)
+ offset += len
+ break
+ // case 13: CHAIN. Experimental.
+ case 14: // KEY-TAG
+ const tagsLen = option.tags.length * 2
+ buf.writeUInt16BE(tagsLen, offset)
+ offset += 2
+ for (const tag of option.tags) {
+ buf.writeUInt16BE(tag, offset)
+ offset += 2
+ }
+ break
+ case 15: // EDNS_ERROR
+ const text = option.text || "";
+ buf.writeUInt16BE(text.length + 2, offset)
+ offset += 2;
+ buf.writeUInt16BE(option.extended_error, offset)
+ offset += 2;
+ buf.write(text, offset);
+ offset += text.length;
+ break;
+ default:
+ throw new Error(`Unknown roption code: ${option.code}`)
+ }
+ }
+
+ roption.encode.bytes = offset - oldOffset
+ return buf
+}
+
+roption.encode.bytes = 0
+
+roption.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const option = {}
+ option.code = buf.readUInt16BE(offset)
+ option.type = optioncodes.toString(option.code)
+ offset += 2
+ const len = buf.readUInt16BE(offset)
+ offset += 2
+ option.data = buf.slice(offset, offset + len)
+ switch (option.code) {
+ // case 3: NSID. No decode makes sense.
+ case 8: // ECS
+ option.family = buf.readUInt16BE(offset)
+ offset += 2
+ option.sourcePrefixLength = buf.readUInt8(offset++)
+ option.scopePrefixLength = buf.readUInt8(offset++)
+ const padded = Buffer.alloc((option.family === 1) ? 4 : 16)
+ buf.copy(padded, 0, offset, offset + len - 4)
+ option.ip = ip.toString(padded)
+ break
+ // case 12: Padding. No decode makes sense.
+ case 11: // KEEP-ALIVE
+ if (len > 0) {
+ option.timeout = buf.readUInt16BE(offset)
+ offset += 2
+ }
+ break
+ case 14:
+ option.tags = []
+ for (let i = 0; i < len; i += 2) {
+ option.tags.push(buf.readUInt16BE(offset))
+ offset += 2
+ }
+ // don't worry about default. caller will use data if desired
+ }
+
+ roption.decode.bytes = len + 4
+ return option
+}
+
+roption.decode.bytes = 0
+
+roption.encodingLength = function (option) {
+ if (option.data) {
+ return option.data.length + 4
+ }
+ const code = optioncodes.toCode(option.code)
+ switch (code) {
+ case 8: // ECS
+ const spl = option.sourcePrefixLength || 0
+ return Math.ceil(spl / 8) + 8
+ case 11: // KEEP-ALIVE
+ return (typeof option.timeout === 'number') ? 6 : 4
+ case 12: // PADDING
+ return option.length + 4
+ case 14: // KEY-TAG
+ return 4 + (option.tags.length * 2)
+ case 15: // EDNS_ERROR
+ return 4 + 2 + option.text.length
+ }
+ throw new Error(`Unknown roption code: ${option.code}`)
+}
+
+const ropt = exports.opt = {}
+
+ropt.encode = function (options, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(ropt.encodingLength(options))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const rdlen = encodingLengthList(options, roption)
+ buf.writeUInt16BE(rdlen, offset)
+ offset = encodeList(options, roption, buf, offset + 2)
+
+ ropt.encode.bytes = offset - oldOffset
+ return buf
+}
+
+ropt.encode.bytes = 0
+
+ropt.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const options = []
+ let rdlen = buf.readUInt16BE(offset)
+ offset += 2
+ let o = 0
+ while (rdlen > 0) {
+ options[o++] = roption.decode(buf, offset)
+ offset += roption.decode.bytes
+ rdlen -= roption.decode.bytes
+ }
+ ropt.decode.bytes = offset - oldOffset
+ return options
+}
+
+ropt.decode.bytes = 0
+
+ropt.encodingLength = function (options) {
+ return 2 + encodingLengthList(options || [], roption)
+}
+
+const rdnskey = exports.dnskey = {}
+
+rdnskey.PROTOCOL_DNSSEC = 3
+rdnskey.ZONE_KEY = 0x80
+rdnskey.SECURE_ENTRYPOINT = 0x8000
+
+rdnskey.encode = function (key, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rdnskey.encodingLength(key))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const keydata = key.key
+ if (!Buffer.isBuffer(keydata)) {
+ throw new Error('Key must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(key.flags, offset)
+ offset += 2
+ buf.writeUInt8(rdnskey.PROTOCOL_DNSSEC, offset)
+ offset += 1
+ buf.writeUInt8(key.algorithm, offset)
+ offset += 1
+ keydata.copy(buf, offset, 0, keydata.length)
+ offset += keydata.length
+
+ rdnskey.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rdnskey.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rdnskey.encode.bytes = 0
+
+rdnskey.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var key = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ key.flags = buf.readUInt16BE(offset)
+ offset += 2
+ if (buf.readUInt8(offset) !== rdnskey.PROTOCOL_DNSSEC) {
+ throw new Error('Protocol must be 3')
+ }
+ offset += 1
+ key.algorithm = buf.readUInt8(offset)
+ offset += 1
+ key.key = buf.slice(offset, oldOffset + length + 2)
+ offset += key.key.length
+ rdnskey.decode.bytes = offset - oldOffset
+ return key
+}
+
+rdnskey.decode.bytes = 0
+
+rdnskey.encodingLength = function (key) {
+ return 6 + Buffer.byteLength(key.key)
+}
+
+const rrrsig = exports.rrsig = {}
+
+rrrsig.encode = function (sig, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rrrsig.encodingLength(sig))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const signature = sig.signature
+ if (!Buffer.isBuffer(signature)) {
+ throw new Error('Signature must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(types.toType(sig.typeCovered), offset)
+ offset += 2
+ buf.writeUInt8(sig.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(sig.labels, offset)
+ offset += 1
+ buf.writeUInt32BE(sig.originalTTL, offset)
+ offset += 4
+ buf.writeUInt32BE(sig.expiration, offset)
+ offset += 4
+ buf.writeUInt32BE(sig.inception, offset)
+ offset += 4
+ buf.writeUInt16BE(sig.keyTag, offset)
+ offset += 2
+ name.encode(sig.signersName, buf, offset)
+ offset += name.encode.bytes
+ signature.copy(buf, offset, 0, signature.length)
+ offset += signature.length
+
+ rrrsig.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rrrsig.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rrrsig.encode.bytes = 0
+
+rrrsig.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var sig = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ sig.typeCovered = types.toString(buf.readUInt16BE(offset))
+ offset += 2
+ sig.algorithm = buf.readUInt8(offset)
+ offset += 1
+ sig.labels = buf.readUInt8(offset)
+ offset += 1
+ sig.originalTTL = buf.readUInt32BE(offset)
+ offset += 4
+ sig.expiration = buf.readUInt32BE(offset)
+ offset += 4
+ sig.inception = buf.readUInt32BE(offset)
+ offset += 4
+ sig.keyTag = buf.readUInt16BE(offset)
+ offset += 2
+ sig.signersName = name.decode(buf, offset)
+ offset += name.decode.bytes
+ sig.signature = buf.slice(offset, oldOffset + length + 2)
+ offset += sig.signature.length
+ rrrsig.decode.bytes = offset - oldOffset
+ return sig
+}
+
+rrrsig.decode.bytes = 0
+
+rrrsig.encodingLength = function (sig) {
+ return 20 +
+ name.encodingLength(sig.signersName) +
+ Buffer.byteLength(sig.signature)
+}
+
+const rrp = exports.rp = {}
+
+rrp.encode = function (data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rrp.encodingLength(data))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ offset += 2 // Leave space for length
+ name.encode(data.mbox || '.', buf, offset)
+ offset += name.encode.bytes
+ name.encode(data.txt || '.', buf, offset)
+ offset += name.encode.bytes
+ rrp.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rrp.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rrp.encode.bytes = 0
+
+rrp.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const data = {}
+ offset += 2
+ data.mbox = name.decode(buf, offset) || '.'
+ offset += name.decode.bytes
+ data.txt = name.decode(buf, offset) || '.'
+ offset += name.decode.bytes
+ rrp.decode.bytes = offset - oldOffset
+ return data
+}
+
+rrp.decode.bytes = 0
+
+rrp.encodingLength = function (data) {
+ return 2 + name.encodingLength(data.mbox || '.') + name.encodingLength(data.txt || '.')
+}
+
+const typebitmap = {}
+
+typebitmap.encode = function (typelist, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(typebitmap.encodingLength(typelist))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var typesByWindow = []
+ for (var i = 0; i < typelist.length; i++) {
+ var typeid = types.toType(typelist[i])
+ if (typesByWindow[typeid >> 8] === undefined) {
+ typesByWindow[typeid >> 8] = []
+ }
+ typesByWindow[typeid >> 8][(typeid >> 3) & 0x1F] |= 1 << (7 - (typeid & 0x7))
+ }
+
+ for (i = 0; i < typesByWindow.length; i++) {
+ if (typesByWindow[i] !== undefined) {
+ var windowBuf = Buffer.from(typesByWindow[i])
+ buf.writeUInt8(i, offset)
+ offset += 1
+ buf.writeUInt8(windowBuf.length, offset)
+ offset += 1
+ windowBuf.copy(buf, offset)
+ offset += windowBuf.length
+ }
+ }
+
+ typebitmap.encode.bytes = offset - oldOffset
+ return buf
+}
+
+typebitmap.encode.bytes = 0
+
+typebitmap.decode = function (buf, offset, length) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var typelist = []
+ while (offset - oldOffset < length) {
+ var window = buf.readUInt8(offset)
+ offset += 1
+ var windowLength = buf.readUInt8(offset)
+ offset += 1
+ for (var i = 0; i < windowLength; i++) {
+ var b = buf.readUInt8(offset + i)
+ for (var j = 0; j < 8; j++) {
+ if (b & (1 << (7 - j))) {
+ var typeid = types.toString((window << 8) | (i << 3) | j)
+ typelist.push(typeid)
+ }
+ }
+ }
+ offset += windowLength
+ }
+
+ typebitmap.decode.bytes = offset - oldOffset
+ return typelist
+}
+
+typebitmap.decode.bytes = 0
+
+typebitmap.encodingLength = function (typelist) {
+ var extents = []
+ for (var i = 0; i < typelist.length; i++) {
+ var typeid = types.toType(typelist[i])
+ extents[typeid >> 8] = Math.max(extents[typeid >> 8] || 0, typeid & 0xFF)
+ }
+
+ var len = 0
+ for (i = 0; i < extents.length; i++) {
+ if (extents[i] !== undefined) {
+ len += 2 + Math.ceil((extents[i] + 1) / 8)
+ }
+ }
+
+ return len
+}
+
+const rnsec = exports.nsec = {}
+
+rnsec.encode = function (record, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnsec.encodingLength(record))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ offset += 2 // Leave space for length
+ name.encode(record.nextDomain, buf, offset)
+ offset += name.encode.bytes
+ typebitmap.encode(record.rrtypes, buf, offset)
+ offset += typebitmap.encode.bytes
+
+ rnsec.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rnsec.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rnsec.encode.bytes = 0
+
+rnsec.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var record = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ record.nextDomain = name.decode(buf, offset)
+ offset += name.decode.bytes
+ record.rrtypes = typebitmap.decode(buf, offset, length - (offset - oldOffset))
+ offset += typebitmap.decode.bytes
+
+ rnsec.decode.bytes = offset - oldOffset
+ return record
+}
+
+rnsec.decode.bytes = 0
+
+rnsec.encodingLength = function (record) {
+ return 2 +
+ name.encodingLength(record.nextDomain) +
+ typebitmap.encodingLength(record.rrtypes)
+}
+
+const rnsec3 = exports.nsec3 = {}
+
+rnsec3.encode = function (record, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rnsec3.encodingLength(record))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const salt = record.salt
+ if (!Buffer.isBuffer(salt)) {
+ throw new Error('salt must be a Buffer')
+ }
+
+ const nextDomain = record.nextDomain
+ if (!Buffer.isBuffer(nextDomain)) {
+ throw new Error('nextDomain must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt8(record.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(record.flags, offset)
+ offset += 1
+ buf.writeUInt16BE(record.iterations, offset)
+ offset += 2
+ buf.writeUInt8(salt.length, offset)
+ offset += 1
+ salt.copy(buf, offset, 0, salt.length)
+ offset += salt.length
+ buf.writeUInt8(nextDomain.length, offset)
+ offset += 1
+ nextDomain.copy(buf, offset, 0, nextDomain.length)
+ offset += nextDomain.length
+ typebitmap.encode(record.rrtypes, buf, offset)
+ offset += typebitmap.encode.bytes
+
+ rnsec3.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rnsec3.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rnsec3.encode.bytes = 0
+
+rnsec3.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var record = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ record.algorithm = buf.readUInt8(offset)
+ offset += 1
+ record.flags = buf.readUInt8(offset)
+ offset += 1
+ record.iterations = buf.readUInt16BE(offset)
+ offset += 2
+ const saltLength = buf.readUInt8(offset)
+ offset += 1
+ record.salt = buf.slice(offset, offset + saltLength)
+ offset += saltLength
+ const hashLength = buf.readUInt8(offset)
+ offset += 1
+ record.nextDomain = buf.slice(offset, offset + hashLength)
+ offset += hashLength
+ record.rrtypes = typebitmap.decode(buf, offset, length - (offset - oldOffset))
+ offset += typebitmap.decode.bytes
+
+ rnsec3.decode.bytes = offset - oldOffset
+ return record
+}
+
+rnsec3.decode.bytes = 0
+
+rnsec3.encodingLength = function (record) {
+ return 8 +
+ record.salt.length +
+ record.nextDomain.length +
+ typebitmap.encodingLength(record.rrtypes)
+}
+
+const rds = exports.ds = {}
+
+rds.encode = function (digest, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rds.encodingLength(digest))
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ const digestdata = digest.digest
+ if (!Buffer.isBuffer(digestdata)) {
+ throw new Error('Digest must be a Buffer')
+ }
+
+ offset += 2 // Leave space for length
+ buf.writeUInt16BE(digest.keyTag, offset)
+ offset += 2
+ buf.writeUInt8(digest.algorithm, offset)
+ offset += 1
+ buf.writeUInt8(digest.digestType, offset)
+ offset += 1
+ digestdata.copy(buf, offset, 0, digestdata.length)
+ offset += digestdata.length
+
+ rds.encode.bytes = offset - oldOffset
+ buf.writeUInt16BE(rds.encode.bytes - 2, oldOffset)
+ return buf
+}
+
+rds.encode.bytes = 0
+
+rds.decode = function (buf, offset) {
+ if (!offset) offset = 0
+ const oldOffset = offset
+
+ var digest = {}
+ var length = buf.readUInt16BE(offset)
+ offset += 2
+ digest.keyTag = buf.readUInt16BE(offset)
+ offset += 2
+ digest.algorithm = buf.readUInt8(offset)
+ offset += 1
+ digest.digestType = buf.readUInt8(offset)
+ offset += 1
+ digest.digest = buf.slice(offset, oldOffset + length + 2)
+ offset += digest.digest.length
+ rds.decode.bytes = offset - oldOffset
+ return digest
+}
+
+rds.decode.bytes = 0
+
+rds.encodingLength = function (digest) {
+ return 6 + Buffer.byteLength(digest.digest)
+}
+
+const svcparam = exports.svcparam = {}
+
+svcparam.keyToNumber = function(keyName) {
+ switch (keyName.toLowerCase()) {
+ case 'mandatory': return 0
+ case 'alpn' : return 1
+ case 'no-default-alpn' : return 2
+ case 'port' : return 3
+ case 'ipv4hint' : return 4
+ case 'echconfig' : return 5
+ case 'ipv6hint' : return 6
+ case 'odoh' : return 32769
+ case 'key65535' : return 65535
+ }
+ if (!keyName.startsWith('key')) {
+ throw new Error(`Name must start with key: ${keyName}`);
+ }
+
+ return Number.parseInt(keyName.substring(3));
+}
+
+svcparam.numberToKeyName = function(number) {
+ switch (number) {
+ case 0 : return 'mandatory'
+ case 1 : return 'alpn'
+ case 2 : return 'no-default-alpn'
+ case 3 : return 'port'
+ case 4 : return 'ipv4hint'
+ case 5 : return 'echconfig'
+ case 6 : return 'ipv6hint'
+ case 32769 : return 'odoh'
+ }
+
+ return `key${number}`;
+}
+
+svcparam.encode = function(param, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(svcparam.encodingLength(param))
+ if (!offset) offset = 0
+
+ let key = param.key;
+ if (typeof param.key !== 'number') {
+ key = svcparam.keyToNumber(param.key);
+ }
+
+ buf.writeUInt16BE(key || 0, offset)
+ offset += 2;
+ svcparam.encode.bytes = 2;
+
+ if (key == 0) { // mandatory
+ let values = param.value;
+ if (!Array.isArray(values)) values = [values];
+ buf.writeUInt16BE(values.length*2, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let val of values) {
+ if (typeof val !== 'number') {
+ val = svcparam.keyToNumber(val);
+ }
+ buf.writeUInt16BE(val, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ }
+ } else if (key == 1) { // alpn
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ // The alpn param is prefixed by its length as a single byte, so the
+ // initialValue to reduce function is the length of the array.
+ let total = val.reduce(function(result, id) {
+ return result += id.length;
+ }, val.length);
+
+ buf.writeUInt16BE(total, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let id of val) {
+ buf.writeUInt8(id.length, offset);
+ offset += 1;
+ svcparam.encode.bytes += 1;
+
+ buf.write(id, offset);
+ offset += id.length;
+ svcparam.encode.bytes += id.length;
+ }
+ } else if (key == 2) { // no-default-alpn
+ buf.writeUInt16BE(0, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ } else if (key == 3) { // port
+ buf.writeUInt16BE(2, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.writeUInt16BE(param.value || 0, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ } else if (key == 4) { //ipv4hint
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ buf.writeUInt16BE(val.length*4, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let host of val) {
+ ip.toBuffer(host, buf, offset)
+ offset += 4;
+ svcparam.encode.bytes += 4;
+ }
+ } else if (key == 5) { //echconfig
+ if (svcparam.ech) {
+ buf.writeUInt16BE(svcparam.ech.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ for (let i = 0; i < svcparam.ech.length; i++) {
+ buf.writeUInt8(svcparam.ech[i], offset);
+ offset++;
+ }
+ svcparam.encode.bytes += svcparam.ech.length;
+ } else {
+ buf.writeUInt16BE(param.value.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.write(param.value, offset);
+ offset += param.value.length;
+ svcparam.encode.bytes += param.value.length;
+ }
+ } else if (key == 6) { //ipv6hint
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ buf.writeUInt16BE(val.length*16, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+
+ for (let host of val) {
+ ip.toBuffer(host, buf, offset)
+ offset += 16;
+ svcparam.encode.bytes += 16;
+ }
+ } else if (key == 32769) { //odoh
+ if (svcparam.odoh) {
+ buf.writeUInt16BE(svcparam.odoh.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ for (let i = 0; i < svcparam.odoh.length; i++) {
+ buf.writeUInt8(svcparam.odoh[i], offset);
+ offset++;
+ }
+ svcparam.encode.bytes += svcparam.odoh.length;
+ svcparam.odoh = null;
+ } else {
+ buf.writeUInt16BE(param.value.length, offset);
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ buf.write(param.value, offset);
+ offset += param.value.length;
+ svcparam.encode.bytes += param.value.length;
+ }
+ } else {
+ // Unknown option
+ buf.writeUInt16BE(0, offset); // 0 length since we don't know how to encode
+ offset += 2;
+ svcparam.encode.bytes += 2;
+ }
+ return buf;
+}
+
+svcparam.encode.bytes = 0;
+
+svcparam.decode = function (buf, offset) {
+ let param = {};
+ let id = buf.readUInt16BE(offset);
+ param.key = svcparam.numberToKeyName(id);
+ offset += 2;
+ svcparam.decode.bytes = 2;
+
+ let len = buf.readUInt16BE(offset);
+ offset += 2;
+ svcparam.decode.bytes += 2;
+
+ param.value = buf.toString('utf-8', offset, offset + len);
+ offset += len;
+ svcparam.decode.bytes += len;
+
+ return param;
+}
+
+svcparam.decode.bytes = 0;
+
+svcparam.encodingLength = function (param) {
+ // 2 bytes for type, 2 bytes for length, what's left for the value
+
+ switch (param.key) {
+ case 'mandatory' : return 4 + 2*(Array.isArray(param.value) ? param.value.length : 1)
+ case 'alpn' : {
+ let val = param.value;
+ if (!Array.isArray(val)) val = [val];
+ let total = val.reduce(function(result, id) {
+ return result += id.length;
+ }, val.length);
+ return 4 + total;
+ }
+ case 'no-default-alpn' : return 4
+ case 'port' : return 4 + 2
+ case 'ipv4hint' : return 4 + 4 * (Array.isArray(param.value) ? param.value.length : 1)
+ case 'echconfig' : {
+ if (param.needBase64Decode) {
+ svcparam.ech = Buffer.from(param.value, "base64");
+ return 4 + svcparam.ech.length;
+ }
+ return 4 + param.value.length
+ }
+ case 'ipv6hint' : return 4 + 16 * (Array.isArray(param.value) ? param.value.length : 1)
+ case 'odoh' : {
+ if (param.needBase64Decode) {
+ svcparam.odoh = Buffer.from(param.value, "base64");
+ return 4 + svcparam.odoh.length;
+ }
+ return 4 + param.value.length
+ }
+ case 'key65535' : return 4
+ default: return 4 // unknown option
+ }
+}
+
+const rhttpssvc = exports.httpssvc = {}
+
+rhttpssvc.encode = function(data, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(rhttpssvc.encodingLength(data))
+ if (!offset) offset = 0
+
+ buf.writeUInt16BE(rhttpssvc.encodingLength(data) - 2 , offset);
+ offset += 2;
+
+ buf.writeUInt16BE(data.priority || 0, offset);
+ rhttpssvc.encode.bytes = 4;
+ offset += 2;
+ name.encode(data.name, buf, offset);
+ rhttpssvc.encode.bytes += name.encode.bytes;
+ offset += name.encode.bytes;
+
+ if (data.priority == 0) {
+ return;
+ }
+
+ for (let val of data.values) {
+ svcparam.encode(val, buf, offset);
+ offset += svcparam.encode.bytes;
+ rhttpssvc.encode.bytes += svcparam.encode.bytes;
+ }
+
+ return buf;
+}
+
+rhttpssvc.encode.bytes = 0;
+
+rhttpssvc.decode = function (buf, offset) {
+ let rdlen = buf.readUInt16BE(offset);
+ let oldOffset = offset;
+ offset += 2;
+ let record = { values: [] }
+ record.priority = buf.readUInt16BE(offset);
+ offset += 2;
+ rhttpssvc.decode.bytes = 4;
+ record.name = name.decode(buf, offset);
+ offset += name.decode.bytes;
+ rhttpssvc.decode.bytes += name.decode.bytes;
+
+ while (rdlen > rhttpssvc.decode.bytes - 2) {
+ let rec1 = svcparam.decode(buf, offset);
+ offset += svcparam.decode.bytes;
+ rhttpssvc.decode.bytes += svcparam.decode.bytes;
+ record.values.push(rec1);
+ }
+
+ return record;
+}
+
+rhttpssvc.decode.bytes = 0;
+
+rhttpssvc.encodingLength = function (data) {
+ let len =
+ 2 + // rdlen
+ 2 + // priority
+ name.encodingLength(data.name);
+ len += data.values.map(svcparam.encodingLength).reduce((acc, len) => acc + len, 0);
+ return len;
+}
+
+const renc = exports.record = function (type) {
+ switch (type.toUpperCase()) {
+ case 'A': return ra
+ case 'PTR': return rptr
+ case 'CNAME': return rcname
+ case 'DNAME': return rdname
+ case 'TXT': return rtxt
+ case 'NULL': return rnull
+ case 'AAAA': return raaaa
+ case 'SRV': return rsrv
+ case 'HINFO': return rhinfo
+ case 'CAA': return rcaa
+ case 'NS': return rns
+ case 'SOA': return rsoa
+ case 'MX': return rmx
+ case 'OPT': return ropt
+ case 'DNSKEY': return rdnskey
+ case 'RRSIG': return rrrsig
+ case 'RP': return rrp
+ case 'NSEC': return rnsec
+ case 'NSEC3': return rnsec3
+ case 'DS': return rds
+ case 'HTTPS': return rhttpssvc
+ }
+ return runknown
+}
+
+const answer = exports.answer = {}
+
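+// a resource record is the owner name, a 2-byte type and then either the
+// usual class/ttl/rdata or, for OPT, the EDNS fixed fields (udp payload
+// size, extended rcode, version, flags) followed by the options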
+answer.encode = function (a, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(answer.encodingLength(a))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ name.encode(a.name, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(types.toType(a.type), offset)
+
+ if (a.type.toUpperCase() === 'OPT') {
+ if (a.name !== '.') {
+ throw new Error('OPT name must be root.')
+ }
+ buf.writeUInt16BE(a.udpPayloadSize || 4096, offset + 2)
+ buf.writeUInt8(a.extendedRcode || 0, offset + 4)
+ buf.writeUInt8(a.ednsVersion || 0, offset + 5)
+ buf.writeUInt16BE(a.flags || 0, offset + 6)
+
+ offset += 8
+ ropt.encode(a.options || [], buf, offset)
+ offset += ropt.encode.bytes
+ } else {
+ let klass = classes.toClass(a.class === undefined ? 'IN' : a.class)
+ if (a.flush) klass |= FLUSH_MASK // the 1st bit of the class is the flush bit
+ buf.writeUInt16BE(klass, offset + 2)
+ buf.writeUInt32BE(a.ttl || 0, offset + 4)
+
+ offset += 8
+ const enc = renc(a.type)
+ enc.encode(a.data, buf, offset)
+ offset += enc.encode.bytes
+ }
+
+ answer.encode.bytes = offset - oldOffset
+ return buf
+}
+
+answer.encode.bytes = 0
+
+answer.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const a = {}
+ const oldOffset = offset
+
+ a.name = name.decode(buf, offset)
+ offset += name.decode.bytes
+ a.type = types.toString(buf.readUInt16BE(offset))
+ if (a.type === 'OPT') {
+ a.udpPayloadSize = buf.readUInt16BE(offset + 2)
+ a.extendedRcode = buf.readUInt8(offset + 4)
+ a.ednsVersion = buf.readUInt8(offset + 5)
+ a.flags = buf.readUInt16BE(offset + 6)
+ a.flag_do = ((a.flags >> 15) & 0x1) === 1
+ a.options = ropt.decode(buf, offset + 8)
+ offset += 8 + ropt.decode.bytes
+ } else {
+ const klass = buf.readUInt16BE(offset + 2)
+ a.ttl = buf.readUInt32BE(offset + 4)
+ a.class = classes.toString(klass & NOT_FLUSH_MASK)
+ a.flush = !!(klass & FLUSH_MASK)
+
+ const enc = renc(a.type)
+ a.data = enc.decode(buf, offset + 8)
+ offset += 8 + enc.decode.bytes
+ }
+
+ answer.decode.bytes = offset - oldOffset
+ return a
+}
+
+answer.decode.bytes = 0
+
+answer.encodingLength = function (a) {
+ const data = (a.data !== null && a.data !== undefined) ? a.data : a.options
+ return name.encodingLength(a.name) + 8 + renc(a.type).encodingLength(data)
+}
+
+const question = exports.question = {}
+
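+// a question is just a name followed by a 2-byte type and a 2-byte class
+// (the class defaults to IN)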
+question.encode = function (q, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(question.encodingLength(q))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ name.encode(q.name, buf, offset)
+ offset += name.encode.bytes
+
+ buf.writeUInt16BE(types.toType(q.type), offset)
+ offset += 2
+
+ buf.writeUInt16BE(classes.toClass(q.class === undefined ? 'IN' : q.class), offset)
+ offset += 2
+
+ question.encode.bytes = offset - oldOffset
+  return buf
+}
+
+question.encode.bytes = 0
+
+question.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ const q = {}
+
+ q.name = name.decode(buf, offset)
+ offset += name.decode.bytes
+
+ q.type = types.toString(buf.readUInt16BE(offset))
+ offset += 2
+
+  const klass = buf.readUInt16BE(offset)
+  offset += 2
+
+  // strip the mDNS QU (unicast-response) bit before mapping the class to its name
+  q.class = classes.toString(klass & NOT_QU_MASK)
+
+ question.decode.bytes = offset - oldOffset
+ return q
+}
+
+question.decode.bytes = 0
+
+question.encodingLength = function (q) {
+ return name.encodingLength(q.name) + 4
+}
+
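+// header flag masks, matching the bit positions of the DNS header flags
+// word; DNSSEC_OK is the EDNS DO bit and belongs in the OPT record's flags,
+// not in the header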
+exports.AUTHORITATIVE_ANSWER = 1 << 10
+exports.TRUNCATED_RESPONSE = 1 << 9
+exports.RECURSION_DESIRED = 1 << 8
+exports.RECURSION_AVAILABLE = 1 << 7
+exports.AUTHENTIC_DATA = 1 << 5
+exports.CHECKING_DISABLED = 1 << 4
+exports.DNSSEC_OK = 1 << 15
+
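+// encode a whole message: header first, then the question, answer,
+// authority and additional sections in that order. a minimal sketch of the
+// expected input shape (example.com is just an illustration):
+//   packet.encode({ type: 'query', id: 1,
+//     questions: [{ type: 'A', name: 'example.com' }] })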
+exports.encode = function (result, buf, offset) {
+ if (!buf) buf = Buffer.allocUnsafe(exports.encodingLength(result))
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+
+ if (!result.questions) result.questions = []
+ if (!result.answers) result.answers = []
+ if (!result.authorities) result.authorities = []
+ if (!result.additionals) result.additionals = []
+
+ header.encode(result, buf, offset)
+ offset += header.encode.bytes
+
+ offset = encodeList(result.questions, question, buf, offset)
+ offset = encodeList(result.answers, answer, buf, offset)
+ offset = encodeList(result.authorities, answer, buf, offset)
+ offset = encodeList(result.additionals, answer, buf, offset)
+
+ exports.encode.bytes = offset - oldOffset
+
+ return buf
+}
+
+exports.encode.bytes = 0
+
+exports.decode = function (buf, offset) {
+ if (!offset) offset = 0
+
+ const oldOffset = offset
+ const result = header.decode(buf, offset)
+ offset += header.decode.bytes
+
+ offset = decodeList(result.questions, question, buf, offset)
+ offset = decodeList(result.answers, answer, buf, offset)
+ offset = decodeList(result.authorities, answer, buf, offset)
+ offset = decodeList(result.additionals, answer, buf, offset)
+
+ exports.decode.bytes = offset - oldOffset
+
+ return result
+}
+
+exports.decode.bytes = 0
+
+exports.encodingLength = function (result) {
+ return header.encodingLength(result) +
+ encodingLengthList(result.questions || [], question) +
+ encodingLengthList(result.answers || [], answer) +
+ encodingLengthList(result.authorities || [], answer) +
+ encodingLengthList(result.additionals || [], answer)
+}
+
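+// DNS over TCP frames each message with a 2-byte big-endian length prefix
+// (RFC 1035 section 4.2.2); streamEncode produces that framed form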
+exports.streamEncode = function (result) {
+ const buf = exports.encode(result)
+ const sbuf = Buffer.allocUnsafe(2)
+ sbuf.writeUInt16BE(buf.byteLength)
+ const combine = Buffer.concat([sbuf, buf])
+ exports.streamEncode.bytes = combine.byteLength
+ return combine
+}
+
+exports.streamEncode.bytes = 0
+
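+// streamDecode strips the length prefix again and returns null when the
+// buffer does not yet hold the whole message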
+exports.streamDecode = function (sbuf) {
+ const len = sbuf.readUInt16BE(0)
+ if (sbuf.byteLength < len + 2) {
+ // not enough data
+ return null
+ }
+ const result = exports.decode(sbuf.slice(2))
+ exports.streamDecode.bytes = exports.decode.bytes
+ return result
+}
+
+exports.streamDecode.bytes = 0
+
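+// shared helpers that walk the section lists, using each codec's
+// encode.bytes / decode.bytes side channel to advance the offset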
+function encodingLengthList (list, enc) {
+ let len = 0
+ for (let i = 0; i < list.length; i++) len += enc.encodingLength(list[i])
+ return len
+}
+
+function encodeList (list, enc, buf, offset) {
+ for (let i = 0; i < list.length; i++) {
+ enc.encode(list[i], buf, offset)
+ offset += enc.encode.bytes
+ }
+ return offset
+}
+
+function decodeList (list, enc, buf, offset) {
+ for (let i = 0; i < list.length; i++) {
+ list[i] = enc.decode(buf, offset)
+ offset += enc.decode.bytes
+ }
+ return offset
+}
diff --git a/testing/xpcshell/dns-packet/opcodes.js b/testing/xpcshell/dns-packet/opcodes.js
new file mode 100644
index 0000000000..32b0a1b4de
--- /dev/null
+++ b/testing/xpcshell/dns-packet/opcodes.js
@@ -0,0 +1,50 @@
+'use strict'
+
+/*
+ * Traditional DNS header OPCODEs (4-bits) defined by IANA in
+ * https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-5
+ */
+
+exports.toString = function (opcode) {
+ switch (opcode) {
+ case 0: return 'QUERY'
+ case 1: return 'IQUERY'
+ case 2: return 'STATUS'
+ case 3: return 'OPCODE_3'
+ case 4: return 'NOTIFY'
+ case 5: return 'UPDATE'
+ case 6: return 'OPCODE_6'
+ case 7: return 'OPCODE_7'
+ case 8: return 'OPCODE_8'
+ case 9: return 'OPCODE_9'
+ case 10: return 'OPCODE_10'
+ case 11: return 'OPCODE_11'
+ case 12: return 'OPCODE_12'
+ case 13: return 'OPCODE_13'
+ case 14: return 'OPCODE_14'
+ case 15: return 'OPCODE_15'
+ }
+ return 'OPCODE_' + opcode
+}
+
+exports.toOpcode = function (code) {
+ switch (code.toUpperCase()) {
+ case 'QUERY': return 0
+ case 'IQUERY': return 1
+ case 'STATUS': return 2
+ case 'OPCODE_3': return 3
+ case 'NOTIFY': return 4
+ case 'UPDATE': return 5
+ case 'OPCODE_6': return 6
+ case 'OPCODE_7': return 7
+ case 'OPCODE_8': return 8
+ case 'OPCODE_9': return 9
+ case 'OPCODE_10': return 10
+ case 'OPCODE_11': return 11
+ case 'OPCODE_12': return 12
+ case 'OPCODE_13': return 13
+ case 'OPCODE_14': return 14
+ case 'OPCODE_15': return 15
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/optioncodes.js b/testing/xpcshell/dns-packet/optioncodes.js
new file mode 100644
index 0000000000..a683ce81e6
--- /dev/null
+++ b/testing/xpcshell/dns-packet/optioncodes.js
@@ -0,0 +1,61 @@
+'use strict'
+
+exports.toString = function (type) {
+ switch (type) {
+ // list at
+ // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-11
+ case 1: return 'LLQ'
+ case 2: return 'UL'
+ case 3: return 'NSID'
+ case 5: return 'DAU'
+ case 6: return 'DHU'
+ case 7: return 'N3U'
+ case 8: return 'CLIENT_SUBNET'
+ case 9: return 'EXPIRE'
+ case 10: return 'COOKIE'
+ case 11: return 'TCP_KEEPALIVE'
+ case 12: return 'PADDING'
+ case 13: return 'CHAIN'
+ case 14: return 'KEY_TAG'
+ case 15: return 'EDNS_ERROR'
+ case 26946: return 'DEVICEID'
+ }
+ if (type < 0) {
+ return null
+ }
+ return `OPTION_${type}`
+}
+
+exports.toCode = function (name) {
+ if (typeof name === 'number') {
+ return name
+ }
+ if (!name) {
+ return -1
+ }
+ switch (name.toUpperCase()) {
+ case 'OPTION_0': return 0
+ case 'LLQ': return 1
+ case 'UL': return 2
+ case 'NSID': return 3
+ case 'OPTION_4': return 4
+ case 'DAU': return 5
+ case 'DHU': return 6
+ case 'N3U': return 7
+ case 'CLIENT_SUBNET': return 8
+ case 'EXPIRE': return 9
+ case 'COOKIE': return 10
+ case 'TCP_KEEPALIVE': return 11
+ case 'PADDING': return 12
+ case 'CHAIN': return 13
+ case 'KEY_TAG': return 14
+ case 'EDNS_ERROR': return 15
+ case 'DEVICEID': return 26946
+ case 'OPTION_65535': return 65535
+ }
+ const m = name.match(/_(\d+)$/)
+ if (m) {
+ return parseInt(m[1], 10)
+ }
+ return -1
+}
diff --git a/testing/xpcshell/dns-packet/package.json b/testing/xpcshell/dns-packet/package.json
new file mode 100644
index 0000000000..31a859fc2b
--- /dev/null
+++ b/testing/xpcshell/dns-packet/package.json
@@ -0,0 +1,48 @@
+{
+ "name": "dns-packet",
+ "version": "5.2.1",
+ "description": "An abstract-encoding compliant module for encoding / decoding DNS packets",
+ "author": "Mathias Buus",
+ "license": "MIT",
+ "repository": "mafintosh/dns-packet",
+ "homepage": "https://github.com/mafintosh/dns-packet",
+ "engines": {
+ "node": ">=6"
+ },
+ "scripts": {
+ "clean": "rm -rf coverage .nyc_output/",
+ "lint": "eslint --color *.js examples/*.js",
+ "pretest": "npm run lint",
+ "test": "tape test.js",
+ "coverage": "nyc -r html npm test"
+ },
+ "dependencies": {
+ "ip": "^1.1.5"
+ },
+ "devDependencies": {
+ "eslint": "^5.14.1",
+ "eslint-config-standard": "^12.0.0",
+ "eslint-plugin-import": "^2.16.0",
+ "eslint-plugin-node": "^8.0.1",
+ "eslint-plugin-promise": "^4.0.1",
+ "eslint-plugin-standard": "^4.0.0",
+ "nyc": "^13.3.0",
+ "tape": "^4.10.1"
+ },
+ "keywords": [
+ "dns",
+ "packet",
+ "encodings",
+ "encoding",
+ "encoder",
+ "abstract-encoding"
+ ],
+ "files": [
+ "index.js",
+ "types.js",
+ "rcodes.js",
+ "opcodes.js",
+ "classes.js",
+ "optioncodes.js"
+ ]
+}
diff --git a/testing/xpcshell/dns-packet/rcodes.js b/testing/xpcshell/dns-packet/rcodes.js
new file mode 100644
index 0000000000..0500887c2a
--- /dev/null
+++ b/testing/xpcshell/dns-packet/rcodes.js
@@ -0,0 +1,50 @@
+'use strict'
+
+/*
+ * Traditional DNS header RCODEs (4-bits) defined by IANA in
+ * https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
+ */
+
+exports.toString = function (rcode) {
+ switch (rcode) {
+ case 0: return 'NOERROR'
+ case 1: return 'FORMERR'
+ case 2: return 'SERVFAIL'
+ case 3: return 'NXDOMAIN'
+ case 4: return 'NOTIMP'
+ case 5: return 'REFUSED'
+ case 6: return 'YXDOMAIN'
+ case 7: return 'YXRRSET'
+ case 8: return 'NXRRSET'
+ case 9: return 'NOTAUTH'
+ case 10: return 'NOTZONE'
+ case 11: return 'RCODE_11'
+ case 12: return 'RCODE_12'
+ case 13: return 'RCODE_13'
+ case 14: return 'RCODE_14'
+ case 15: return 'RCODE_15'
+ }
+ return 'RCODE_' + rcode
+}
+
+exports.toRcode = function (code) {
+ switch (code.toUpperCase()) {
+ case 'NOERROR': return 0
+ case 'FORMERR': return 1
+ case 'SERVFAIL': return 2
+ case 'NXDOMAIN': return 3
+ case 'NOTIMP': return 4
+ case 'REFUSED': return 5
+ case 'YXDOMAIN': return 6
+ case 'YXRRSET': return 7
+ case 'NXRRSET': return 8
+ case 'NOTAUTH': return 9
+ case 'NOTZONE': return 10
+ case 'RCODE_11': return 11
+ case 'RCODE_12': return 12
+ case 'RCODE_13': return 13
+ case 'RCODE_14': return 14
+ case 'RCODE_15': return 15
+ }
+ return 0
+}
diff --git a/testing/xpcshell/dns-packet/test.js b/testing/xpcshell/dns-packet/test.js
new file mode 100644
index 0000000000..adf4757dae
--- /dev/null
+++ b/testing/xpcshell/dns-packet/test.js
@@ -0,0 +1,613 @@
+'use strict'
+
+const tape = require('tape')
+const packet = require('./')
+const rcodes = require('./rcodes')
+const opcodes = require('./opcodes')
+const optioncodes = require('./optioncodes')
+
+tape('unknown', function (t) {
+ testEncoder(t, packet.unknown, Buffer.from('hello world'))
+ t.end()
+})
+
+tape('txt', function (t) {
+ testEncoder(t, packet.txt, [])
+ testEncoder(t, packet.txt, ['hello world'])
+ testEncoder(t, packet.txt, ['hello', 'world'])
+ testEncoder(t, packet.txt, [Buffer.from([0, 1, 2, 3, 4, 5])])
+ testEncoder(t, packet.txt, ['a', 'b', Buffer.from([0, 1, 2, 3, 4, 5])])
+ testEncoder(t, packet.txt, ['', Buffer.allocUnsafe(0)])
+ t.end()
+})
+
+tape('txt-scalar-string', function (t) {
+ const buf = packet.txt.encode('hi')
+ const val = packet.txt.decode(buf)
+ t.ok(val.length === 1, 'array length')
+ t.ok(val[0].toString() === 'hi', 'data')
+ t.end()
+})
+
+tape('txt-scalar-buffer', function (t) {
+ const data = Buffer.from([0, 1, 2, 3, 4, 5])
+ const buf = packet.txt.encode(data)
+ const val = packet.txt.decode(buf)
+ t.ok(val.length === 1, 'array length')
+ t.ok(val[0].equals(data), 'data')
+ t.end()
+})
+
+tape('txt-invalid-data', function (t) {
+ t.throws(function () { packet.txt.encode(null) }, 'null')
+ t.throws(function () { packet.txt.encode(undefined) }, 'undefined')
+ t.throws(function () { packet.txt.encode(10) }, 'number')
+ t.end()
+})
+
+tape('null', function (t) {
+ testEncoder(t, packet.null, Buffer.from([0, 1, 2, 3, 4, 5]))
+ t.end()
+})
+
+tape('hinfo', function (t) {
+ testEncoder(t, packet.hinfo, { cpu: 'intel', os: 'best one' })
+ t.end()
+})
+
+tape('ptr', function (t) {
+ testEncoder(t, packet.ptr, 'hello.world.com')
+ t.end()
+})
+
+tape('cname', function (t) {
+ testEncoder(t, packet.cname, 'hello.cname.world.com')
+ t.end()
+})
+
+tape('dname', function (t) {
+ testEncoder(t, packet.dname, 'hello.dname.world.com')
+ t.end()
+})
+
+tape('srv', function (t) {
+ testEncoder(t, packet.srv, { port: 9999, target: 'hello.world.com' })
+ testEncoder(t, packet.srv, { port: 9999, target: 'hello.world.com', priority: 42, weight: 10 })
+ t.end()
+})
+
+tape('caa', function (t) {
+ testEncoder(t, packet.caa, { flags: 128, tag: 'issue', value: 'letsencrypt.org', issuerCritical: true })
+ testEncoder(t, packet.caa, { tag: 'issue', value: 'letsencrypt.org', issuerCritical: true })
+ testEncoder(t, packet.caa, { tag: 'issue', value: 'letsencrypt.org' })
+ t.end()
+})
+
+tape('mx', function (t) {
+ testEncoder(t, packet.mx, { preference: 10, exchange: 'mx.hello.world.com' })
+ testEncoder(t, packet.mx, { exchange: 'mx.hello.world.com' })
+ t.end()
+})
+
+tape('ns', function (t) {
+ testEncoder(t, packet.ns, 'ns.world.com')
+ t.end()
+})
+
+tape('soa', function (t) {
+ testEncoder(t, packet.soa, {
+ mname: 'hello.world.com',
+ rname: 'root.hello.world.com',
+ serial: 2018010400,
+ refresh: 14400,
+ retry: 3600,
+ expire: 604800,
+ minimum: 3600
+ })
+ t.end()
+})
+
+tape('a', function (t) {
+ testEncoder(t, packet.a, '127.0.0.1')
+ t.end()
+})
+
+tape('aaaa', function (t) {
+ testEncoder(t, packet.aaaa, 'fe80::1')
+ t.end()
+})
+
+tape('query', function (t) {
+ testEncoder(t, packet, {
+ type: 'query',
+ questions: [{
+ type: 'A',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'query',
+ id: 42,
+ questions: [{
+ type: 'A',
+ class: 'IN',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'query',
+ id: 42,
+ questions: [{
+ type: 'A',
+ class: 'CH',
+ name: 'hello.a.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com'
+ }]
+ })
+
+ t.end()
+})
+
+tape('response', function (t) {
+ testEncoder(t, packet, {
+ type: 'response',
+ answers: [{
+ type: 'A',
+ class: 'IN',
+ flush: true,
+ name: 'hello.a.com',
+ data: '127.0.0.1'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ flags: packet.TRUNCATED_RESPONSE,
+ answers: [{
+ type: 'A',
+ class: 'IN',
+ name: 'hello.a.com',
+ data: '127.0.0.1'
+ }, {
+ type: 'SRV',
+ class: 'IN',
+ name: 'hello.srv.com',
+ data: {
+ port: 9090,
+ target: 'hello.target.com'
+ }
+ }, {
+ type: 'CNAME',
+ class: 'IN',
+ name: 'hello.cname.com',
+ data: 'hello.other.domain.com'
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ id: 100,
+ flags: 0,
+ additionals: [{
+ type: 'AAAA',
+ name: 'hello.a.com',
+ data: 'fe80::1'
+ }, {
+ type: 'PTR',
+ name: 'hello.ptr.com',
+ data: 'hello.other.ptr.com'
+ }, {
+ type: 'SRV',
+ name: 'hello.srv.com',
+ ttl: 42,
+ data: {
+ port: 9090,
+ target: 'hello.target.com'
+ }
+ }],
+ answers: [{
+ type: 'NULL',
+ name: 'hello.null.com',
+ data: Buffer.from([1, 2, 3, 4, 5])
+ }]
+ })
+
+ testEncoder(t, packet, {
+ type: 'response',
+ answers: [{
+ type: 'TXT',
+ name: 'emptytxt.com',
+ data: ''
+ }]
+ })
+
+ t.end()
+})
+
+tape('rcode', function (t) {
+ const errors = ['NOERROR', 'FORMERR', 'SERVFAIL', 'NXDOMAIN', 'NOTIMP', 'REFUSED', 'YXDOMAIN', 'YXRRSET', 'NXRRSET', 'NOTAUTH', 'NOTZONE', 'RCODE_11', 'RCODE_12', 'RCODE_13', 'RCODE_14', 'RCODE_15']
+ for (const i in errors) {
+ const code = rcodes.toRcode(errors[i])
+ t.ok(errors[i] === rcodes.toString(code), 'rcode conversion from/to string matches: ' + rcodes.toString(code))
+ }
+
+ const ops = ['QUERY', 'IQUERY', 'STATUS', 'OPCODE_3', 'NOTIFY', 'UPDATE', 'OPCODE_6', 'OPCODE_7', 'OPCODE_8', 'OPCODE_9', 'OPCODE_10', 'OPCODE_11', 'OPCODE_12', 'OPCODE_13', 'OPCODE_14', 'OPCODE_15']
+ for (const j in ops) {
+ const ocode = opcodes.toOpcode(ops[j])
+ t.ok(ops[j] === opcodes.toString(ocode), 'opcode conversion from/to string matches: ' + opcodes.toString(ocode))
+ }
+
+ const buf = packet.encode({
+ type: 'response',
+ id: 45632,
+ flags: 0x8480,
+ answers: [{
+ type: 'A',
+ name: 'hello.example.net',
+ data: '127.0.0.1'
+ }]
+ })
+ const val = packet.decode(buf)
+ t.ok(val.type === 'response', 'decode type')
+ t.ok(val.opcode === 'QUERY', 'decode opcode')
+ t.ok(val.flag_qr === true, 'decode flag_qr')
+ t.ok(val.flag_aa === true, 'decode flag_aa')
+ t.ok(val.flag_tc === false, 'decode flag_tc')
+ t.ok(val.flag_rd === false, 'decode flag_rd')
+ t.ok(val.flag_ra === true, 'decode flag_ra')
+ t.ok(val.flag_z === false, 'decode flag_z')
+ t.ok(val.flag_ad === false, 'decode flag_ad')
+ t.ok(val.flag_cd === false, 'decode flag_cd')
+ t.ok(val.rcode === 'NOERROR', 'decode rcode')
+ t.end()
+})
+
+tape('name_encoding', function (t) {
+ let data = 'foo.example.com'
+ const buf = Buffer.allocUnsafe(255)
+ let offset = 0
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 17, 'name encoding length matches')
+ let dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = 'com'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 5, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = 'example.com.'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 13, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data.slice(0, -1) === dd, 'encode/decode matches')
+ offset += packet.name.encode.bytes
+
+ data = '.'
+ packet.name.encode(data, buf, offset)
+ t.ok(packet.name.encode.bytes === 1, 'name encoding length matches')
+ dd = packet.name.decode(buf, offset)
+ t.ok(data === dd, 'encode/decode matches')
+ t.end()
+})
+
+tape('stream', function (t) {
+ const val = {
+ type: 'query',
+ id: 45632,
+ flags: 0x8480,
+ answers: [{
+ type: 'A',
+ name: 'test2.example.net',
+ data: '198.51.100.1'
+ }]
+ }
+ const buf = packet.streamEncode(val)
+ const val2 = packet.streamDecode(buf)
+
+ t.same(buf.length, packet.streamEncode.bytes, 'streamEncode.bytes was set correctly')
+ t.ok(compare(t, val2.type, val.type), 'streamDecoded type match')
+ t.ok(compare(t, val2.id, val.id), 'streamDecoded id match')
+ t.ok(parseInt(val2.flags) === parseInt(val.flags & 0x7FFF), 'streamDecoded flags match')
+ const answer = val.answers[0]
+ const answer2 = val2.answers[0]
+ t.ok(compare(t, answer.type, answer2.type), 'streamDecoded RR type match')
+ t.ok(compare(t, answer.name, answer2.name), 'streamDecoded RR name match')
+ t.ok(compare(t, answer.data, answer2.data), 'streamDecoded RR rdata match')
+ t.end()
+})
+
+tape('opt', function (t) {
+ const val = {
+ type: 'query',
+ questions: [{
+ type: 'A',
+ name: 'hello.a.com'
+ }],
+ additionals: [{
+ type: 'OPT',
+ name: '.',
+ udpPayloadSize: 1024
+ }]
+ }
+ testEncoder(t, packet, val)
+ let buf = packet.encode(val)
+ let val2 = packet.decode(buf)
+ const additional1 = val.additionals[0]
+ let additional2 = val2.additionals[0]
+ t.ok(compare(t, additional1.name, additional2.name), 'name matches')
+ t.ok(compare(t, additional1.udpPayloadSize, additional2.udpPayloadSize), 'udp payload size matches')
+ t.ok(compare(t, 0, additional2.flags), 'flags match')
+ additional1.flags = packet.DNSSEC_OK
+ additional1.extendedRcode = 0x80
+ additional1.options = [ {
+ code: 'CLIENT_SUBNET', // edns-client-subnet, see RFC 7871
+ ip: 'fe80::',
+ sourcePrefixLength: 64
+ }, {
+ code: 8, // still ECS
+ ip: '5.6.0.0',
+ sourcePrefixLength: 16,
+ scopePrefixLength: 16
+ }, {
+ code: 'padding',
+ length: 31
+ }, {
+ code: 'TCP_KEEPALIVE'
+ }, {
+ code: 'tcp_keepalive',
+ timeout: 150
+ }, {
+ code: 'KEY_TAG',
+ tags: [1, 82, 987]
+ }]
+ buf = packet.encode(val)
+ val2 = packet.decode(buf)
+ additional2 = val2.additionals[0]
+ t.ok(compare(t, 1 << 15, additional2.flags), 'DO bit set in flags')
+ t.ok(compare(t, true, additional2.flag_do), 'DO bit set')
+ t.ok(compare(t, additional1.extendedRcode, additional2.extendedRcode), 'extended rcode matches')
+ t.ok(compare(t, 8, additional2.options[0].code))
+ t.ok(compare(t, 'fe80::', additional2.options[0].ip))
+ t.ok(compare(t, 64, additional2.options[0].sourcePrefixLength))
+ t.ok(compare(t, '5.6.0.0', additional2.options[1].ip))
+ t.ok(compare(t, 16, additional2.options[1].sourcePrefixLength))
+ t.ok(compare(t, 16, additional2.options[1].scopePrefixLength))
+ t.ok(compare(t, additional1.options[2].length, additional2.options[2].data.length))
+ t.ok(compare(t, additional1.options[3].timeout, undefined))
+ t.ok(compare(t, additional1.options[4].timeout, additional2.options[4].timeout))
+ t.ok(compare(t, additional1.options[5].tags, additional2.options[5].tags))
+ t.end()
+})
+
+tape('dnskey', function (t) {
+ testEncoder(t, packet.dnskey, {
+ flags: packet.dnskey.SECURE_ENTRYPOINT | packet.dnskey.ZONE_KEY,
+ algorithm: 1,
+ key: Buffer.from([0, 1, 2, 3, 4, 5])
+ })
+ t.end()
+})
+
+tape('rrsig', function (t) {
+ const testRRSIG = {
+ typeCovered: 'A',
+ algorithm: 1,
+ labels: 2,
+ originalTTL: 3600,
+ expiration: 1234,
+ inception: 1233,
+ keyTag: 2345,
+ signersName: 'foo.com',
+ signature: Buffer.from([0, 1, 2, 3, 4, 5])
+ }
+ testEncoder(t, packet.rrsig, testRRSIG)
+
+ // Check the signature length is correct with extra junk at the end
+ const buf = Buffer.allocUnsafe(packet.rrsig.encodingLength(testRRSIG) + 4)
+ packet.rrsig.encode(testRRSIG, buf)
+ const val2 = packet.rrsig.decode(buf)
+ t.ok(compare(t, testRRSIG, val2))
+
+ t.end()
+})
+
+tape('rrp', function (t) {
+ testEncoder(t, packet.rp, {
+ mbox: 'foo.bar.com',
+ txt: 'baz.bar.com'
+ })
+ testEncoder(t, packet.rp, {
+ mbox: 'foo.bar.com'
+ })
+ testEncoder(t, packet.rp, {
+ txt: 'baz.bar.com'
+ })
+ testEncoder(t, packet.rp, {})
+ t.end()
+})
+
+tape('nsec', function (t) {
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT'] // 16
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TKEY'] // 249
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['RRSIG', 'NSEC']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT', 'RRSIG']
+ })
+ testEncoder(t, packet.nsec, {
+ nextDomain: 'foo.com',
+ rrtypes: ['TXT', 'NSEC']
+ })
+
+ // Test with the sample NSEC from https://tools.ietf.org/html/rfc4034#section-4.3
+ var sampleNSEC = Buffer.from('003704686f7374076578616d706c6503636f6d00' +
+ '0006400100000003041b000000000000000000000000000000000000000000000' +
+ '000000020', 'hex')
+ var decoded = packet.nsec.decode(sampleNSEC)
+ t.ok(compare(t, decoded, {
+ nextDomain: 'host.example.com',
+ rrtypes: ['A', 'MX', 'RRSIG', 'NSEC', 'UNKNOWN_1234']
+ }))
+ var reencoded = packet.nsec.encode(decoded)
+ t.same(sampleNSEC.length, reencoded.length)
+ t.same(sampleNSEC, reencoded)
+ t.end()
+})
+
+tape('nsec3', function (t) {
+ testEncoder(t, packet.nsec3, {
+ algorithm: 1,
+ flags: 0,
+ iterations: 257,
+ salt: Buffer.from([42, 42, 42]),
+ nextDomain: Buffer.from([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]),
+ rrtypes: ['A', 'DNSKEY', 'CAA', 'DLV']
+ })
+ t.end()
+})
+
+tape('ds', function (t) {
+ testEncoder(t, packet.ds, {
+ keyTag: 1234,
+ algorithm: 1,
+ digestType: 1,
+ digest: Buffer.from([0, 1, 2, 3, 4, 5])
+ })
+ t.end()
+})
+
+tape('unpack', function (t) {
+ const buf = Buffer.from([
+ 0x00, 0x79,
+ 0xde, 0xad, 0x85, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x02, 0x00, 0x02, 0x02, 0x6f, 0x6a, 0x05,
+ 0x62, 0x61, 0x6e, 0x67, 0x6a, 0x03, 0x63, 0x6f,
+ 0x6d, 0x00, 0x00, 0x01, 0x00, 0x01, 0xc0, 0x0c,
+ 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10,
+ 0x00, 0x04, 0x81, 0xfa, 0x0b, 0xaa, 0xc0, 0x0f,
+ 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10,
+ 0x00, 0x05, 0x02, 0x63, 0x6a, 0xc0, 0x0f, 0xc0,
+ 0x0f, 0x00, 0x02, 0x00, 0x01, 0x00, 0x00, 0x0e,
+ 0x10, 0x00, 0x02, 0xc0, 0x0c, 0xc0, 0x3a, 0x00,
+ 0x01, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10, 0x00,
+ 0x04, 0x45, 0x4d, 0x9b, 0x9c, 0xc0, 0x0c, 0x00,
+ 0x1c, 0x00, 0x01, 0x00, 0x00, 0x0e, 0x10, 0x00,
+ 0x10, 0x20, 0x01, 0x04, 0x18, 0x00, 0x00, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xf9
+ ])
+ const val = packet.streamDecode(buf)
+ const answer = val.answers[0]
+ const authority = val.authorities[1]
+ t.ok(val.rcode === 'NOERROR', 'decode rcode')
+ t.ok(compare(t, answer.type, 'A'), 'streamDecoded RR type match')
+ t.ok(compare(t, answer.name, 'oj.bangj.com'), 'streamDecoded RR name match')
+ t.ok(compare(t, answer.data, '129.250.11.170'), 'streamDecoded RR rdata match')
+ t.ok(compare(t, authority.type, 'NS'), 'streamDecoded RR type match')
+ t.ok(compare(t, authority.name, 'bangj.com'), 'streamDecoded RR name match')
+ t.ok(compare(t, authority.data, 'oj.bangj.com'), 'streamDecoded RR rdata match')
+ t.end()
+})
+
+tape('optioncodes', function (t) {
+ const opts = [
+ [0, 'OPTION_0'],
+ [1, 'LLQ'],
+ [2, 'UL'],
+ [3, 'NSID'],
+ [4, 'OPTION_4'],
+ [5, 'DAU'],
+ [6, 'DHU'],
+ [7, 'N3U'],
+ [8, 'CLIENT_SUBNET'],
+ [9, 'EXPIRE'],
+ [10, 'COOKIE'],
+ [11, 'TCP_KEEPALIVE'],
+ [12, 'PADDING'],
+ [13, 'CHAIN'],
+ [14, 'KEY_TAG'],
+ [26946, 'DEVICEID'],
+ [65535, 'OPTION_65535'],
+ [64000, 'OPTION_64000'],
+ [65002, 'OPTION_65002'],
+ [-1, null]
+ ]
+ for (const [code, str] of opts) {
+ const s = optioncodes.toString(code)
+ t.ok(compare(t, s, str), `${code} => ${str}`)
+ t.ok(compare(t, optioncodes.toCode(s), code), `${str} => ${code}`)
+ }
+ t.ok(compare(t, optioncodes.toCode('INVALIDINVALID'), -1))
+ t.end()
+})
+
+function testEncoder (t, rpacket, val) {
+ const buf = rpacket.encode(val)
+ const val2 = rpacket.decode(buf)
+
+ t.same(buf.length, rpacket.encode.bytes, 'encode.bytes was set correctly')
+ t.same(buf.length, rpacket.encodingLength(val), 'encoding length matches')
+ t.ok(compare(t, val, val2), 'decoded object match')
+
+ const buf2 = rpacket.encode(val2)
+ const val3 = rpacket.decode(buf2)
+
+ t.same(buf2.length, rpacket.encode.bytes, 'encode.bytes was set correctly on re-encode')
+ t.same(buf2.length, rpacket.encodingLength(val), 'encoding length matches on re-encode')
+
+ t.ok(compare(t, val, val3), 'decoded object match on re-encode')
+ t.ok(compare(t, val2, val3), 're-encoded decoded object match on re-encode')
+
+ const bigger = Buffer.allocUnsafe(buf2.length + 10)
+
+ const buf3 = rpacket.encode(val, bigger, 10)
+ const val4 = rpacket.decode(buf3, 10)
+
+ t.ok(buf3 === bigger, 'echoes buffer on external buffer')
+ t.same(rpacket.encode.bytes, buf.length, 'encode.bytes is the same on external buffer')
+ t.ok(compare(t, val, val4), 'decoded object match on external buffer')
+}
+
+function compare (t, a, b) {
+ if (Buffer.isBuffer(a)) return a.toString('hex') === b.toString('hex')
+ if (typeof a === 'object' && a && b) {
+ const keys = Object.keys(a)
+ for (let i = 0; i < keys.length; i++) {
+ if (!compare(t, a[keys[i]], b[keys[i]])) {
+ return false
+ }
+ }
+ } else if (Array.isArray(b) && !Array.isArray(a)) {
+ // TXT always decode as array
+ return a.toString() === b[0].toString()
+ } else {
+ return a === b
+ }
+ return true
+}
diff --git a/testing/xpcshell/dns-packet/types.js b/testing/xpcshell/dns-packet/types.js
new file mode 100644
index 0000000000..110705b160
--- /dev/null
+++ b/testing/xpcshell/dns-packet/types.js
@@ -0,0 +1,105 @@
+'use strict'
+
+exports.toString = function (type) {
+ switch (type) {
+ case 1: return 'A'
+ case 10: return 'NULL'
+ case 28: return 'AAAA'
+ case 18: return 'AFSDB'
+ case 42: return 'APL'
+ case 257: return 'CAA'
+ case 60: return 'CDNSKEY'
+ case 59: return 'CDS'
+ case 37: return 'CERT'
+ case 5: return 'CNAME'
+ case 49: return 'DHCID'
+ case 32769: return 'DLV'
+ case 39: return 'DNAME'
+ case 48: return 'DNSKEY'
+ case 43: return 'DS'
+ case 55: return 'HIP'
+ case 13: return 'HINFO'
+ case 45: return 'IPSECKEY'
+ case 25: return 'KEY'
+ case 36: return 'KX'
+ case 29: return 'LOC'
+ case 15: return 'MX'
+ case 35: return 'NAPTR'
+ case 2: return 'NS'
+ case 47: return 'NSEC'
+ case 50: return 'NSEC3'
+ case 51: return 'NSEC3PARAM'
+ case 12: return 'PTR'
+ case 46: return 'RRSIG'
+ case 17: return 'RP'
+ case 24: return 'SIG'
+ case 6: return 'SOA'
+ case 99: return 'SPF'
+ case 33: return 'SRV'
+ case 44: return 'SSHFP'
+ case 32768: return 'TA'
+ case 249: return 'TKEY'
+ case 52: return 'TLSA'
+ case 250: return 'TSIG'
+ case 16: return 'TXT'
+ case 252: return 'AXFR'
+ case 251: return 'IXFR'
+ case 41: return 'OPT'
+ case 255: return 'ANY'
+ case 65: return 'HTTPS'
+ }
+ return 'UNKNOWN_' + type
+}
+
+exports.toType = function (name) {
+ switch (name.toUpperCase()) {
+ case 'A': return 1
+ case 'NULL': return 10
+ case 'AAAA': return 28
+ case 'AFSDB': return 18
+ case 'APL': return 42
+ case 'CAA': return 257
+ case 'CDNSKEY': return 60
+ case 'CDS': return 59
+ case 'CERT': return 37
+ case 'CNAME': return 5
+ case 'DHCID': return 49
+ case 'DLV': return 32769
+ case 'DNAME': return 39
+ case 'DNSKEY': return 48
+ case 'DS': return 43
+ case 'HIP': return 55
+ case 'HINFO': return 13
+ case 'IPSECKEY': return 45
+ case 'KEY': return 25
+ case 'KX': return 36
+ case 'LOC': return 29
+ case 'MX': return 15
+ case 'NAPTR': return 35
+ case 'NS': return 2
+ case 'NSEC': return 47
+ case 'NSEC3': return 50
+ case 'NSEC3PARAM': return 51
+ case 'PTR': return 12
+ case 'RRSIG': return 46
+ case 'RP': return 17
+ case 'SIG': return 24
+ case 'SOA': return 6
+ case 'SPF': return 99
+ case 'SRV': return 33
+ case 'SSHFP': return 44
+ case 'TA': return 32768
+ case 'TKEY': return 249
+ case 'TLSA': return 52
+ case 'TSIG': return 250
+ case 'TXT': return 16
+ case 'AXFR': return 252
+ case 'IXFR': return 251
+ case 'OPT': return 41
+ case 'ANY': return 255
+ case 'HTTPS': return 65
+ case '*': return 255
+ }
+ if (name.toUpperCase().startsWith('UNKNOWN_')) return parseInt(name.slice(8))
+ return 0
+}