author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 02:25:50 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 02:25:50 +0000
commit 19f4f86bfed21c5326ed2acebe1163f3a83e832b (patch)
tree d59b9989ce55ed23693e80974d94c856f1c2c8b1 /tools
parent Initial commit. (diff)
Adding upstream version 241.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools')
-rwxr-xr-x  tools/add-git-hook.sh             12
-rwxr-xr-x  tools/catalog-report.py           84
-rwxr-xr-x  tools/check-directives.sh         21
-rwxr-xr-x  tools/check-includes.pl           23
-rwxr-xr-x  tools/choose-default-locale.sh    12
-rwxr-xr-x  tools/coverity.sh                233
-rwxr-xr-x  tools/find-build-dir.sh           32
-rwxr-xr-x  tools/find-double-newline.sh      42
-rwxr-xr-x  tools/find-tabs.sh                42
-rw-r--r--  tools/gdb-sd_dump_hashmaps.py     79
-rwxr-xr-x  tools/generate-gperfs.py          24
-rwxr-xr-x  tools/make-directive-index.py    307
-rwxr-xr-x  tools/make-man-index.py          110
-rwxr-xr-x  tools/make-man-rules.py           84
-rwxr-xr-x  tools/meson-apply-m4.sh           24
-rwxr-xr-x  tools/meson-build.sh              20
-rwxr-xr-x  tools/meson-check-api-docs.sh     34
-rwxr-xr-x  tools/meson-check-compilation.sh   4
-rwxr-xr-x  tools/meson-check-help.sh         23
-rwxr-xr-x  tools/meson-git-contrib.sh         9
-rwxr-xr-x  tools/meson-hwdb-update.sh        32
-rwxr-xr-x  tools/meson-make-symlink.sh       12
-rwxr-xr-x  tools/meson-vcs-tag.sh            18
-rwxr-xr-x  tools/oss-fuzz.sh                 56
-rwxr-xr-x  tools/xml_helper.py               20
25 files changed, 1357 insertions, 0 deletions
diff --git a/tools/add-git-hook.sh b/tools/add-git-hook.sh
new file mode 100755
index 0000000..c1db99b
--- /dev/null
+++ b/tools/add-git-hook.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+
+cd "$MESON_SOURCE_ROOT"
+
+if [ ! -f .git/hooks/pre-commit.sample -o -f .git/hooks/pre-commit ]; then
+ exit 2 # not needed
+fi
+
+cp -p .git/hooks/pre-commit.sample .git/hooks/pre-commit
+chmod +x .git/hooks/pre-commit
+echo 'Activated pre-commit hook'
diff --git a/tools/catalog-report.py b/tools/catalog-report.py
new file mode 100755
index 0000000..ca1e13d
--- /dev/null
+++ b/tools/catalog-report.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: MIT
+#
+# This file is distributed under the MIT license, see below.
+#
+# The MIT License (MIT)
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""
+Prints out journal entries with no or bad catalog explanations.
+"""
+
+import re
+from systemd import journal, id128
+
+j = journal.Reader()
+
+logged = set()
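+# catalog text refers to journal fields with @FIELD_NAME@ placeholders; collect them for checking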
+pattern = re.compile('@[A-Z0-9_]+@')
+
+mids = {v:k for k,v in id128.__dict__.items()
+ if k.startswith('SD_MESSAGE')}
+
+freq = 1000
+
+def log_entry(x):
+ if 'CODE_FILE' in x:
+ # some of our code was using 'CODE_FUNCTION' instead of 'CODE_FUNC'
+ print('{}:{} {}'.format(x.get('CODE_FILE', '???'),
+ x.get('CODE_LINE', '???'),
+ x.get('CODE_FUNC', None) or x.get('CODE_FUNCTION', '???')))
+ print(' {}'.format(x.get('MESSAGE', 'no message!')))
+ for k, v in x.items():
+ if k.startswith('CODE_') or k in {'MESSAGE_ID', 'MESSAGE'}:
+ continue
+ print(' {}={}'.format(k, v))
+ print()
+
+for i, x in enumerate(j):
+ if i % freq == 0:
+ print(i, end='\r')
+
+ try:
+ mid = x['MESSAGE_ID']
+ except KeyError:
+ continue
+ name = mids.get(mid, 'unknown')
+
+ try:
+ desc = journal.get_catalog(mid)
+ except FileNotFoundError:
+ if mid in logged:
+ continue
+
+ print('{} {.hex}: no catalog entry'.format(name, mid))
+ log_entry(x)
+ logged.add(mid)
+ continue
+
+ fields = [field[1:-1] for field in pattern.findall(desc)]
+ for field in fields:
+ index = (mid, field)
+ if field in x or index in logged:
+ continue
+ print('{} {.hex}: no field {}'.format(name, mid, field))
+ log_entry(x)
+ logged.add(index)
diff --git a/tools/check-directives.sh b/tools/check-directives.sh
new file mode 100755
index 0000000..e2fd388
--- /dev/null
+++ b/tools/check-directives.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+function generate_directives() {
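+ # collect "Section.Directive" pairs from the gperf file and print them as "[Section]" headers followed by "Directive=" lines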
+ perl -aF'/[\s,]+/' -ne '
+ if (my ($s, $d) = ($F[0] =~ /^([^\s\.]+)\.([^\s\.]+)$/)) { $d{$s}{"$d="} = 1; }
+ END { while (my ($key, $value) = each %d) {
+ printf "[%s]\n%s\n", $key, join("\n", keys(%$value))
+ }}' "$1"
+}
+
+if [[ $(generate_directives src/network/networkd-network-gperf.gperf | wc -l) -ne $(wc -l <test/fuzz/fuzz-network-parser/directives.network) ]]; then
+ echo "Looks like test/fuzz/fuzz-network-parser/directives.network hasn't been updated"
+ exit 1
+fi
+
+if [[ $(generate_directives src/network/netdev/netdev-gperf.gperf | wc -l) -ne $(wc -l <test/fuzz/fuzz-netdev-parser/directives.netdev) ]]; then
+ echo "Looks like test/fuzz/fuzz-netdev-parser/directives.netdev hasn't been updated"
+ exit 1
+fi
diff --git a/tools/check-includes.pl b/tools/check-includes.pl
new file mode 100755
index 0000000..c8bfcba
--- /dev/null
+++ b/tools/check-includes.pl
@@ -0,0 +1,23 @@
+#!/usr/bin/env perl
+# SPDX-License-Identifier: CC0-1.0
+#
+# checkincludes: Find files included more than once in (other) files.
+
+foreach $file (@ARGV) {
+ open(FILE, $file) or die "Cannot open $file: $!.\n";
+
+ my %includedfiles = ();
+
+ while (<FILE>) {
+ if (m/^\s*#\s*include\s*[<"](\S*)[>"]/o) {
+ ++$includedfiles{$1};
+ }
+ }
+ foreach $filename (keys %includedfiles) {
+ if ($includedfiles{$filename} > 1) {
+ print "$file: $filename is included more than once.\n";
+ }
+ }
+
+ close(FILE);
+}
diff --git a/tools/choose-default-locale.sh b/tools/choose-default-locale.sh
new file mode 100755
index 0000000..3b30038
--- /dev/null
+++ b/tools/choose-default-locale.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -e
+
+# Fedora uses C.utf8 but Debian uses C.UTF-8
+if locale -a | grep -xq -E 'C\.(utf8|UTF-8)'; then
+ echo 'C.UTF-8'
+elif locale -a | grep -xqF 'en_US.utf8'; then
+ echo 'en_US.UTF-8'
+else
+ echo 'C'
+fi
diff --git a/tools/coverity.sh b/tools/coverity.sh
new file mode 100755
index 0000000..af4c920
--- /dev/null
+++ b/tools/coverity.sh
@@ -0,0 +1,233 @@
+#!/usr/bin/env bash
+
+# The official unmodified version of the script can be found at
+# https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh
+
+set -e
+
+# Declare build command
+COVERITY_SCAN_BUILD_COMMAND="ninja -C cov-build"
+
+# Environment check
+# Use default values if not set
+SCAN_URL=${SCAN_URL:="https://scan.coverity.com"}
+TOOL_BASE=${TOOL_BASE:="/tmp/coverity-scan-analysis"}
+UPLOAD_URL=${UPLOAD_URL:="https://scan.coverity.com/builds"}
+
+# These must be set by environment
+echo -e "\033[33;1mNote: COVERITY_SCAN_PROJECT_NAME and COVERITY_SCAN_TOKEN are available on the Project Settings page on scan.coverity.com\033[0m"
+[ -z "$COVERITY_SCAN_PROJECT_NAME" ] && echo "ERROR: COVERITY_SCAN_PROJECT_NAME must be set" && exit 1
+[ -z "$COVERITY_SCAN_NOTIFICATION_EMAIL" ] && echo "ERROR: COVERITY_SCAN_NOTIFICATION_EMAIL must be set" && exit 1
+[ -z "$COVERITY_SCAN_BRANCH_PATTERN" ] && echo "ERROR: COVERITY_SCAN_BRANCH_PATTERN must be set" && exit 1
+[ -z "$COVERITY_SCAN_BUILD_COMMAND" ] && echo "ERROR: COVERITY_SCAN_BUILD_COMMAND must be set" && exit 1
+[ -z "$COVERITY_SCAN_TOKEN" ] && echo "ERROR: COVERITY_SCAN_TOKEN must be set" && exit 1
+
+# Do not run on pull requests
+if [ "${TRAVIS_PULL_REQUEST}" != "false" ]; then
+ echo -e "\033[33;1mINFO: Skipping Coverity Analysis: branch is a pull request.\033[0m"
+ exit 0
+fi
+
+# Verify this branch should run
+if [[ "${TRAVIS_BRANCH^^}" =~ "${COVERITY_SCAN_BRANCH_PATTERN^^}" ]]; then
+ echo -e "\033[33;1mCoverity Scan configured to run on branch ${TRAVIS_BRANCH}\033[0m"
+else
+ echo -e "\033[33;1mCoverity Scan NOT configured to run on branch ${TRAVIS_BRANCH}\033[0m"
+ exit 1
+fi
+
+# Verify upload is permitted
+AUTH_RES=`curl -s --form project="$COVERITY_SCAN_PROJECT_NAME" --form token="$COVERITY_SCAN_TOKEN" $SCAN_URL/api/upload_permitted`
+if [ "$AUTH_RES" = "Access denied" ]; then
+ echo -e "\033[33;1mCoverity Scan API access denied. Check COVERITY_SCAN_PROJECT_NAME and COVERITY_SCAN_TOKEN.\033[0m"
+ exit 1
+else
+ AUTH=`echo $AUTH_RES | python -c "import sys, json; print(json.load(sys.stdin)['upload_permitted'])"`
+ if [ "$AUTH" = "True" ]; then
+ echo -e "\033[33;1mCoverity Scan analysis authorized per quota.\033[0m"
+ else
+ WHEN=`echo $AUTH_RES | python -c "import sys, json; print(json.load(sys.stdin)['next_upload_permitted_at'])"`
+ echo -e "\033[33;1mCoverity Scan analysis NOT authorized until $WHEN.\033[0m"
+ exit 1
+ fi
+fi
+
+TOOL_DIR=`find $TOOL_BASE -type d -name 'cov-analysis*'`
+export PATH="$TOOL_DIR/bin:$PATH"
+
+# Disable CCACHE so that cov-build can capture compilation units correctly
+export CCACHE_DISABLE=1
+
+# FUNCTION DEFINITIONS
+# --------------------
+_help()
+{
+ # displays help and exits
+ cat <<-EOF
+ USAGE: $0 [CMD] [OPTIONS]
+
+ CMD
+ build Issue Coverity build
+ upload Upload coverity archive for analysis
+ Note: By default, archive is created from default results directory.
+ To provide custom archive or results directory, see --result-dir
+ and --tar options below.
+
+ OPTIONS
+ -h,--help Display this menu and exit
+
+ Applicable to build command
+ ---------------------------
+ -o,--out-dir Specify Coverity intermediate directory (defaults to 'cov-int')
+ -t,--tar bool, archive the output to .tgz file (defaults to false)
+
+ Applicable to upload command
+ ----------------------------
+ -d, --result-dir Specify result directory if different from default ('cov-int')
+ -t, --tar ARCHIVE Use custom .tgz archive instead of intermediate directory or pre-archived .tgz
+ (defaults to 'analysis-results.tgz')
+ EOF
+ return;
+}
+
+_pack()
+{
+ RESULTS_ARCHIVE=${RESULTS_ARCHIVE:-'analysis-results.tgz'}
+
+ echo -e "\033[33;1mTarring Coverity Scan Analysis results...\033[0m"
+ tar czf $RESULTS_ARCHIVE $RESULTS_DIR
+ SHA=`git rev-parse --short HEAD`
+
+ PACKED=true
+}
+
+
+_build()
+{
+ echo -e "\033[33;1mRunning Coverity Scan Analysis Tool...\033[0m"
+ local _cov_build_options=""
+ #local _cov_build_options="--return-emit-failures 8 --parse-error-threshold 85"
+ eval "${COVERITY_SCAN_BUILD_COMMAND_PREPEND}"
+ COVERITY_UNSUPPORTED=1 cov-build --dir $RESULTS_DIR $_cov_build_options sh -c "$COVERITY_SCAN_BUILD_COMMAND"
+ cov-import-scm --dir $RESULTS_DIR --scm git --log $RESULTS_DIR/scm_log.txt
+
+ if [ $? != 0 ]; then
+ echo -e "\033[33;1mCoverity Scan Build failed.\033[0m"
+ return 1
+ fi
+
+ [ -z $TAR ] || [ $TAR = false ] && return 0
+
+ if [ "$TAR" = true ]; then
+ _pack
+ fi
+}
+
+
+_upload()
+{
+ # pack results
+ [ -z $PACKED ] || [ $PACKED = false ] && _pack
+
+ # Upload results
+ echo -e "\033[33;1mUploading Coverity Scan Analysis results...\033[0m"
+ response=$(curl \
+ --silent --write-out "\n%{http_code}\n" \
+ --form project=$COVERITY_SCAN_PROJECT_NAME \
+ --form token=$COVERITY_SCAN_TOKEN \
+ --form email=$COVERITY_SCAN_NOTIFICATION_EMAIL \
+ --form file=@$RESULTS_ARCHIVE \
+ --form version=$SHA \
+ --form description="Travis CI build" \
+ $UPLOAD_URL)
+ printf "\033[33;1mThe response is\033[0m\n%s\n" "$response"
+ status_code=$(echo "$response" | sed -n '$p')
+ # Coverity Scan used to respond with 201 on successfully receiving analysis results.
+ # Now it responds with 200 for some reason, and may change back in the future.
+ # See https://github.com/pmem/pmdk/commit/7b103fd2dd54b2e5974f71fb65c81ab3713c12c5
+ if [ "$status_code" != "200" ]; then
+ TEXT=$(echo "$response" | sed '$d')
+ echo -e "\033[33;1mCoverity Scan upload failed: $TEXT.\033[0m"
+ exit 1
+ fi
+
+ echo -e "\n\033[33;1mCoverity Scan Analysis completed successfully.\033[0m"
+ exit 0
+}
+
+# PARSE COMMAND LINE OPTIONS
+# --------------------------
+
+case $1 in
+ -h|--help)
+ _help
+ exit 0
+ ;;
+ build)
+ CMD='build'
+ TEMP=`getopt -o ho:t --long help,out-dir:,tar -n '$0' -- "$@"`
+ _ec=$?
+ [[ $_ec -gt 0 ]] && _help && exit $_ec
+ shift
+ ;;
+ upload)
+ CMD='upload'
+ TEMP=`getopt -o hd:t: --long help,result-dir:,tar: -n '$0' -- "$@"`
+ _ec=$?
+ [[ $_ec -gt 0 ]] && _help && exit $_ec
+ shift
+ ;;
+ *)
+ _help && exit 1 ;;
+esac
+
+RESULTS_DIR='cov-int'
+
+eval set -- "$TEMP"
+if [ $? != 0 ] ; then exit 1 ; fi
+
+# extract options and their arguments into variables.
+if [[ $CMD == 'build' ]]; then
+ TAR=false
+ while true ; do
+ case $1 in
+ -h|--help)
+ _help
+ exit 0
+ ;;
+ -o|--out-dir)
+ RESULTS_DIR="$2"
+ shift 2
+ ;;
+ -t|--tar)
+ TAR=true
+ shift
+ ;;
+ --) _build; shift ; break ;;
+ *) echo "Internal error" ; _help && exit 6 ;;
+ esac
+ done
+
+elif [[ $CMD == 'upload' ]]; then
+ while true ; do
+ case $1 in
+ -h|--help)
+ _help
+ exit 0
+ ;;
+ -d|--result-dir)
+ CHANGE_DEFAULT_DIR=true
+ RESULTS_DIR="$2"
+ shift 2
+ ;;
+ -t|--tar)
+ RESULTS_ARCHIVE="$2"
+ [ -z $CHANGE_DEFAULT_DIR ] || [ $CHANGE_DEFAULT_DIR = false ] && PACKED=true
+ shift 2
+ ;;
+ --) _upload; shift ; break ;;
+ *) echo "Internal error" ; _help && exit 6 ;;
+ esac
+ done
+
+fi
diff --git a/tools/find-build-dir.sh b/tools/find-build-dir.sh
new file mode 100755
index 0000000..06b6297
--- /dev/null
+++ b/tools/find-build-dir.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+set -e
+
+# Try to guess the build directory:
+# we look for subdirectories of the parent directory that look like ninja build dirs.
+
+if [ -n "$BUILD_DIR" ]; then
+ echo "$(realpath "$BUILD_DIR")"
+ exit 0
+fi
+
+root="$(dirname "$(realpath "$0")")"
+
+found=
+for i in "$root"/../*/build.ninja; do
+ c="$(dirname "$i")"
+ [ -d "$c" ] || continue
+ [ "$(basename "$c")" != mkosi.builddir ] || continue
+
+ if [ -n "$found" ]; then
+ echo 'Found multiple candidates, specify build directory with $BUILD_DIR' >&2
+ exit 2
+ fi
+ found="$c"
+done
+
+if [ -z "$found" ]; then
+ echo 'Specify build directory with $BUILD_DIR' >&2
+ exit 1
+fi
+
+echo "$(realpath $found)"
diff --git a/tools/find-double-newline.sh b/tools/find-double-newline.sh
new file mode 100755
index 0000000..6a6790b
--- /dev/null
+++ b/tools/find-double-newline.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1+
+
+TOP=`git rev-parse --show-toplevel`
+
+case "$1" in
+
+ recdiff)
+ if [ "$2" = "" ] ; then
+ DIR="$TOP"
+ else
+ DIR="$2"
+ fi
+
+ find $DIR -type f \( -name '*.[ch]' -o -name '*.xml' \) -exec $0 diff \{\} \;
+ ;;
+
+ recpatch)
+ if [ "$2" = "" ] ; then
+ DIR="$TOP"
+ else
+ DIR="$2"
+ fi
+
+ find $DIR -type f \( -name '*.[ch]' -o -name '*.xml' \) -exec $0 patch \{\} \;
+ ;;
+
+ diff)
+ T=`mktemp`
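+ # on an empty line, append the next one (N); if both are empty, delete the first (D), collapsing runs of blank lines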
+ sed '/^$/N;/^\n$/D' < "$2" > "$T"
+ diff -u "$2" "$T"
+ rm -f "$T"
+ ;;
+
+ patch)
+ sed -i '/^$/N;/^\n$/D' "$2"
+ ;;
+
+ *)
+ echo "Expected recdiff|recpatch|diff|patch as verb." >&2
+ ;;
+esac
diff --git a/tools/find-tabs.sh b/tools/find-tabs.sh
new file mode 100755
index 0000000..e32eac8
--- /dev/null
+++ b/tools/find-tabs.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1+
+
+TOP=`git rev-parse --show-toplevel`
+
+case "$1" in
+
+ recdiff)
+ if [ "$2" = "" ] ; then
+ DIR="$TOP"
+ else
+ DIR="$2"
+ fi
+
+ find $DIR -type f \( -name '*.[ch]' -o -name '*.xml' \) -exec $0 diff \{\} \;
+ ;;
+
+ recpatch)
+ if [ "$2" = "" ] ; then
+ DIR="$TOP"
+ else
+ DIR="$2"
+ fi
+
+ find $DIR -type f \( -name '*.[ch]' -o -name '*.xml' \) -exec $0 patch \{\} \;
+ ;;
+
+ diff)
+ T=`mktemp`
+ sed 's/\t/ /g' < "$2" > "$T"
+ diff -u "$2" "$T"
+ rm -f "$T"
+ ;;
+
+ patch)
+ sed -i 's/\t/ /g' "$2"
+ ;;
+
+ *)
+ echo "Expected recdiff|recpatch|diff|patch as verb." >&2
+ ;;
+esac
diff --git a/tools/gdb-sd_dump_hashmaps.py b/tools/gdb-sd_dump_hashmaps.py
new file mode 100644
index 0000000..4e8593f
--- /dev/null
+++ b/tools/gdb-sd_dump_hashmaps.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+from __future__ import print_function
+
+import gdb
+
+class sd_dump_hashmaps(gdb.Command):
+ "dump systemd's hashmaps"
+
+ def __init__(self):
+ super(sd_dump_hashmaps, self).__init__("sd_dump_hashmaps", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
+
+ def invoke(self, arg, from_tty):
+ d = gdb.parse_and_eval("hashmap_debug_list")
+ all_entry_sizes = gdb.parse_and_eval("all_entry_sizes")
+ all_direct_buckets = gdb.parse_and_eval("all_direct_buckets")
+ uchar_t = gdb.lookup_type("unsigned char")
+ ulong_t = gdb.lookup_type("unsigned long")
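+ # offsetof(HashmapBase, debug), used to map each debug-list node back to its owning hashmap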
+ debug_offset = gdb.parse_and_eval("(unsigned long)&((HashmapBase*)0)->debug")
+
+ print("type, hash, indirect, entries, max_entries, buckets, creator")
+ while d:
+ h = gdb.parse_and_eval("(HashmapBase*)((char*)%d - %d)" % (int(d.cast(ulong_t)), debug_offset))
+
+ if h["has_indirect"]:
+ storage_ptr = h["indirect"]["storage"].cast(uchar_t.pointer())
+ n_entries = h["indirect"]["n_entries"]
+ n_buckets = h["indirect"]["n_buckets"]
+ else:
+ storage_ptr = h["direct"]["storage"].cast(uchar_t.pointer())
+ n_entries = h["n_direct_entries"]
+ n_buckets = all_direct_buckets[int(h["type"])]
+
+ t = ["plain", "ordered", "set"][int(h["type"])]
+
+ print("{}, {}, {}, {}, {}, {}, {} ({}:{})".format(t, h["hash_ops"], bool(h["has_indirect"]), n_entries, d["max_entries"], n_buckets, d["func"], d["file"], d["line"]))
+
+ if arg != "" and n_entries > 0:
+ dib_raw_addr = storage_ptr + (all_entry_sizes[h["type"]] * n_buckets)
+
+ histogram = {}
+ for i in range(0, n_buckets):
+ dib = int(dib_raw_addr[i])
+ histogram[dib] = histogram.get(dib, 0) + 1
+
+ for dib in sorted(iter(histogram)):
+ if dib != 255:
+ print("{:>3} {:>8} {}% of entries".format(dib, histogram[dib], 100.0*histogram[dib]/n_entries))
+ else:
+ print("{:>3} {:>8} {}% of slots".format(dib, histogram[dib], 100.0*histogram[dib]/n_buckets))
+ print("mean DIB of entries: {}".format(sum([dib*histogram[dib] for dib in iter(histogram) if dib != 255])*1.0/n_entries))
+
+ blocks = []
+ current_len = 1
+ prev = int(dib_raw_addr[0])
+ for i in range(1, n_buckets):
+ dib = int(dib_raw_addr[i])
+ if (dib == 255) != (prev == 255):
+ if prev != 255:
+ blocks += [[i, current_len]]
+ current_len = 1
+ else:
+ current_len += 1
+
+ prev = dib
+ if prev != 255:
+ blocks += [[i, current_len]]
+ # a block may be wrapped around
+ if len(blocks) > 1 and blocks[0][0] == blocks[0][1] and blocks[-1][0] == n_buckets - 1:
+ blocks[0][1] += blocks[-1][1]
+ blocks = blocks[0:-1]
+ print("max block: {}".format(max(blocks, key=lambda a: a[1])))
+ print("sum block lens: {}".format(sum(b[1] for b in blocks)))
+ print("mean block len: {}".format((1.0 * sum(b[1] for b in blocks) / len(blocks))))
+
+ d = d["debug_list_next"]
+
+sd_dump_hashmaps()
diff --git a/tools/generate-gperfs.py b/tools/generate-gperfs.py
new file mode 100755
index 0000000..5392df0
--- /dev/null
+++ b/tools/generate-gperfs.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+"""
+Generate %-from-name.gperf from %-list.txt
+"""
+
+import sys
+
+name, prefix, input = sys.argv[1:]
+
+print("""\
+%{
+#if __GNUC__ >= 7
+_Pragma("GCC diagnostic ignored \\"-Wimplicit-fallthrough\\"")
+#endif
+%}""")
+print("""\
+struct {}_name {{ const char* name; int id; }};
+%null-strings
+%%""".format(name))
+
+for line in open(input):
+ print("{0}, {1}{0}".format(line.rstrip(), prefix))
diff --git a/tools/make-directive-index.py b/tools/make-directive-index.py
new file mode 100755
index 0000000..9d94487
--- /dev/null
+++ b/tools/make-directive-index.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+import sys
+import collections
+import re
+from xml_helper import xml_parse, xml_print, tree
+from copy import deepcopy
+
+TEMPLATE = '''\
+<refentry id="systemd.directives" conditional="HAVE_PYTHON">
+
+ <refentryinfo>
+ <title>systemd.directives</title>
+ <productname>systemd</productname>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>systemd.directives</refentrytitle>
+ <manvolnum>7</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>systemd.directives</refname>
+ <refpurpose>Index of configuration directives</refpurpose>
+ </refnamediv>
+
+ <refsect1>
+ <title>Unit directives</title>
+
+ <para>Directives for configuring units, used in unit
+ files.</para>
+
+ <variablelist id='unit-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>Options on the kernel command line</title>
+
+ <para>Kernel boot options for configuring the behaviour of the
+ systemd process.</para>
+
+ <variablelist id='kernel-commandline-options' />
+ </refsect1>
+
+ <refsect1>
+ <title>Environment variables</title>
+
+ <para>Environment variables understood by the systemd manager
+ and other programs and environment variable-compatible settings.</para>
+
+ <variablelist id='environment-variables' />
+ </refsect1>
+
+ <refsect1>
+ <title>EFI variables</title>
+
+ <para>EFI variables understood by
+ <citerefentry><refentrytitle>systemd-boot</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ and other programs.</para>
+
+ <variablelist id='efi-variables' />
+ </refsect1>
+
+ <refsect1>
+ <title>UDEV directives</title>
+
+ <para>Directives for configuring systemd units through the
+ udev database.</para>
+
+ <variablelist id='udev-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>Network directives</title>
+
+ <para>Directives for configuring network links through the
+ net-setup-link udev builtin and networks through
+ systemd-networkd.</para>
+
+ <variablelist id='network-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>Journal fields</title>
+
+ <para>Fields in the journal events with a well known meaning.</para>
+
+ <variablelist id='journal-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>PAM configuration directives</title>
+
+ <para>Directives for configuring PAM behaviour.</para>
+
+ <variablelist id='pam-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title><filename>/etc/crypttab</filename> and
+ <filename>/etc/fstab</filename> options</title>
+
+ <para>Options which influence mounted filesystems and
+ encrypted volumes.</para>
+
+ <variablelist id='fstab-options' />
+ </refsect1>
+
+ <refsect1>
+ <title><citerefentry><refentrytitle>systemd.nspawn</refentrytitle><manvolnum>5</manvolnum></citerefentry>
+ directives</title>
+
+ <para>Directives for configuring systemd-nspawn containers.</para>
+
+ <variablelist id='nspawn-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>Program configuration options</title>
+
+ <para>Directives for configuring the behaviour of the
+ systemd process and other tools through configuration files.</para>
+
+ <variablelist id='config-directives' />
+ </refsect1>
+
+ <refsect1>
+ <title>Command line options</title>
+
+ <para>Command-line options accepted by programs in the
+ systemd suite.</para>
+
+ <variablelist id='options' />
+ </refsect1>
+
+ <refsect1>
+ <title>Constants</title>
+
+ <para>Various constants used and/or defined by systemd.</para>
+
+ <variablelist id='constants' />
+ </refsect1>
+
+ <refsect1>
+ <title>Miscellaneous options and directives</title>
+
+ <para>Other configuration elements which don't fit in
+ any of the above groups.</para>
+
+ <variablelist id='miscellaneous' />
+ </refsect1>
+
+ <refsect1>
+ <title>Files and directories</title>
+
+ <para>Paths and file names referred to in the
+ documentation.</para>
+
+ <variablelist id='filenames' />
+ </refsect1>
+
+ <refsect1>
+ <title>Colophon</title>
+ <para id='colophon' />
+ </refsect1>
+</refentry>
+'''
+
+COLOPHON = '''\
+This index contains {count} entries in {sections} sections,
+referring to {pages} individual manual pages.
+'''
+
+def _extract_directives(directive_groups, formatting, page):
+ t = xml_parse(page)
+ section = t.find('./refmeta/manvolnum').text
+ pagename = t.find('./refmeta/refentrytitle').text
+
+ storopt = directive_groups['options']
+ for variablelist in t.iterfind('.//variablelist'):
+ klass = variablelist.attrib.get('class')
+ storvar = directive_groups[klass or 'miscellaneous']
+ # <option>s go in OPTIONS, unless class is specified
+ for xpath, stor in (('./varlistentry/term/varname', storvar),
+ ('./varlistentry/term/option',
+ storvar if klass else storopt)):
+ for name in variablelist.iterfind(xpath):
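+ # keep only the directive name, truncating at '=' or the first space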
+ text = re.sub(r'([= ]).*', r'\1', name.text).rstrip()
+ stor[text].append((pagename, section))
+ if text not in formatting:
+ # use element as formatted display
+ if name.text[-1] in '= ':
+ name.clear()
+ else:
+ name.tail = ''
+ name.text = text
+ formatting[text] = name
+
+ storfile = directive_groups['filenames']
+ for xpath, absolute_only in (('.//refsynopsisdiv//filename', False),
+ ('.//refsynopsisdiv//command', False),
+ ('.//filename', True)):
+ for name in t.iterfind(xpath):
+ if absolute_only and not (name.text and name.text.startswith('/')):
+ continue
+ if name.attrib.get('noindex'):
+ continue
+ name.tail = ''
+ if name.text:
+ if name.text.endswith('*'):
+ name.text = name.text[:-1]
+ if not name.text.startswith('.'):
+ text = name.text.partition(' ')[0]
+ if text != name.text:
+ name.clear()
+ name.text = text
+ if text.endswith('/'):
+ text = text[:-1]
+ storfile[text].append((pagename, section))
+ if text not in formatting:
+ # use element as formatted display
+ formatting[text] = name
+ else:
+ text = ' '.join(name.itertext())
+ storfile[text].append((pagename, section))
+ formatting[text] = name
+
+ storfile = directive_groups['constants']
+ for name in t.iterfind('.//constant'):
+ if name.attrib.get('noindex'):
+ continue
+ name.tail = ''
+ if name.text.startswith('('): # a cast, strip it
+ name.text = name.text.partition(' ')[2]
+ storfile[name.text].append((pagename, section))
+ formatting[name.text] = name
+
+def _make_section(template, name, directives, formatting):
+ varlist = template.find(".//*[@id='{}']".format(name))
+ for varname, manpages in sorted(directives.items()):
+ entry = tree.SubElement(varlist, 'varlistentry')
+ term = tree.SubElement(entry, 'term')
+ display = deepcopy(formatting[varname])
+ term.append(display)
+
+ para = tree.SubElement(tree.SubElement(entry, 'listitem'), 'para')
+
+ b = None
+ for manpage, manvolume in sorted(set(manpages)):
+ if b is not None:
+ b.tail = ', '
+ b = tree.SubElement(para, 'citerefentry')
+ c = tree.SubElement(b, 'refentrytitle')
+ c.text = manpage
+ c.attrib['target'] = varname
+ d = tree.SubElement(b, 'manvolnum')
+ d.text = manvolume
+ entry.tail = '\n\n'
+
+def _make_colophon(template, groups):
+ count = 0
+ pages = set()
+ for group in groups:
+ count += len(group)
+ for pagelist in group.values():
+ pages |= set(pagelist)
+
+ para = template.find(".//para[@id='colophon']")
+ para.text = COLOPHON.format(count=count,
+ sections=len(groups),
+ pages=len(pages))
+
+def _make_page(template, directive_groups, formatting):
+ """Create an XML tree from directive_groups.
+
+ directive_groups = {
+ 'class': {'variable': [('manpage', 'manvolume'), ...],
+ 'variable2': ...},
+ ...
+ }
+ """
+ for name, directives in directive_groups.items():
+ _make_section(template, name, directives, formatting)
+
+ _make_colophon(template, directive_groups.values())
+
+ return template
+
+def make_page(*xml_files):
+ "Extract directives from xml_files and return XML index tree."
+ template = tree.fromstring(TEMPLATE)
+ names = [vl.get('id') for vl in template.iterfind('.//variablelist')]
+ directive_groups = {name:collections.defaultdict(list)
+ for name in names}
+ formatting = {}
+ for page in xml_files:
+ try:
+ _extract_directives(directive_groups, formatting, page)
+ except Exception:
+ raise ValueError("failed to process " + page)
+
+ return _make_page(template, directive_groups, formatting)
+
+if __name__ == '__main__':
+ with open(sys.argv[1], 'wb') as f:
+ f.write(xml_print(make_page(*sys.argv[2:])))
diff --git a/tools/make-man-index.py b/tools/make-man-index.py
new file mode 100755
index 0000000..66027af
--- /dev/null
+++ b/tools/make-man-index.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+import collections
+import sys
+import re
+from xml_helper import xml_parse, xml_print, tree
+
+MDASH = ' — ' if sys.version_info.major >= 3 else ' -- '
+
+TEMPLATE = '''\
+<refentry id="systemd.index" conditional="HAVE_PYTHON">
+
+ <refentryinfo>
+ <title>systemd.index</title>
+ <productname>systemd</productname>
+ </refentryinfo>
+
+ <refmeta>
+ <refentrytitle>systemd.index</refentrytitle>
+ <manvolnum>7</manvolnum>
+ </refmeta>
+
+ <refnamediv>
+ <refname>systemd.index</refname>
+ <refpurpose>List all manpages from the systemd project</refpurpose>
+ </refnamediv>
+</refentry>
+'''
+
+SUMMARY = '''\
+ <refsect1>
+ <title>See Also</title>
+ <para>
+ <citerefentry><refentrytitle>systemd.directives</refentrytitle><manvolnum>7</manvolnum></citerefentry>
+ </para>
+
+ <para id='counts' />
+ </refsect1>
+'''
+
+COUNTS = '\
+This index contains {count} entries, referring to {pages} individual manual pages.'
+
+
+def check_id(page, t):
+ id = t.getroot().get('id')
+ if not re.search('/' + id + '[.]', page):
+ raise ValueError("id='{}' is not the same as page name '{}'".format(id, page))
+
+def make_index(pages):
+ index = collections.defaultdict(list)
+ for p in pages:
+ t = xml_parse(p)
+ check_id(p, t)
+ section = t.find('./refmeta/manvolnum').text
+ refname = t.find('./refnamediv/refname').text
+ purpose = ' '.join(t.find('./refnamediv/refpurpose').text.split())
+ for f in t.findall('./refnamediv/refname'):
+ infos = (f.text, section, purpose, refname)
+ index[f.text[0].upper()].append(infos)
+ return index
+
+def add_letter(template, letter, pages):
+ refsect1 = tree.SubElement(template, 'refsect1')
+ title = tree.SubElement(refsect1, 'title')
+ title.text = letter
+ para = tree.SubElement(refsect1, 'para')
+ for info in sorted(pages, key=lambda info: str.lower(info[0])):
+ refname, section, purpose, realname = info
+
+ b = tree.SubElement(para, 'citerefentry')
+ c = tree.SubElement(b, 'refentrytitle')
+ c.text = refname
+ d = tree.SubElement(b, 'manvolnum')
+ d.text = section
+
+ b.tail = MDASH + purpose # + ' (' + p + ')'
+
+ tree.SubElement(para, 'sbr')
+
+def add_summary(template, indexpages):
+ count = 0
+ pages = set()
+ for group in indexpages:
+ count += len(group)
+ for info in group:
+ refname, section, purpose, realname = info
+ pages.add((realname, section))
+
+ refsect1 = tree.fromstring(SUMMARY)
+ template.append(refsect1)
+
+ para = template.find(".//para[@id='counts']")
+ para.text = COUNTS.format(count=count, pages=len(pages))
+
+def make_page(*xml_files):
+ template = tree.fromstring(TEMPLATE)
+ index = make_index(xml_files)
+
+ for letter in sorted(index):
+ add_letter(template, letter, index[letter])
+
+ add_summary(template, index.values())
+
+ return template
+
+if __name__ == '__main__':
+ with open(sys.argv[1], 'wb') as f:
+ f.write(xml_print(make_page(*sys.argv[2:])))
diff --git a/tools/make-man-rules.py b/tools/make-man-rules.py
new file mode 100755
index 0000000..c4551c6
--- /dev/null
+++ b/tools/make-man-rules.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+from __future__ import print_function
+import collections
+import sys
+import os.path
+import pprint
+from xml_helper import xml_parse
+
+def man(page, number):
+ return '{}.{}'.format(page, number)
+
+def xml(file):
+ return os.path.basename(file)
+
+def add_rules(rules, name):
+ xml = xml_parse(name)
+ # print('parsing {}'.format(name), file=sys.stderr)
+ if xml.getroot().tag != 'refentry':
+ return
+ conditional = xml.getroot().get('conditional') or ''
+ rulegroup = rules[conditional]
+ refmeta = xml.find('./refmeta')
+ title = refmeta.find('./refentrytitle').text
+ number = refmeta.find('./manvolnum').text
+ refnames = xml.findall('./refnamediv/refname')
+ target = man(refnames[0].text, number)
+ if title != refnames[0].text:
+ raise ValueError('refmeta and refnamediv disagree: ' + name)
+ for refname in refnames:
+ alias = man(refname.text, number)
+ assert all(alias not in group
+ for group in rules.values()), "duplicate page name"
+ rulegroup[alias] = target
+ # print('{} => {} [{}]'.format(alias, target, conditional), file=sys.stderr)
+
+def create_rules(xml_files):
+ " {conditional => {alias-name => source-name}} "
+ rules = collections.defaultdict(dict)
+ for name in xml_files:
+ try:
+ add_rules(rules, name)
+ except Exception:
+ print("Failed to process", name, file=sys.stderr)
+ raise
+ return rules
+
+def mjoin(files):
+ return ' \\\n\t'.join(sorted(files) or '#')
+
+MESON_HEADER = '''\
+# Do not edit. Generated by make-man-rules.py.
+manpages = ['''
+
+MESON_FOOTER = '''\
+]
+# Really, do not edit.'''
+
+def make_mesonfile(rules, dist_files):
+ # reformat rules as
+ # grouped = [ [name, section, [alias...], condition], ...]
+ #
+ # but first create a dictionary like
+ # lists = { (name, condition) => [alias...] }
+ grouped = collections.defaultdict(list)
+ for condition, items in rules.items():
+ for alias, name in items.items():
+ group = grouped[(name, condition)]
+ if name != alias:
+ group.append(alias)
+
+ lines = [ [p[0][:-2], p[0][-1], sorted(a[:-2] for a in aliases), p[1]]
+ for p, aliases in sorted(grouped.items()) ]
+ return '\n'.join((MESON_HEADER, pprint.pformat(lines)[1:-1], MESON_FOOTER))
+
+if __name__ == '__main__':
+ pages = sys.argv[1:]
+
+ rules = create_rules(pages)
+ dist_files = (xml(file) for file in pages
+ if not file.endswith(".directives.xml") and
+ not file.endswith(".index.xml"))
+ print(make_mesonfile(rules, dist_files))
diff --git a/tools/meson-apply-m4.sh b/tools/meson-apply-m4.sh
new file mode 100755
index 0000000..6abe177
--- /dev/null
+++ b/tools/meson-apply-m4.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+set -eu
+
+if [ $# -ne 2 ]; then
+ echo 'Invalid number of arguments.'
+ exit 1
+fi
+
+CONFIG=$1
+TARGET=$2
+
+if [ ! -f $CONFIG ]; then
+ echo "$CONFIG not found."
+ exit 2
+fi
+
+if [ ! -f $TARGET ]; then
+ echo "$TARGET not found."
+ exit 3
+fi
+
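+# turn each "#define FOO 1" in the config header into a -DFOO argument for m4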
+DEFINES=$(awk '$1 == "#define" && $3 == "1" { printf "-D%s ", $2 }' $CONFIG)
+
+m4 -P $DEFINES $TARGET
diff --git a/tools/meson-build.sh b/tools/meson-build.sh
new file mode 100755
index 0000000..dea5541
--- /dev/null
+++ b/tools/meson-build.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eux
+
+src="$1"
+dst="$2"
+target="$3"
+options="$4"
+CC="$5"
+CXX="$6"
+
+[ -f "$dst/ninja.build" ] || CC="$CC" CXX="$CXX" meson "$src" "$dst" $options
+
+# Locate the ninja binary. On CentOS 7 it is called ninja-build,
+# so use that name if available.
+ninja=ninja
+if which ninja-build >/dev/null 2>&1 ; then
+ ninja=ninja-build
+fi
+
+"$ninja" -C "$dst" "$target"
diff --git a/tools/meson-check-api-docs.sh b/tools/meson-check-api-docs.sh
new file mode 100755
index 0000000..a654368
--- /dev/null
+++ b/tools/meson-check-api-docs.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+set -eu
+
+sd_good=0
+sd_total=0
+udev_good=0
+udev_total=0
+
+for symbol in `nm -g --defined-only "$@" | grep " T " | cut -d" " -f3 | sort -u` ; do
+ if test -f ${MESON_BUILD_ROOT}/man/$symbol.3 ; then
+ echo "✓ Symbol $symbol() is documented."
+ good=1
+ else
+ printf " \x1b[1;31mSymbol $symbol() lacks documentation.\x1b[0m\n"
+ good=0
+ fi
+
+ case $symbol in
+ sd_*)
+ sd_good=$((sd_good+good))
+ sd_total=$((sd_total+1))
+ ;;
+ udev_*)
+ udev_good=$((udev_good+good))
+ udev_total=$((udev_total+1))
+ ;;
+ *)
+ echo 'unknown symbol prefix'
+ exit 1
+ esac
+done
+
+echo "libsystemd: $sd_good/$sd_total libudev: $udev_good/$udev_total"
diff --git a/tools/meson-check-compilation.sh b/tools/meson-check-compilation.sh
new file mode 100755
index 0000000..ce39e16
--- /dev/null
+++ b/tools/meson-check-compilation.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+set -eu
+
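+# compile an empty program read from stdin to verify that the given compiler and flags work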
+"$@" '-' -o/dev/null </dev/null
diff --git a/tools/meson-check-help.sh b/tools/meson-check-help.sh
new file mode 100755
index 0000000..6915710
--- /dev/null
+++ b/tools/meson-check-help.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+set -eu
+
+export SYSTEMD_LOG_LEVEL=info
+
+# output width
+if "$1" --help | grep -v 'default:' | grep -E -q '.{80}.'; then
+ echo "$(basename "$1") --help output is too wide:"
+ "$1" --help | awk 'length > 80' | grep -E --color=yes '.{80}'
+ exit 1
+fi
+
+# no --help output to stdout
+if "$1" --help 2>&1 1>/dev/null | grep .; then
+ echo "$(basename "$1") --help prints to stderr"
+ exit 2
+fi
+
+# error output to stderr
+if ! "$1" --no-such-parameter 2>&1 1>/dev/null | grep -q .; then
+ echo "$(basename "$1") with an unknown parameter does not print to stderr"
+ exit 3
+fi
diff --git a/tools/meson-git-contrib.sh b/tools/meson-git-contrib.sh
new file mode 100755
index 0000000..514daa0
--- /dev/null
+++ b/tools/meson-git-contrib.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+
+git shortlog -s `git describe --abbrev=0`.. | \
+ cut -c8- | \
+ sed 's/ / /g' | \
+ awk '{ print $0 "," }' | \
+ sed -e 's/ / /g' | \
+ sort -u
diff --git a/tools/meson-hwdb-update.sh b/tools/meson-hwdb-update.sh
new file mode 100755
index 0000000..f9ef241
--- /dev/null
+++ b/tools/meson-hwdb-update.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+set -eu
+
+cd "$1"
+
+unset permissive
+if [ "${2:-}" = "-p" ]; then
+ permissive=1
+ shift
+else
+ permissive=0
+fi
+
+if [ "${2:-}" != "-n" ]; then (
+ [ "$permissive" = "0" ] || set +e
+ set -x
+
+ curl -L -o usb.ids 'http://www.linux-usb.org/usb.ids'
+ curl -L -o pci.ids 'http://pci-ids.ucw.cz/v2.2/pci.ids'
+ curl -L -o ma-large.txt 'http://standards-oui.ieee.org/oui/oui.txt'
+ curl -L -o ma-medium.txt 'http://standards-oui.ieee.org/oui28/mam.txt'
+ curl -L -o ma-small.txt 'http://standards-oui.ieee.org/oui36/oui36.txt'
+ curl -L -o pnp_id_registry.html 'http://www.uefi.org/uefi-pnp-export'
+ curl -L -o acpi_id_registry.html 'http://www.uefi.org/uefi-acpi-export'
+) fi
+
+set -x
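+# regenerate the ACPI vendor hwdb, re-apply the local patch, and refresh the patch file with any remaining differences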
+./acpi-update.py >20-acpi-vendor.hwdb.base
+patch -p0 -o- 20-acpi-vendor.hwdb.base <20-acpi-vendor.hwdb.patch >20-acpi-vendor.hwdb
+! diff -u 20-acpi-vendor.hwdb.base 20-acpi-vendor.hwdb >20-acpi-vendor.hwdb.patch
+
+./ids_parser.py
diff --git a/tools/meson-make-symlink.sh b/tools/meson-make-symlink.sh
new file mode 100755
index 0000000..501cd43
--- /dev/null
+++ b/tools/meson-make-symlink.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+
+# this is needed mostly because $DESTDIR is provided as a variable,
+# and we need to create the target directory...
+
+mkdir -vp "$(dirname "${DESTDIR:-}$2")"
+if [ "$(dirname $1)" = . ]; then
+ ln -vfs -T "$1" "${DESTDIR:-}$2"
+else
+ ln -vfs -T --relative "${DESTDIR:-}$1" "${DESTDIR:-}$2"
+fi
diff --git a/tools/meson-vcs-tag.sh b/tools/meson-vcs-tag.sh
new file mode 100755
index 0000000..c8033d0
--- /dev/null
+++ b/tools/meson-vcs-tag.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+set -eu
+set -o pipefail
+
+dir="$1"
+tag="$2"
+fallback="$3"
+
+if [ -n "$tag" ]; then
+ echo "$tag"
+ exit 0
+fi
+
+# Apparently git describe has a bug where it always considers the work-tree
+# dirty when invoked with --git-dir (even though 'git status' is happy). Work
+# around this issue by cd-ing to the source directory.
+cd "$dir" && git describe --abbrev=7 --dirty=+ 2>/dev/null | sed 's/^v//' || echo "$fallback"
diff --git a/tools/oss-fuzz.sh b/tools/oss-fuzz.sh
new file mode 100755
index 0000000..9a116be
--- /dev/null
+++ b/tools/oss-fuzz.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+# SPDX-License-Identifier: LGPL-2.1+
+
+set -ex
+
+export LC_CTYPE=C.UTF-8
+
+export CC=${CC:-clang}
+export CXX=${CXX:-clang++}
+clang_version="$($CC --version | sed -nr 's/.*version ([^ ]+?) .*/\1/p' | sed -r 's/-$//')"
+
+SANITIZER=${SANITIZER:-address -fsanitize-address-use-after-scope}
+flags="-O1 -fno-omit-frame-pointer -gline-tables-only -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -fsanitize=$SANITIZER -fsanitize-coverage=trace-pc-guard,trace-cmp"
+
+clang_lib="/usr/lib64/clang/${clang_version}/lib/linux"
+[ -d "$clang_lib" ] || clang_lib="/usr/lib/clang/${clang_version}/lib/linux"
+
+export CFLAGS=${CFLAGS:-$flags}
+export CXXFLAGS=${CXXFLAGS:-$flags}
+export LDFLAGS=${LDFLAGS:--L${clang_lib}}
+
+export WORK=${WORK:-$(pwd)}
+export OUT=${OUT:-$(pwd)/out}
+mkdir -p $OUT
+
+build=$WORK/build
+rm -rf $build
+mkdir -p $build
+
+fuzzflag="oss-fuzz=true"
+if [ -z "$FUZZING_ENGINE" ]; then
+ fuzzflag="llvm-fuzz=true"
+fi
+
+meson $build -D$fuzzflag -Db_lundef=false
+ninja -C $build fuzzers
+
+# The seed corpus is a separate flat archive for each fuzzer,
+# with a fixed name ${fuzzer}_seed_corpus.zip.
+for d in "$(dirname "$0")/../test/fuzz/fuzz-"*; do
+ zip -jqr $OUT/$(basename "$d")_seed_corpus.zip "$d"
+done
+
+# get fuzz-dns-packet corpus
+df=$build/dns-fuzzing
+git clone --depth 1 https://github.com/CZ-NIC/dns-fuzzing $df
+zip -jqr $OUT/fuzz-dns-packet_seed_corpus.zip $df/packet
+
+install -Dt $OUT/src/shared/ $build/src/shared/libsystemd-shared-*.so
+
+wget -O $OUT/fuzz-json_seed_corpus.zip https://storage.googleapis.com/skia-fuzzer/oss-fuzz/skjson_seed_corpus.zip
+wget -O $OUT/fuzz-json.dict https://raw.githubusercontent.com/rc0r/afl-fuzz/master/dictionaries/json.dict
+
+find $build -maxdepth 1 -type f -executable -name "fuzz-*" -exec mv {} $OUT \;
+find src -type f -name "fuzz-*.dict" -exec cp {} $OUT \;
+cp src/fuzz/*.options $OUT
diff --git a/tools/xml_helper.py b/tools/xml_helper.py
new file mode 100755
index 0000000..f399e74
--- /dev/null
+++ b/tools/xml_helper.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: LGPL-2.1+
+
+from lxml import etree as tree
+
+class CustomResolver(tree.Resolver):
+ def resolve(self, url, id, context):
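+ # called by lxml for external entities; always serve custom-entities.ent from the man/ directory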
+ if 'custom-entities.ent' in url:
+ return self.resolve_filename('man/custom-entities.ent', context)
+
+_parser = tree.XMLParser()
+_parser.resolvers.add(CustomResolver())
+
+def xml_parse(page):
+ doc = tree.parse(page, _parser)
+ doc.xinclude()
+ return doc
+
+def xml_print(xml):
+ return tree.tostring(xml, pretty_print=True, encoding='utf-8')