author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-10 20:49:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-10 20:49:52 +0000
commit     55944e5e40b1be2afc4855d8d2baf4b73d1876b5 (patch)
tree       33f869f55a1b149e9b7c2b7e201867ca5dd52992 /test/test-functions
parent     Initial commit. (diff)
download   systemd-55944e5e40b1be2afc4855d8d2baf4b73d1876b5.tar.xz
           systemd-55944e5e40b1be2afc4855d8d2baf4b73d1876b5.zip
Adding upstream version 255.4. (upstream/255.4)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'test/test-functions')
-rw-r--r--  test/test-functions | 3546
1 file changed, 3546 insertions, 0 deletions
diff --git a/test/test-functions b/test/test-functions
new file mode 100644
index 0000000..0698b30
--- /dev/null
+++ b/test/test-functions
@@ -0,0 +1,3546 @@
+#!/usr/bin/env bash
+# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# shellcheck disable=SC2030,SC2031
+# ex: ts=8 sw=4 sts=4 et filetype=sh tw=180
+# Note: the shellcheck line above disables warnings for variables which are
+# modified in a subshell. In our case this behavior is expected, but
+# `shellcheck` can't distinguish this because of its limited variable tracking,
+# which results in a warning for every instance of such variables used
+# throughout this file.
+# See:
+# * comment in function install_verity_minimal()
+# * koalaman/shellcheck#280
+set -o pipefail
+
+# Simple wrapper to unify boolean checks.
+# Note: this function needs to stay near the top of the file, so we can use it
+# in code in the outermost scope.
+get_bool() {
+ # Make the value lowercase to make the regex matching simpler
+ local _bool="${1,,}"
+
+ # Consider empty value as "false"
+ if [[ -z "$_bool" || "$_bool" =~ ^(0|no|false)$ ]]; then
+ return 1
+ elif [[ "$_bool" =~ ^(1|yes|true)$ ]]; then
+ return 0
+ else
+ echo >&2 "Value '$_bool' is not a valid boolean value"
+ exit 1
+ fi
+}
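+
+# Illustrative usage sketch (not part of the harness itself): get_bool is meant
+# to be used directly in conditionals, e.g.:
+#
+#   if get_bool "${TEST_NO_KVM:-}"; then
+#       echo "KVM was explicitly disabled"
+#   fi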
+
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+export PATH
+
+os_release=$(test -e /etc/os-release && echo /etc/os-release || echo /usr/lib/os-release)
+# shellcheck source=/dev/null
+source "$os_release"
+[[ "$ID" == "debian" || " $ID_LIKE " == *" debian "* ]] && LOOKS_LIKE_DEBIAN=yes || LOOKS_LIKE_DEBIAN=no
+[[ "$ID" == "arch" || " $ID_LIKE " == *" arch "* ]] && LOOKS_LIKE_ARCH=yes || LOOKS_LIKE_ARCH=no
+[[ "$ID" == "fedora" ]] && LOOKS_LIKE_FEDORA=yes || LOOKS_LIKE_FEDORA=no
+[[ " $ID_LIKE " == *" suse "* ]] && LOOKS_LIKE_SUSE=yes || LOOKS_LIKE_SUSE=no
+
+KERNEL_VER="${KERNEL_VER-$(uname -r)}"
+QEMU_TIMEOUT="${QEMU_TIMEOUT:-1800}"
+NSPAWN_TIMEOUT="${NSPAWN_TIMEOUT:-1800}"
+TIMED_OUT= # will be 1 after run_* if *_TIMEOUT is set and test timed out
+get_bool "$LOOKS_LIKE_SUSE" && FSTYPE="${FSTYPE:-btrfs}" || FSTYPE="${FSTYPE:-ext4}"
+UNIFIED_CGROUP_HIERARCHY="${UNIFIED_CGROUP_HIERARCHY:-default}"
+EFI_MOUNT="${EFI_MOUNT:-$(bootctl -x 2>/dev/null || echo /boot)}"
+# Note that defining a different IMAGE_NAME in a test setup script will only result
+# in default.img being copied and renamed. It can then be extended by defining
+# a test_append_files() function. The $1 parameter will be the root directory.
+# To force creating a new image from scratch (e.g. to encrypt it), also define
+# TEST_FORCE_NEWIMAGE=1 in the test setup script.
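+#
+# Illustrative sketch of what such a setup script might define (the function
+# name and the $1 semantics come from the note above; the copied file is made up):
+#
+#   test_append_files() {
+#       local workspace="${1:?}"
+#       # Drop extra test data into the image root
+#       cp /etc/hosts "$workspace/etc/hosts"
+#   }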
+IMAGE_NAME=${IMAGE_NAME:-default}
+TEST_REQUIRE_INSTALL_TESTS="${TEST_REQUIRE_INSTALL_TESTS:-1}"
+TEST_PARALLELIZE="${TEST_PARALLELIZE:-0}"
+TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED="${TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED:-1}"
+LOOPDEV=
+
+# Since in Bash we can have only one handler per signal, let's overcome this
+# limitation by having one global handler for the EXIT signal which executes
+# all registered handlers
+_AT_EXIT_HANDLERS=()
+_at_exit() {
+ set +e
+
+ # Run the EXIT handlers in reverse order
+ for ((i = ${#_AT_EXIT_HANDLERS[@]} - 1; i >= 0; i--)); do
+ ddebug "Running EXIT handler '${_AT_EXIT_HANDLERS[$i]}'"
+ eval "${_AT_EXIT_HANDLERS[$i]}"
+ done
+}
+
+trap _at_exit EXIT
+
+add_at_exit_handler() {
+ _AT_EXIT_HANDLERS+=("${1:?}")
+}
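+
+# Illustrative usage sketch: handlers are plain shell snippets which get
+# eval'ed in reverse registration order on EXIT, e.g.:
+#
+#   tmpfile="$(mktemp)"
+#   add_at_exit_handler "rm -f '$tmpfile'"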
+
+# Decide if we can (and want to) run qemu with KVM acceleration.
+# Check if nested KVM is explicitly enabled (TEST_NESTED_KVM). If not,
+# check if it's not explicitly disabled (TEST_NO_KVM) and we're not already
+# running under KVM. If these conditions are met, enable KVM (and possibly
+# nested KVM), otherwise disable it.
+if get_bool "${TEST_NESTED_KVM:=}" || (! get_bool "${TEST_NO_KVM:=}" && ! systemd-detect-virt -qv); then
+ QEMU_KVM=yes
+else
+ QEMU_KVM=no
+fi
+
+if ! ROOTLIBDIR=$(pkg-config --variable=systemdutildir systemd); then
+ echo "WARNING! Cannot determine libdir from pkg-config, assuming /usr/lib/systemd" >&2
+ ROOTLIBDIR=/usr/lib/systemd
+fi
+
+# The calling test.sh scripts have TEST_BASE_DIR set via their Makefile, but we don't need them to provide it
+TEST_BASE_DIR=${TEST_BASE_DIR:-$(realpath "$(dirname "${BASH_SOURCE[0]}")")}
+TEST_UNITS_DIR="$(realpath "$TEST_BASE_DIR/units")"
+SOURCE_DIR=$(realpath "$TEST_BASE_DIR/..")
+# These variables are used by test scripts
+export TEST_BASE_DIR TEST_UNITS_DIR SOURCE_DIR
+
+TOOLS_DIR="$SOURCE_DIR/tools"
+# note that find-build-dir.sh will return $BUILD_DIR if provided, else it will try to find it
+if get_bool "${NO_BUILD:=}"; then
+ BUILD_DIR="$SOURCE_DIR"
+elif ! BUILD_DIR="$("$TOOLS_DIR"/find-build-dir.sh)"; then
+ echo "ERROR: no build found, please set BUILD_DIR or use NO_BUILD" >&2
+ exit 1
+fi
+
+PATH_TO_INIT="$ROOTLIBDIR/systemd"
+SYSTEMD_JOURNALD="${SYSTEMD_JOURNALD:-$(command -v "$BUILD_DIR/systemd-journald" || command -v "$ROOTLIBDIR/systemd-journald")}"
+SYSTEMD_JOURNAL_REMOTE="${SYSTEMD_JOURNAL_REMOTE:-$(command -v "$BUILD_DIR/systemd-journal-remote" || command -v "$ROOTLIBDIR/systemd-journal-remote" || echo "")}"
+SYSTEMD="${SYSTEMD:-$(command -v "$BUILD_DIR/systemd" || command -v "$ROOTLIBDIR/systemd")}"
+SYSTEMD_NSPAWN="${SYSTEMD_NSPAWN:-$(command -v "$BUILD_DIR/systemd-nspawn" || command -v systemd-nspawn)}"
+JOURNALCTL="${JOURNALCTL:-$(command -v "$BUILD_DIR/journalctl" || command -v journalctl)}"
+SYSTEMCTL="${SYSTEMCTL:-$(command -v "$BUILD_DIR/systemctl" || command -v systemctl)}"
+
+TESTFILE="${BASH_SOURCE[1]}"
+if [ -z "$TESTFILE" ]; then
+ echo "ERROR: test-functions must be sourced from one of the TEST-*/test.sh scripts" >&2
+ exit 1
+fi
+TESTNAME="$(basename "$(dirname "$(realpath "$TESTFILE")")")"
+
+WORKDIR="/var/tmp/systemd-tests"
+if get_bool "${NO_BUILD:=}"; then
+ STATEDIR="$WORKDIR/$TESTNAME"
+else
+ STATEDIR="$BUILD_DIR/test/$TESTNAME"
+fi
+
+STATEFILE="$STATEDIR/.testdir"
+IMAGESTATEDIR="$STATEDIR/.."
+TESTLOG="$STATEDIR/test.log"
+
+if ! [[ "$TESTNAME" =~ ^TEST\-([0-9]+)\-.+$ ]]; then
+ echo "ERROR: Test name '$TESTNAME' is not in the expected format: TEST-[0-9]+-*" >&2
+ exit 1
+fi
+TESTID="${BASH_REMATCH[1]:?}"
+
+if [[ ! -f "$TEST_UNITS_DIR/testsuite-$TESTID.service" ]]; then
+    echo "ERROR: Test '$TESTNAME' is missing its service file '$TEST_UNITS_DIR/testsuite-$TESTID.service'" >&2
+ exit 1
+fi
+
+BASICTOOLS=(
+ awk
+ base64
+ basename
+ bash
+ capsh
+ cat
+ chgrp
+ chmod
+ chown
+ chroot
+ cmp
+ cp
+ cryptsetup
+ cut
+ date
+ dd
+ dhclient
+ diff
+ dirname
+ dmsetup
+ echo
+ env
+ false
+ find
+ findmnt
+ flock
+ getconf
+ getent
+ getfacl
+ getfattr
+    grep
+ gunzip
+ gzip
+ head
+ hostname
+ id
+ ionice
+ ip
+ jq
+ killall
+ ldd
+    ln
+ loadkeys
+ login
+ losetup
+ ls
+ lsattr
+ lsblk
+ lz4cat
+ mkdir
+ mkfifo
+ mknod
+ mktemp
+ modprobe
+ mount
+ mountpoint
+ mv
+ nc
+ nproc
+ ping
+ pkill
+ ps
+ readlink
+ realpath
+ rev
+ rm
+ rmdir
+ rmmod
+ route
+ script
+ sed
+ seq
+ setfacl
+ setfattr
+ setfont
+ setpriv
+ setsid
+ sfdisk
+ sh
+ sleep
+ sort
+ stat
+ stty
+ su
+ sulogin
+ sysctl
+ tail
+ tar
+ tee
+ test
+ timeout
+ touch
+ tr
+ true
+ truncate
+ tty
+ umount
+ uname
+ unshare
+ useradd
+ userdel
+ wc
+ whoami
+ xargs
+ xzcat
+)
+
+DEBUGTOOLS=(
+ df
+ dmesg
+ du
+ free
+ less
+ strace
+ vi
+ /usr/libexec/vi
+)
+
+is_built_with_asan() {
+ local _bin="${1:?}"
+
+ if ! type -P objdump >/dev/null; then
+        echo >&2 "Failed to find objdump, assuming systemd hasn't been built with ASan."
+ return 1
+ fi
+
+ # Borrowed from https://github.com/google/oss-fuzz/blob/cd9acd02f9d3f6e80011cc1e9549be526ce5f270/infra/base-images/base-runner/bad_build_check#L182
+ local _asan_calls
+ _asan_calls="$(objdump -dC "$_bin" | grep -E "(callq?|brasl?|bl)\s.+__asan" -c)"
+ if ((_asan_calls < 1000)); then
+ return 1
+ else
+ return 0
+ fi
+}
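+
+# Illustrative note (the exact output differs per arch/toolchain): the heuristic
+# above counts disassembly lines that call into the ASan runtime, e.g.:
+#
+#   callq  4012a0 <__asan_report_load8>
+#
+# Instrumented binaries contain thousands of such calls, hence the threshold.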
+
+is_built_with_coverage() {
+ if get_bool "${NO_BUILD:=}" || ! command -v meson >/dev/null; then
+ return 1
+ fi
+
+ meson configure "${BUILD_DIR:?}" | grep 'b_coverage' | awk '{ print $2 }' | grep -q 'true'
+}
+
+IS_BUILT_WITH_ASAN=$(is_built_with_asan "$SYSTEMD_JOURNALD" && echo yes || echo no)
+IS_BUILT_WITH_COVERAGE=$(is_built_with_coverage && echo yes || echo no)
+
+if get_bool "$IS_BUILT_WITH_ASAN"; then
+ PATH_TO_INIT="$ROOTLIBDIR/systemd-under-asan"
+ QEMU_MEM="${QEMU_MEM:-2G}"
+ QEMU_SMP="${QEMU_SMP:-4}"
+
+ # We need to correctly distinguish between gcc's and clang's ASan DSOs.
+ if ASAN_RT_NAME="$(awk '/libasan.so/ {x=$1; exit} END {print x; exit x==""}' < <(ldd "$SYSTEMD"))"; then
+ ASAN_COMPILER=gcc
+ ASAN_RT_PATH="$(readlink -f "$(${CC:-gcc} --print-file-name "$ASAN_RT_NAME")")"
+ elif ASAN_RT_NAME="$(awk '/libclang_rt.asan/ {x=$1; exit} END {print x; exit x==""}' < <(ldd "$SYSTEMD"))"; then
+ ASAN_COMPILER=clang
+ ASAN_RT_PATH="$(readlink -f "$(${CC:-clang} --print-file-name "$ASAN_RT_NAME")")"
+
+ # As clang's ASan DSO is usually in a non-standard path, let's check if
+ # the environment is set accordingly. If not, warn the user and exit.
+        # We're not setting LD_LIBRARY_PATH automagically here, because the
+        # user should encounter (and fix) the same issue when running the unit
+ # tests (meson test)
+ if ldd "$SYSTEMD" | grep -q "libclang_rt.asan.*not found"; then
+ echo >&2 "clang's ASan DSO ($ASAN_RT_NAME) is not present in the runtime library path"
+ echo >&2 "Consider setting LD_LIBRARY_PATH=${ASAN_RT_PATH%/*}"
+ exit 1
+ fi
+ else
+ echo >&2 "systemd is not linked against the ASan DSO"
+ echo >&2 "gcc does this by default, for clang compile with -shared-libasan"
+ exit 1
+ fi
+
+ echo "Detected ASan RT '$ASAN_RT_NAME' located at '$ASAN_RT_PATH'"
+fi
+
+test_require_bin() {
+ local bin
+
+ for bin in "$@"; do
+ if ! command -v "$bin" >/dev/null; then
+ echo "Required binary $bin not available, skipping the test"
+ exit 0
+ fi
+ done
+}
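+
+# Illustrative usage sketch: a test setup script can skip its test early when
+# a tool it depends on is missing, e.g.:
+#
+#   test_require_bin mksquashfs veritysetup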
+
+find_qemu_bin() {
+ QEMU_BIN="${QEMU_BIN:-""}"
+ # SUSE and Red Hat call the binary qemu-kvm. Debian and Gentoo call it kvm.
+ if get_bool "$QEMU_KVM"; then
+ [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v kvm qemu-kvm 2>/dev/null | grep '^/' -m1)"
+ fi
+
+ [[ -n "$ARCH" ]] || ARCH="$(uname -m)"
+ case $ARCH in
+ x86_64)
+ # QEMU's own build system calls it qemu-system-x86_64
+ [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-x86_64 2>/dev/null | grep '^/' -m1)"
+ ;;
+ i*86)
+ # new i386 version of QEMU
+ [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-i386 2>/dev/null | grep '^/' -m1)"
+
+ # i386 version of QEMU
+ [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu 2>/dev/null | grep '^/' -m1)"
+ ;;
+ ppc64*)
+ [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-ppc64 2>/dev/null | grep '^/' -m1)"
+ ;;
+ esac
+
+ if [[ ! -e "$QEMU_BIN" ]]; then
+ echo "Could not find a suitable qemu binary" >&2
+ return 1
+ fi
+}
+
+qemu_setup_swtpm_socket() {
+ local pid state_dir tpm_device
+
+ if ! tpm_device="$(qemu_get_tpm_device)"; then
+        dinfo "The detected QEMU version is too old for TPM2 on ppc64le"
+ exit 0
+ fi
+
+ state_dir="$(mktemp -d)"
+ swtpm socket --tpm2 --tpmstate dir="$state_dir" --ctrl type=unixio,path="$state_dir/sock" &
+ pid=$!
+ if ! kill -0 "$pid"; then
+ derror "Failed to start swtpm"
+ return 1
+ fi
+
+ if ! timeout 5 bash -c "until [[ -S $state_dir/sock ]]; do sleep .5; done"; then
+        derror "Failed to set up the swtpm socket"
+ return 1
+ fi
+
+ dinfo "Started swtpm as PID $pid with state dir $state_dir"
+
+ add_at_exit_handler "kill -TERM $pid 2>/dev/null; rm -rf '$state_dir'"
+
+ QEMU_OPTIONS+=" -chardev socket,id=chrtpm,path=$state_dir/sock -tpmdev emulator,id=tpm0,chardev=chrtpm -device $tpm_device,tpmdev=tpm0"
+ dinfo "Configured emulated TPM2 device $tpm_device"
+
+ return 0
+}
+
+qemu_get_tpm_device() {
+ local tpm_device="tpm-tis"
+
+ if [[ "$(uname -m)" == "ppc64le" ]]; then
+ # tpm-spapr support was introduced in qemu 5.0.0
+ if ! qemu_min_version "5.0.0"; then
+ return 1
+ fi
+
+ tpm_device="tpm-spapr"
+ fi
+
+ echo "$tpm_device"
+ return 0
+}
+
+# Compares argument #1=X.Y.Z (X, Y, Z numeric) to the version of the installed qemu
+# returns 0 if newer or equal
+# returns 1 if older
+# returns 2 on failure
+qemu_min_version() {
+ find_qemu_bin || return 2
+
+ # get version from binary
+ local qemu_ver
+ qemu_ver="$("$QEMU_BIN" --version | awk '/^QEMU emulator version ([0-9]*\.[0-9]*\.[0-9]*)/ {print $4}')"
+
+ # Check version string format
+ echo "$qemu_ver" | grep -q '^[0-9]*\.[0-9]*\.[0-9]*$' || return 2
+ echo "$1" | grep -q '^[0-9]*\.[0-9]*\.[0-9]*$' || return 2
+
+ # compare as last command to return that value
+ printf "%s\n%s\n" "$1" "$qemu_ver" | sort -V -C
+}
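+
+# Illustrative usage sketch (this is how qemu_get_tpm_device() above uses it):
+#
+#   if qemu_min_version "5.0.0"; then
+#       echo "qemu is at least 5.0.0"
+#   fi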
+
+# Pads a file to multiple of 4 bytes
+pad4_file() {
+ local size
+ size=$(stat -c "%s" "$1")
+ local padded
+ padded=$((((size + 3) / 4) * 4))
+ truncate -s "$padded" "$1"
+}
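+
+# Worked example of the rounding above: for a 10-byte file,
+# ((10 + 3) / 4) * 4 = (13 / 4) * 4 = 3 * 4 = 12 (integer division),
+# so truncate(1) grows the file to 12 bytes by appending zeroes.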
+
+# Return 0 if qemu did run (then you must check the result state/logs for actual
+# success), or 1 if qemu is not available.
+run_qemu() {
+ if declare -F run_qemu_hook >/dev/null; then
+ if ! run_qemu_hook "${workspace}"; then
+            derror "run_qemu_hook() returned with EC > 0"
+ ret=4
+ fi
+ fi
+
+ # If the test provided its own initrd, use it (e.g. TEST-24)
+ if [[ -z "$INITRD" && -f "${TESTDIR:?}/initrd.img" ]]; then
+ INITRD="$TESTDIR/initrd.img"
+ fi
+
+ if [ -f /etc/machine-id ]; then
+ read -r MACHINE_ID </etc/machine-id
+ [ -z "$INITRD" ] && [ -e "$EFI_MOUNT/$MACHINE_ID/$KERNEL_VER/initrd" ] \
+ && INITRD="$EFI_MOUNT/$MACHINE_ID/$KERNEL_VER/initrd"
+ [ -z "$KERNEL_BIN" ] && [ -e "$EFI_MOUNT/$MACHINE_ID/$KERNEL_VER/linux" ] \
+ && KERNEL_BIN="$EFI_MOUNT/$MACHINE_ID/$KERNEL_VER/linux"
+ fi
+
+ local CONSOLE=ttyS0
+
+ # Reset the boot counter, if present
+ rm -f "${initdir:?}/var/tmp/.systemd_reboot_count"
+ rm -f "$initdir"/{testok,failed,skipped}
+ # make sure the initdir is not mounted to avoid concurrent access
+ cleanup_initdir
+ umount_loopback
+
+ if [[ ! "$KERNEL_BIN" ]]; then
+ if get_bool "$LOOKS_LIKE_ARCH"; then
+ KERNEL_BIN=/boot/vmlinuz-linux
+ else
+ [ "$ARCH" ] || ARCH=$(uname -m)
+ case $ARCH in
+ ppc64*)
+                    # Ubuntu names the ppc64* kernel binary vmlinux-*, while RHEL/CentOS
+                    # uses the "standard" vmlinuz- prefix
+ [[ -e "/boot/vmlinux-$KERNEL_VER" ]] && KERNEL_BIN="/boot/vmlinux-$KERNEL_VER" || KERNEL_BIN="/boot/vmlinuz-$KERNEL_VER"
+ CONSOLE=hvc0
+ ;;
+ *)
+ KERNEL_BIN="/boot/vmlinuz-$KERNEL_VER"
+ ;;
+ esac
+ fi
+ fi
+
+ local default_fedora_initrd="/boot/initramfs-${KERNEL_VER}.img"
+ local default_debian_initrd="/boot/initrd.img-${KERNEL_VER}"
+ local default_arch_initrd="/boot/initramfs-linux-fallback.img"
+ local default_suse_initrd="/boot/initrd-${KERNEL_VER}"
+ if [[ ! "$INITRD" ]]; then
+ if [[ -e "$default_fedora_initrd" ]]; then
+ INITRD="$default_fedora_initrd"
+ elif get_bool "$LOOKS_LIKE_DEBIAN" && [[ -e "$default_debian_initrd" ]]; then
+ INITRD="$default_debian_initrd"
+ elif get_bool "$LOOKS_LIKE_ARCH" && [[ -e "$default_arch_initrd" ]]; then
+ INITRD="$default_arch_initrd"
+ elif get_bool "$LOOKS_LIKE_SUSE" && [[ -e "$default_suse_initrd" ]]; then
+ INITRD="$default_suse_initrd"
+ fi
+ fi
+
+    # If QEMU_SMP was not explicitly set, try to determine the value dynamically,
+    # i.e. use the number of online CPUs on the host machine. If the nproc utility
+    # is not installed or fails for some other reason, fall back to the original
+    # value (QEMU_SMP=1).
+ if [[ -z "${QEMU_SMP:=}" ]]; then
+ if ! QEMU_SMP=$(nproc); then
+ dwarn "nproc utility is not installed, falling back to QEMU_SMP=1"
+ QEMU_SMP=1
+ fi
+ fi
+
+ find_qemu_bin || return 1
+
+ if get_bool "${TEST_SETUP_SWTPM:-}"; then
+ qemu_setup_swtpm_socket || return 1
+ fi
+
+ # Umount initdir to avoid concurrent access to the filesystem
+ _umount_dir "$initdir"
+
+ local kernel_params=()
+ local qemu_options=()
+ local qemu_cmd=("$QEMU_BIN")
+
+ if [[ "$UNIFIED_CGROUP_HIERARCHY" = "yes" ]]; then
+ kernel_params+=("systemd.unified_cgroup_hierarchy=yes")
+ elif [[ "$UNIFIED_CGROUP_HIERARCHY" = "no" ]]; then
+ kernel_params+=("systemd.unified_cgroup_hierarchy=no" "systemd.legacy_systemd_cgroup_controller=yes")
+ elif [[ "$UNIFIED_CGROUP_HIERARCHY" = "hybrid" ]]; then
+ kernel_params+=("systemd.unified_cgroup_hierarchy=no" "systemd.legacy_systemd_cgroup_controller=no")
+ elif [[ "$UNIFIED_CGROUP_HIERARCHY" != "default" ]]; then
+ dfatal "Unknown UNIFIED_CGROUP_HIERARCHY. Got $UNIFIED_CGROUP_HIERARCHY, expected [yes|no|hybrid|default]"
+ exit 1
+ fi
+
+ if get_bool "$LOOKS_LIKE_SUSE"; then
+ kernel_params+=("rd.hostonly=0")
+ fi
+
+ # Debian/Ubuntu's initramfs tries to check if it can resume from hibernation
+ # and wastes a minute or so probing disks, skip that as it's not useful here
+ kernel_params+=(
+ "root=LABEL=systemd_boot"
+ "rw"
+ "raid=noautodetect"
+ "rd.luks=0"
+ "loglevel=2"
+ "init=$PATH_TO_INIT"
+ "console=$CONSOLE"
+ "SYSTEMD_UNIT_PATH=/usr/lib/systemd/tests/testdata/testsuite-$1.units:/usr/lib/systemd/tests/testdata/units:"
+ "systemd.unit=testsuite.target"
+ "systemd.wants=testsuite-$1.service"
+ "noresume"
+ "oops=panic"
+ ${TEST_MATCH_SUBTEST:+"systemd.setenv=TEST_MATCH_SUBTEST=$TEST_MATCH_SUBTEST"}
+ ${TEST_MATCH_TESTCASE:+"systemd.setenv=TEST_MATCH_TESTCASE=$TEST_MATCH_TESTCASE"}
+ )
+
+ if ! get_bool "$INTERACTIVE_DEBUG" && ! get_bool "$TEST_SKIP_SHUTDOWN"; then
+ kernel_params+=(
+ "panic=1"
+ "softlockup_panic=1"
+ "systemd.wants=end.service"
+ )
+ fi
+
+ [ -e "$IMAGE_PRIVATE" ] && image="$IMAGE_PRIVATE" || image="$IMAGE_PUBLIC"
+ qemu_options+=(
+ -smp "$QEMU_SMP"
+ -net none
+ -m "${QEMU_MEM:-768M}"
+ -nographic
+ -kernel "$KERNEL_BIN"
+ -drive "format=raw,cache=unsafe,file=$image"
+ -device "virtio-rng-pci,max-bytes=1024,period=1000"
+ )
+
+ if [[ -n "${QEMU_OPTIONS:=}" ]]; then
+ local user_qemu_options
+ read -ra user_qemu_options <<< "$QEMU_OPTIONS"
+ qemu_options+=("${user_qemu_options[@]}")
+ fi
+ qemu_options+=(${QEMU_OPTIONS_ARRAY:+"${QEMU_OPTIONS_ARRAY[@]}"})
+
+ if [[ -n "$INITRD" ]]; then
+ if [[ -n "$INITRD_EXTRA" ]]; then
+            # An additional initrd has been specified, let's combine it with the main one.
+ local t="$WORKDIR"/initrd.combined."$RANDOM"
+
+ # First, show contents of additional initrd
+ echo "Additional initrd contents:"
+ cpio -tv < "$INITRD_EXTRA"
+
+ # Copy the main initrd
+ zstd -d -c -f "$INITRD" > "$t"
+ add_at_exit_handler "rm $t"
+            # The kernel requires this to be padded to a multiple of 4 bytes with zeroes
+ pad4_file "$t"
+
+ # Copy the additional initrd
+ cat "$INITRD_EXTRA" >> "$t"
+ pad4_file "$t"
+
+ qemu_options+=(-initrd "$t")
+ else
+ qemu_options+=(-initrd "$INITRD")
+ fi
+ fi
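+
+    # Note on the combined initrd above: the kernel accepts several cpio
+    # archives simply concatenated, as long as each one is padded to a 4-byte
+    # boundary, which is what pad4_file() ensures. Roughly (with made-up
+    # file names):
+    #
+    #   zstd -d -c main.img >combined.img && pad4_file combined.img
+    #   cat extra.img >>combined.img && pad4_file combined.img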
+
+ # Let's use KVM if possible
+    if [[ -c /dev/kvm ]] && get_bool "$QEMU_KVM"; then
+ qemu_options+=(-machine "accel=kvm" -enable-kvm -cpu host)
+ fi
+
+ if [[ "$QEMU_TIMEOUT" != "infinity" ]]; then
+ qemu_cmd=(timeout --foreground "$QEMU_TIMEOUT" "$QEMU_BIN")
+ fi
+
+ (set -x; "${qemu_cmd[@]}" "${qemu_options[@]}" -append "${kernel_params[*]} ${KERNEL_APPEND:-}" |& tee "${TESTDIR:?}/console.log")
+ rc=$?
+ if [ "$rc" -eq 124 ] && [ "$QEMU_TIMEOUT" != "infinity" ]; then
+ derror "Test timed out after ${QEMU_TIMEOUT}s"
+ TIMED_OUT=1
+ else
+ [ "$rc" != 0 ] && derror "qemu failed with exit code $rc"
+ fi
+ return 0
+}
+
+# Return 0 if nspawn did run (then you must check the result state/logs for actual
+# success), or 1 if nspawn is not available.
+run_nspawn() {
+ [[ -d /run/systemd/system ]] || return 1
+ # Reset the boot counter, if present
+ rm -f "${initdir:?}/var/tmp/.systemd_reboot_count"
+ rm -f "${initdir:?}"/{testok,failed,skipped}
+
+ local nspawn_cmd=()
+ local nspawn_options=(
+ "--register=no"
+ "--kill-signal=SIGKILL"
+ "--directory=${1:?}"
+ "--setenv=SYSTEMD_UNIT_PATH=/usr/lib/systemd/tests/testdata/testsuite-$2.units:/usr/lib/systemd/tests/testdata/units:"
+ "--machine=TEST-$TESTID"
+ )
+ local kernel_params=(
+ "$PATH_TO_INIT"
+ "systemd.unit=testsuite.target"
+ "systemd.wants=testsuite-$2.service"
+ ${TEST_MATCH_SUBTEST:+"systemd.setenv=TEST_MATCH_SUBTEST=$TEST_MATCH_SUBTEST"}
+ ${TEST_MATCH_TESTCASE:+"systemd.setenv=TEST_MATCH_TESTCASE=$TEST_MATCH_TESTCASE"}
+ )
+
+ if get_bool "$INTERACTIVE_DEBUG"; then
+ nspawn_options+=("--console=interactive")
+ elif ! get_bool "$TEST_SKIP_SHUTDOWN"; then
+ kernel_params+=("systemd.wants=end.service")
+ fi
+
+ if [[ -n "${NSPAWN_ARGUMENTS:=}" ]]; then
+ local user_nspawn_arguments
+ read -ra user_nspawn_arguments <<< "$NSPAWN_ARGUMENTS"
+ nspawn_options+=("${user_nspawn_arguments[@]}")
+ fi
+
+ if [[ "$UNIFIED_CGROUP_HIERARCHY" = "hybrid" ]]; then
+ dwarn "nspawn doesn't support SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=hybrid, skipping"
+ exit
+ elif [[ "$UNIFIED_CGROUP_HIERARCHY" = "yes" || "$UNIFIED_CGROUP_HIERARCHY" = "no" ]]; then
+ nspawn_cmd+=(env "SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=$UNIFIED_CGROUP_HIERARCHY")
+ elif [[ "$UNIFIED_CGROUP_HIERARCHY" = "default" ]]; then
+ nspawn_cmd+=(env "--unset=UNIFIED_CGROUP_HIERARCHY" "--unset=SYSTEMD_NSPAWN_UNIFIED_HIERARCHY")
+ else
+ dfatal "Unknown UNIFIED_CGROUP_HIERARCHY. Got $UNIFIED_CGROUP_HIERARCHY, expected [yes|no|hybrid|default]"
+ exit 1
+ fi
+
+ if [[ "$NSPAWN_TIMEOUT" != "infinity" ]]; then
+ nspawn_cmd+=(timeout --foreground "$NSPAWN_TIMEOUT" "$SYSTEMD_NSPAWN")
+ else
+ nspawn_cmd+=("$SYSTEMD_NSPAWN")
+ fi
+
+ # Word splitting here is intentional
+ # shellcheck disable=SC2086
+ (set -x; "${nspawn_cmd[@]}" "${nspawn_options[@]}" "${kernel_params[@]}" ${KERNEL_APPEND:-} |& tee "${TESTDIR:?}/console.log")
+ rc=$?
+ if [ "$rc" -eq 124 ] && [ "$NSPAWN_TIMEOUT" != "infinity" ]; then
+ derror "Test timed out after ${NSPAWN_TIMEOUT}s"
+ TIMED_OUT=1
+ else
+ [ "$rc" != 0 ] && derror "nspawn failed with exit code $rc"
+ fi
+ return 0
+}
+
+# Build two very minimal root images, with two units each: one that is the same
+# and one that differs across them
+install_verity_minimal() {
+ dinfo "Set up a set of minimal images for verity verification"
+ if [ -e "$initdir/usr/share/minimal.raw" ]; then
+ return
+ fi
+ if ! command -v mksquashfs >/dev/null 2>&1; then
+ dfatal "mksquashfs not found"
+ exit 1
+ fi
+ if ! command -v veritysetup >/dev/null 2>&1; then
+ dfatal "veritysetup not found"
+ exit 1
+ fi
+    # Local modifications of some global variables are intentional in this
+ # subshell (SC2030)
+ # shellcheck disable=SC2030
+ (
+ BASICTOOLS=(
+ bash
+ cat
+ grep
+ mount
+ sleep
+ touch
+ )
+ oldinitdir="$initdir"
+ rm -rfv "$TESTDIR/minimal"
+ export initdir="$TESTDIR/minimal"
+ # app0 will use TemporaryFileSystem=/var/lib, app1 will need the mount point in the base image
+ mkdir -p "$initdir/usr/lib/systemd/system" "$initdir/usr/lib/extension-release.d" "$initdir/etc" "$initdir/var/tmp" "$initdir/opt" "$initdir/var/lib/app1"
+ setup_basic_dirs
+ install_basic_tools
+ install_ld_so_conf
+ # Shellcheck treats [[ -v VAR ]] as an assignment to avoid a different
+ # issue, thus falsely triggering SC2030 in this case
+ # See: koalaman/shellcheck#1409
+ if [[ -v ASAN_RT_PATH ]]; then
+ # If we're compiled with ASan, install the ASan RT (and its dependencies)
+ # into the verity images to get rid of the annoying errors about
+ # missing $LD_PRELOAD libraries.
+ inst_libs "$ASAN_RT_PATH"
+ inst_library "$ASAN_RT_PATH"
+ fi
+ cp "$os_release" "$initdir/usr/lib/os-release"
+ ln -s ../usr/lib/os-release "$initdir/etc/os-release"
+ touch "$initdir/etc/machine-id" "$initdir/etc/resolv.conf"
+ touch "$initdir/opt/some_file"
+ echo MARKER=1 >>"$initdir/usr/lib/os-release"
+ echo "PORTABLE_PREFIXES=app0 minimal minimal-app0" >>"$initdir/usr/lib/os-release"
+ cat >"$initdir/usr/lib/systemd/system/minimal-app0.service" <<EOF
+[Service]
+ExecStartPre=cat /usr/lib/os-release
+ExecStart=sleep 120
+EOF
+ cp "$initdir/usr/lib/systemd/system/minimal-app0.service" "$initdir/usr/lib/systemd/system/minimal-app0-foo.service"
+
+ mksquashfs "$initdir" "$oldinitdir/usr/share/minimal_0.raw" -noappend
+ veritysetup format "$oldinitdir/usr/share/minimal_0.raw" "$oldinitdir/usr/share/minimal_0.verity" | \
+ grep '^Root hash:' | cut -f2 | tr -d '\n' >"$oldinitdir/usr/share/minimal_0.roothash"
+
+ sed -i "s/MARKER=1/MARKER=2/g" "$initdir/usr/lib/os-release"
+ rm "$initdir/usr/lib/systemd/system/minimal-app0-foo.service"
+ cp "$initdir/usr/lib/systemd/system/minimal-app0.service" "$initdir/usr/lib/systemd/system/minimal-app0-bar.service"
+
+ mksquashfs "$initdir" "$oldinitdir/usr/share/minimal_1.raw" -noappend
+ veritysetup format "$oldinitdir/usr/share/minimal_1.raw" "$oldinitdir/usr/share/minimal_1.verity" | \
+ grep '^Root hash:' | cut -f2 | tr -d '\n' >"$oldinitdir/usr/share/minimal_1.roothash"
+
+ # Rolling distros like Arch do not set VERSION_ID
+ local version_id=""
+ if grep -q "^VERSION_ID=" "$os_release"; then
+ version_id="$(grep "^VERSION_ID=" "$os_release")"
+ fi
+
+ export initdir="$TESTDIR/app0"
+ mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system" "$initdir/opt"
+ grep "^ID=" "$os_release" >"$initdir/usr/lib/extension-release.d/extension-release.app0"
+ echo "${version_id}" >>"$initdir/usr/lib/extension-release.d/extension-release.app0"
+ ( echo "${version_id}"
+ echo "SYSEXT_IMAGE_ID=app" ) >>"$initdir/usr/lib/extension-release.d/extension-release.app0"
+ cat >"$initdir/usr/lib/systemd/system/app0.service" <<EOF
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/opt/script0.sh
+TemporaryFileSystem=/var/lib
+StateDirectory=app0
+RuntimeDirectory=app0
+EOF
+ cat >"$initdir/opt/script0.sh" <<EOF
+#!/bin/bash
+set -e
+test -e /usr/lib/os-release
+echo bar >\${STATE_DIRECTORY}/foo
+cat /usr/lib/extension-release.d/extension-release.app0
+EOF
+ chmod +x "$initdir/opt/script0.sh"
+ echo MARKER=1 >"$initdir/usr/lib/systemd/system/some_file"
+ mksquashfs "$initdir" "$oldinitdir/usr/share/app0.raw" -noappend
+
+ export initdir="$TESTDIR/conf0"
+ mkdir -p "$initdir/etc/extension-release.d" "$initdir/etc/systemd/system" "$initdir/opt"
+ grep "^ID=" "$os_release" >"$initdir/etc/extension-release.d/extension-release.conf0"
+ echo "${version_id}" >>"$initdir/etc/extension-release.d/extension-release.conf0"
+ ( echo "${version_id}"
+ echo "CONFEXT_IMAGE_ID=app" ) >>"$initdir/etc/extension-release.d/extension-release.conf0"
+ echo MARKER_1 >"$initdir/etc/systemd/system/some_file"
+ mksquashfs "$initdir" "$oldinitdir/usr/share/conf0.raw" -noappend
+
+ export initdir="$TESTDIR/app1"
+ mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system" "$initdir/opt"
+ grep "^ID=" "$os_release" >"$initdir/usr/lib/extension-release.d/extension-release.app2"
+ ( echo "${version_id}"
+ echo "SYSEXT_SCOPE=portable"
+ echo "SYSEXT_IMAGE_ID=app"
+ echo "SYSEXT_IMAGE_VERSION=1"
+ echo "PORTABLE_PREFIXES=app1" ) >>"$initdir/usr/lib/extension-release.d/extension-release.app2"
+ setfattr -n user.extension-release.strict -v false "$initdir/usr/lib/extension-release.d/extension-release.app2"
+ cat >"$initdir/usr/lib/systemd/system/app1.service" <<EOF
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=/opt/script1.sh
+StateDirectory=app1
+RuntimeDirectory=app1
+EOF
+ cat >"$initdir/opt/script1.sh" <<EOF
+#!/bin/bash
+set -e
+test -e /usr/lib/os-release
+echo baz >\${STATE_DIRECTORY}/foo
+cat /usr/lib/extension-release.d/extension-release.app2
+EOF
+ chmod +x "$initdir/opt/script1.sh"
+ echo MARKER=1 >"$initdir/usr/lib/systemd/system/other_file"
+ mksquashfs "$initdir" "$oldinitdir/usr/share/app1.raw" -noappend
+
+ export initdir="$TESTDIR/app-nodistro"
+ mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system"
+ ( echo "ID=_any"
+ echo "ARCHITECTURE=_any" ) >"$initdir/usr/lib/extension-release.d/extension-release.app-nodistro"
+ echo MARKER=1 >"$initdir/usr/lib/systemd/system/some_file"
+ mksquashfs "$initdir" "$oldinitdir/usr/share/app-nodistro.raw" -noappend
+
+ export initdir="$TESTDIR/service-scoped-test"
+ mkdir -p "$initdir/etc/extension-release.d" "$initdir/etc/systemd/system"
+ ( echo "ID=_any"
+ echo "ARCHITECTURE=_any" ) >"$initdir/etc/extension-release.d/extension-release.service-scoped-test"
+ echo MARKER_CONFEXT_123 >"$initdir/etc/systemd/system/some_file"
+ mksquashfs "$initdir" "$oldinitdir/etc/service-scoped-test.raw" -noappend
+
+ # We need to create a dedicated sysext image to test the reload mechanism. If we share an image to install the
+ # 'foo.service' it will be loaded from another test run, which will impact the targeted test.
+ export initdir="$TESTDIR/app-reload"
+ mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system"
+ ( echo "ID=_any"
+ echo "ARCHITECTURE=_any"
+ echo "EXTENSION_RELOAD_MANAGER=1" ) >"$initdir/usr/lib/extension-release.d/extension-release.app-reload"
+ mkdir -p "$initdir/usr/lib/systemd/system/multi-user.target.d"
+ cat >"${initdir}/usr/lib/systemd/system/foo.service" <<EOF
+[Service]
+Type=oneshot
+RemainAfterExit=yes
+ExecStart=echo foo
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ { echo "[Unit]"; echo "Upholds=foo.service"; } > "$initdir/usr/lib/systemd/system/multi-user.target.d/10-foo-service.conf"
+ mksquashfs "$initdir" "$oldinitdir/usr/share/app-reload.raw" -noappend
+ )
+}
+
+setup_basic_environment() {
+ # create the basic filesystem layout
+ setup_basic_dirs
+
+ install_systemd
+ install_missing_libraries
+ install_config_files
+ install_zoneinfo
+ create_rc_local
+ install_basic_tools
+ install_libnss
+ install_pam
+ install_dbus
+ install_fonts
+ install_locales
+ install_keymaps
+ install_x11_keymaps
+ install_terminfo
+ install_execs
+ install_fs_tools
+ install_modules
+ install_plymouth
+ install_haveged
+ install_debug_tools
+ install_ld_so_conf
+ install_testuser
+ has_user_dbus_socket && install_user_dbus
+ setup_selinux
+ install_depmod_files
+ generate_module_dependencies
+ if get_bool "$IS_BUILT_WITH_ASAN"; then
+ create_asan_wrapper
+ fi
+ if get_bool "$TEST_INSTALL_VERITY_MINIMAL"; then
+ install_verity_minimal
+ fi
+}
+
+setup_selinux() {
+ dinfo "Setup SELinux"
+ # don't forget KERNEL_APPEND='... selinux=1 ...'
+ if ! get_bool "$SETUP_SELINUX"; then
+ dinfo "SETUP_SELINUX != yes, skipping SELinux configuration"
+ return 0
+ fi
+
+ for dir in /etc/selinux /usr/share/selinux; do
+ rm -rf "${initdir:?}/$dir"
+ if ! cp -ar "$dir" "$initdir/$dir"; then
+ dfatal "Failed to copy $dir"
+ exit 1
+ fi
+ done
+
+ # We use a custom autorelabel service instead of the SELinux provided set
+ # of units & a generator, since the generator overrides the default target
+ # to the SELinux one when it detects /.autorelabel. However, we use
+    # systemd.unit= on the kernel command line, which always takes precedence,
+ # rendering all SELinux efforts useless. Also, pulling in selinux-autorelabel.service
+ # explicitly doesn't work either, as it doesn't check for the presence of /.autorelabel
+ # and does the relabeling unconditionally which always ends with a reboot, so
+ # we end up in a reboot loop (and it also spews quite a lot of errors as it
+ # wants /etc/fstab and dracut-initramfs-restore).
+ touch "$initdir/.autorelabel"
+ mkdir -p "$initdir/usr/lib/systemd/tests/testdata/units/basic.target.wants"
+ ln -sf ../autorelabel.service "$initdir/usr/lib/systemd/tests/testdata/units/basic.target.wants/"
+
+    # Tools required by fixfiles
+ image_install awk bash cat chcon expr egrep find grep head secon setfiles rm sort uname uniq
+ image_install fixfiles getenforce load_policy selinuxenabled sestatus
+}
+
+install_valgrind() {
+ if ! type -p valgrind; then
+ dfatal "Failed to install valgrind"
+ exit 1
+ fi
+
+ local valgrind_bins valgrind_libs valgrind_supp
+
+ readarray -t valgrind_bins < <(strace -e execve valgrind /bin/true 2>&1 >/dev/null |
+ sed -r -n 's/execve\("([^"]*)".*/\1/p')
+ image_install "${valgrind_bins[@]}"
+
+ readarray -t valgrind_libs < <(LD_DEBUG=files valgrind /bin/true 2>&1 >/dev/null |
+ sed -r -n 's|.*calling init: (/.*vgpreload_.*)|\1|p')
+ image_install "${valgrind_libs[@]}"
+
+ readarray -t valgrind_supp < <(strace -e open valgrind /bin/true 2>&1 >/dev/null |
+ sed -r -n 's,open\("([^"]*(/debug[^"]*|\.supp))".*= [0-9].*,\1,p')
+ image_install "${valgrind_supp[@]}"
+}
+
+create_valgrind_wrapper() {
+ local valgrind_wrapper="$initdir/$ROOTLIBDIR/systemd-under-valgrind"
+ ddebug "Create $valgrind_wrapper"
+ cat >"$valgrind_wrapper" <<EOF
+#!/usr/bin/env bash
+
+mount -t proc proc /proc
+exec valgrind --leak-check=full --track-fds=yes --log-file=/valgrind.out $ROOTLIBDIR/systemd "\$@"
+EOF
+ chmod 0755 "$valgrind_wrapper"
+}
+
+create_asan_wrapper() {
+ local asan_wrapper default_asan_options default_ubsan_options default_environment manager_environment
+
+ [[ -z "$ASAN_RT_PATH" ]] && dfatal "ASAN_RT_PATH is empty, but it shouldn't be"
+
+ default_asan_options="${ASAN_OPTIONS:-strict_string_checks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1}"
+ default_ubsan_options="${UBSAN_OPTIONS:-print_stacktrace=1:print_summary=1:halt_on_error=1}"
+
+ if [[ "$ASAN_COMPILER" == "clang" ]]; then
+ # clang: install llvm-symbolizer to generate useful reports
+ # See: https://clang.llvm.org/docs/AddressSanitizer.html#symbolizing-the-reports
+ image_install "llvm-symbolizer"
+
+ # Let's add the ASan DSO's path to the dynamic linker's cache. This is pretty
+        # unnecessary for gcc & libasan; for clang, however, this is crucial, as its
+ # runtime ASan DSO is in a non-standard (library) path.
+ mkdir -p "${initdir:?}/etc/ld.so.conf.d/"
+ echo "${ASAN_RT_PATH%/*}" >"${initdir:?}/etc/ld.so.conf.d/asan-path-override.conf"
+ ldconfig -r "$initdir"
+ fi
+
+ # Create a simple environment file which can be included by systemd services
+ # that need it (i.e. services that utilize DynamicUser=true and bash, etc.)
+ cat >"${initdir:?}/usr/lib/systemd/systemd-asan-env" <<EOF
+LD_PRELOAD=$ASAN_RT_PATH
+ASAN_OPTIONS=$default_asan_options
+LSAN_OPTIONS=detect_leaks=0
+UBSAN_OPTIONS=$default_ubsan_options
+EOF
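+
+    # Illustrative sketch of how a unit consumes this env file (the same
+    # pattern is used for iscsi-init.service in install_iscsi() below):
+    #
+    #   [Service]
+    #   EnvironmentFile=/usr/lib/systemd/systemd-asan-env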
+
+ default_environment=(
+ "ASAN_OPTIONS='$default_asan_options'"
+ "UBSAN_OPTIONS='$default_ubsan_options'"
+ "ASAN_RT_PATH='$ASAN_RT_PATH'"
+ )
+ manager_environment=(
+ "ASAN_OPTIONS='$default_asan_options:log_path=/systemd-pid1.asan.log:log_to_syslog=1'"
+ "UBSAN_OPTIONS='$default_ubsan_options:log_path=/systemd-pid1.ubsan.log:log_to_syslog=1'"
+ "ASAN_RT_PATH='$ASAN_RT_PATH'"
+ )
+
+ mkdir -p "${initdir:?}/etc/systemd/system.conf.d/"
+ cat >"${initdir:?}/etc/systemd/system.conf.d/asan.conf" <<EOF
+[Manager]
+DefaultEnvironment=${default_environment[*]}
+ManagerEnvironment=${manager_environment[*]}
+DefaultTimeoutStartSec=180s
+EOF
+
+ # ASAN and syscall filters aren't compatible with each other.
+ find "${initdir:?}" -name '*.service' -type f -print0 | xargs -0 sed -i 's/^\(MemoryDeny\|SystemCall\)/#\1/'
+
+ mkdir -p "${initdir:?}/etc/systemd/system/systemd-journald.service.d/"
+ cat >"${initdir:?}/etc/systemd/system/systemd-journald.service.d/asan-env.conf" <<EOF
+[Service]
+# The redirection of ASAN reports to a file prevents them from ending up in /dev/null.
+# But, apparently, sometimes it doesn't work: https://github.com/google/sanitizers/issues/886.
+Environment=ASAN_OPTIONS=$default_asan_options:log_path=/systemd-journald.asan.log UBSAN_OPTIONS=$default_ubsan_options:log_path=/systemd-journald.ubsan.log
+
+# Sometimes UBSan sends its reports to stderr regardless of what is specified in log_path
+# Let's try to catch them by redirecting stderr (and stdout just in case) to a file
+# See https://github.com/systemd/systemd/pull/12524#issuecomment-491108821
+StandardOutput=file:/systemd-journald.out
+EOF
+
+ # 90s isn't enough for some services to finish when literally everything is run
+ # under ASan+UBSan in containers, which, in turn, are run in VMs.
+ # Let's limit which environments such services should be executed in.
+ mkdir -p "${initdir:?}/etc/systemd/system/systemd-hwdb-update.service.d/"
+ cat >"${initdir:?}/etc/systemd/system/systemd-hwdb-update.service.d/asan.conf" <<EOF
+[Unit]
+ConditionVirtualization=container
+
+[Service]
+TimeoutSec=240s
+EOF
+
+ # Let's override another hard-coded timeout that kicks in too early
+ mkdir -p "${initdir:?}/etc/systemd/system/systemd-journal-flush.service.d/"
+ cat >"${initdir:?}/etc/systemd/system/systemd-journal-flush.service.d/asan.conf" <<EOF
+[Service]
+TimeoutSec=180s
+EOF
+
+ asan_wrapper="${initdir:?}/${PATH_TO_INIT:?}"
+ # Sanity check to make sure we don't overwrite something we shouldn't.
+ [[ "$asan_wrapper" =~ systemd-under-asan$ ]]
+
+ cat >"$asan_wrapper" <<EOF
+#!/usr/bin/env bash
+set -eux
+
+export PATH="/sbin:/bin:/usr/sbin:/usr/bin"
+export ${manager_environment[@]}
+[[ -n "\$ASAN_OPTIONS" && -n "\$UBSAN_OPTIONS" ]]
+
+exec "$ROOTLIBDIR/systemd" "\$@"
+EOF
+ chmod 0755 "$asan_wrapper"
+}
+
+create_strace_wrapper() {
+ local strace_wrapper="$initdir/$ROOTLIBDIR/systemd-under-strace"
+ ddebug "Create $strace_wrapper"
+ cat >"$strace_wrapper" <<EOF
+#!/usr/bin/env bash
+
+exec strace -f -D -o /strace.out "$ROOTLIBDIR/systemd" "\$@"
+EOF
+ chmod 0755 "$strace_wrapper"
+}
+
+install_fs_tools() {
+ dinfo "Install fsck"
+ image_install /sbin/fsck*
+ image_install -o /bin/fsck*
+
+    # fsck.reiserfs calls reiserfsck, so install it
+ image_install -o reiserfsck
+
+    # we use mkfs in the systemd-repart tests
+ image_install /sbin/mkfs.ext4
+ image_install /sbin/mkfs.vfat
+}
+
+install_modules() {
+ dinfo "Install modules"
+
+ instmods bridge dummy ext4 ipvlan macvlan vfat veth
+ instmods loop =block
+ instmods nls_ascii =nls
+ instmods overlay =overlayfs
+ instmods scsi_debug
+
+ if get_bool "$LOOKS_LIKE_SUSE"; then
+ instmods af_packet
+ fi
+}
+
+install_dmevent() {
+ instmods dm_crypt =crypto
+ inst_binary dmeventd
+ image_install "${ROOTLIBDIR:?}"/system/dm-event.{service,socket}
+ if get_bool "$LOOKS_LIKE_DEBIAN"; then
+        # dmsetup installs 55-dm.rules and 60-persistent-storage-dm.rules on Debian/Ubuntu,
+        # and since buster/bionic also 95-dm-notify.rules
+ # see https://gitlab.com/debian-lvm/lvm2/blob/master/debian/patches/udev.patch
+ inst_rules 55-dm.rules 60-persistent-storage-dm.rules 95-dm-notify.rules
+ else
+ inst_rules 10-dm.rules 13-dm-disk.rules 95-dm-notify.rules
+ fi
+ if get_bool "$LOOKS_LIKE_SUSE"; then
+ inst_rules 60-persistent-storage.rules 61-persistent-storage-compat.rules 99-systemd.rules
+ fi
+}
+
+install_multipath() {
+ instmods "=md" multipath
+ image_install kpartx /lib/udev/kpartx_id lsmod mpathpersist multipath multipathd partx
+ image_install "${ROOTLIBDIR:?}"/system/multipathd.{service,socket}
+ if get_bool "$LOOKS_LIKE_DEBIAN"; then
+ # Note: try both 60-kpartx.rules (as seen on Debian Sid with 0.9.4-7) and 90-kpartx.rules (as seen on
+ # Ubuntu Jammy with 0.8.8-1ubuntu1.22.04.4)
+ inst_rules 56-dm-parts.rules 56-dm-mpath.rules 60-kpartx.rules 60-multipath.rules 68-del-part-nodes.rules 90-kpartx.rules
+ else
+ inst_rules 11-dm-mpath.rules 11-dm-parts.rules 62-multipath.rules 66-kpartx.rules 68-del-part-nodes.rules
+ fi
+ mkdir -p "${initdir:?}/etc/multipath"
+
+ local file
+ while read -r file; do
+ # Install libraries required by the given library
+ inst_libs "$file"
+ # Install the library itself and create necessary symlinks
+ inst_library "$file"
+ done < <(find /lib*/multipath -type f)
+}
+
+install_lvm() {
+ local lvm_rules rule_prefix
+
+ image_install lvm
+ image_install "${ROOTLIBDIR:?}"/system/lvm2-lvmpolld.{service,socket}
+ image_install "${ROOTLIBDIR:?}"/system/{blk-availability,lvm2-monitor}.service
+ image_install -o "/lib/tmpfiles.d/lvm2.conf"
+
+ if get_bool "$LOOKS_LIKE_DEBIAN"; then
+ lvm_rules="56-lvm.rules"
+ rule_prefix=""
+ else
+ lvm_rules="11-dm-lvm.rules"
+ rule_prefix="dm-"
+ fi
+
+ # Support the new udev autoactivation introduced in lvm 2.03.14
+ # https://sourceware.org/git/?p=lvm2.git;a=commit;h=67722b312390cdab29c076c912e14bd739c5c0f6
+ # Static autoactivation (via lvm2-activation-generator) was dropped
+ # in lvm 2.03.15
+ # https://sourceware.org/git/?p=lvm2.git;a=commit;h=ee8fb0310c53ed003a43b324c99cdfd891dd1a7c
+ if [[ -f "/lib/udev/rules.d/69-${rule_prefix}lvm.rules" ]]; then
+ inst_rules "$lvm_rules" "69-${rule_prefix}lvm.rules"
+ else
+ image_install "${ROOTLIBDIR:?}"/system-generators/lvm2-activation-generator
+ image_install "${ROOTLIBDIR:?}"/system/lvm2-pvscan@.service
+ inst_rules "$lvm_rules" "69-${rule_prefix}lvm-metad.rules"
+ fi
+
+ mkdir -p "${initdir:?}/etc/lvm"
+}
+
+host_has_btrfs() (
+ set -e
+ modprobe -nv btrfs && command -v mkfs.btrfs && command -v btrfs || return $?
+)
+
+install_btrfs() {
+ instmods btrfs
+ # Not all utilities provided by btrfs-progs are listed here; extend the list
+ # if necessary
+ image_install btrfs btrfstune mkfs.btrfs
+ inst_rules 64-btrfs-dm.rules
+}
+
+install_iscsi() {
+ # Install both client and server side stuff by default
+ local inst="${1:-}"
+ local file
+
+ # Install client-side stuff ("initiator" in iSCSI jargon) - Open-iSCSI in this case
+ # (open-iscsi on Debian, iscsi-initiator-utils on Fedora, etc.)
+ if [[ -z "$inst" || "$inst" =~ (client|initiator) ]]; then
+ image_install iscsi-iname iscsiadm iscsid iscsistart
+ image_install -o "${ROOTLIBDIR:?}"/system/iscsi-{init,onboot,shutdown}.service
+ image_install "${ROOTLIBDIR:?}"/system/iscsid.{service,socket}
+ image_install "${ROOTLIBDIR:?}"/system/iscsi.service
+ mkdir -p "${initdir:?}"/var/lib/iscsi/{ifaces,isns,nodes,send_targets,slp,static}
+ mkdir -p "${initdir:?}/etc/iscsi"
+ echo "iscsid.startup = /bin/systemctl start iscsid.socket" >"${initdir:?}/etc/iscsi/iscsid.conf"
+ # Since open-iscsi 2.1.2 [0] the initiator name should be generated via
+ # a one-time service instead of distro package's post-install scripts.
+ # However, some distros still use this approach even after this patch,
+ # so prefer the already existing initiatorname.iscsi file if it exists.
+ #
+ # [0] https://github.com/open-iscsi/open-iscsi/commit/f37d5b653f9f251845db3f29b1a3dcb90ec89731
+ if [[ ! -e /etc/iscsi/initiatorname.iscsi ]]; then
+ image_install "${ROOTLIBDIR:?}"/system/iscsi-init.service
+ if get_bool "$IS_BUILT_WITH_ASAN"; then
+ # The iscsi-init.service calls `sh` which might, in certain circumstances,
+ # pull in instrumented systemd NSS modules causing `sh` to fail. Let's mitigate
+ # this by pulling in an env file crafted by `create_asan_wrapper()` that
+ # (among others) pre-loads ASan's DSO.
+ mkdir -p "${initdir:?}/etc/systemd/system/iscsi-init.service.d/"
+ printf "[Service]\nEnvironmentFile=/usr/lib/systemd/systemd-asan-env" >"${initdir:?}/etc/systemd/system/iscsi-init.service.d/asan-env.conf"
+ fi
+ else
+ inst_simple "/etc/iscsi/initiatorname.iscsi"
+ fi
+ fi
+
+ # Install server-side stuff ("target" in iSCSI jargon) - TGT in this case
+ # (tgt on Debian, scsi-target-utils on Fedora, etc.)
+ if [[ -z "$inst" || "$inst" =~ (server|target) ]]; then
+ image_install tgt-admin tgt-setup-lun tgtadm tgtd tgtimg
+ image_install -o /etc/sysconfig/tgtd
+ image_install "${ROOTLIBDIR:?}"/system/tgtd.service
+ mkdir -p "${initdir:?}/etc/tgt"
+ touch "${initdir:?}"/etc/tgt/{tgtd,targets}.conf
+ # Install perl modules required by tgt-admin
+ #
+ # Forgive me father for I have sinned. The monstrosity below appends
+ # a perl snippet to the `tgt-admin` perl script on the fly, which
+ # dumps a list of files (perl modules) required by `tgt-admin` at
+ # the runtime plus any DSOs loaded via DynaLoader. This list is then
+ # passed to `inst_simple` which installs the necessary files into the image
+ #
+ # shellcheck disable=SC2016
+ while read -r file; do
+ inst_simple "$file"
+ done < <(perl -- <(cat "$(command -v tgt-admin)" <(echo -e 'use DynaLoader; print map { "$_\n" } values %INC; print join("\n", @DynaLoader::dl_shared_objects)')) -p | awk '/^\// { print $1 }')
+ fi
+}
+
+host_has_mdadm() (
+ set -e
+ command -v mdadm || return $?
+)
+
+install_mdadm() {
+ local unit
+ local mdadm_units=(
+ system/mdadm-grow-continue@.service
+ system/mdadm-last-resort@.service
+ system/mdadm-last-resort@.timer
+ system/mdmon@.service
+ system/mdmonitor-oneshot.service
+ system/mdmonitor-oneshot.timer
+ system/mdmonitor.service
+ system-shutdown/mdadm.shutdown
+ )
+
+ instmods "=md"
+ image_install mdadm mdmon
+ inst_rules 01-md-raid-creating.rules 63-md-raid-arrays.rules 64-md-raid-assembly.rules 69-md-clustered-confirm-device.rules
+ # Fedora/CentOS/RHEL ships this rule file
+ [[ -f /lib/udev/rules.d/65-md-incremental.rules ]] && inst_rules 65-md-incremental.rules
+
+ for unit in "${mdadm_units[@]}"; do
+ image_install "${ROOTLIBDIR:?}/$unit"
+ done
+
+ # Disable the mdmonitor service, since it fails if there's no valid email address
+ # configured in /etc/mdadm.conf, which just unnecessarily pollutes the logs
+ "${SYSTEMCTL:?}" mask --root "${initdir:?}" mdmonitor.service || :
+}
+
+install_compiled_systemd() {
+ dinfo "Install compiled systemd"
+
+ local ninja_bin
+ ninja_bin="$(type -P ninja || type -P ninja-build)"
+ if [[ -z "$ninja_bin" ]]; then
+ dfatal "ninja was not found"
+ exit 1
+ fi
+ (set -x; DESTDIR="$initdir" "$ninja_bin" -C "$BUILD_DIR" install)
+
+ # If we are doing coverage runs, copy over the binary notes files, as lcov expects to
+ # find them in the same directory as the runtime data counts
+ if get_bool "$IS_BUILT_WITH_COVERAGE"; then
+ mkdir -p "${initdir}/${BUILD_DIR:?}/"
+ rsync -am --include='*/' --include='*.gcno' --exclude='*' "${BUILD_DIR:?}/" "${initdir}/${BUILD_DIR:?}/"
+ # Set effective & default ACLs for the build dir so unprivileged
+ # processes can write gcda files with coverage stats
+ setfacl -R -m 'd:o:rwX' -m 'o:rwX' "${initdir}/${BUILD_DIR:?}/"
+ fi
+}
+
+install_package_file() {
+ local file="${1:?}"
+
+ # Skip missing files (like /etc/machine-info)
+ [[ ! -e "$file" ]] && return 0
+ # Skip python unit tests, since the image_install machinery will try to pull
+ # in the whole python stack in a very questionable state, making the tests fail.
+ # And given we're trying to transition to mkosi-based images anyway I'm not even
+ # going to bother
+ [[ "$file" =~ /tests/unit-tests/.*.py$ ]] && return 0
+ # If the current file is a directory, create it with the original
+ # mode; if it's a symlink to a directory, copy it as-is
+ if [[ -d "$file" ]]; then
+ inst_dir "$file"
+ else
+ inst "$file"
+ fi
+}
+
+install_debian_systemd() {
+ dinfo "Install debian systemd"
+
+ local deb file
+
+ while read -r deb; do
+ ddebug "Install debian files from package $deb"
+ while read -r file; do
+ install_package_file "$file"
+ done < <(dpkg-query -L "$deb" 2>/dev/null)
+ done < <(grep -E '^Package:' "${SOURCE_DIR}/debian/control" | cut -d ':' -f 2)
+}
+
+install_rpm() {
+ local rpm="${1:?}"
+ local file
+
+ if ! rpm -q "$rpm" >/dev/null; then
+ derror "RPM $rpm is not installed"
+ return 1
+ fi
+
+ dinfo "Installing contents of RPM $rpm"
+ while read -r file; do
+ install_package_file "$file"
+ done < <(rpm -ql "$rpm")
+}
+
+install_suse_systemd() {
+ local pkgs
+
+ dinfo "Install basic filesystem structure"
+ install_rpm filesystem
+
+ dinfo "Install SUSE systemd"
+
+ pkgs=(
+ systemd
+ systemd-boot
+ systemd-container
+ systemd-coredump
+ systemd-experimental
+ systemd-homed
+ systemd-journal-remote
+ # Since commit fb6f25d7b979134a, systemd-resolved, which is shipped by
+        # the systemd-network sub-package on openSUSE, has its own testsuite.
+ systemd-network
+ systemd-portable
+ udev
+ )
+
+ for p in "${pkgs[@]}"; do
+ rpm -q "$p" &>/dev/null || continue
+
+ install_rpm "$p"
+ done
+
+ dinfo "Install the data needed by the tests at runtime"
+ inst_recursive "${SOURCE_DIR}/testdata"
+ inst_recursive "${SOURCE_DIR}/unit-tests/manual"
+
+ # On openSUSE, this directory is not created at package install, at least
+ # for now.
+ mkdir -p "$initdir/var/log/journal/remote"
+}
+
+install_fedora_systemd() {
+ local required_packages=(
+ systemd
+ systemd-container
+ systemd-libs
+ systemd-pam
+ systemd-tests
+ systemd-udev
+ )
+ local optional_packages=(
+ systemd-boot-unsigned
+ systemd-bootchart
+ systemd-journal-remote
+ systemd-networkd
+ systemd-oomd-defaults
+ systemd-resolved
+ )
+ local package
+
+ for package in "${required_packages[@]}"; do
+ install_rpm "$package"
+ done
+
+ for package in "${optional_packages[@]}"; do
+ rpm -q "$package" >/dev/null || continue
+ install_rpm "$package"
+ done
+}
+
+install_distro_systemd() {
+ dinfo "Install distro systemd"
+
+ if get_bool "$LOOKS_LIKE_DEBIAN"; then
+ install_debian_systemd
+ elif get_bool "$LOOKS_LIKE_SUSE"; then
+ install_suse_systemd
+ elif get_bool "$LOOKS_LIKE_FEDORA"; then
+ install_fedora_systemd
+ else
+ dfatal "NO_BUILD not supported for this distro"
+ exit 1
+ fi
+}
+
+install_systemd() {
+ dinfo "Install systemd"
+ if get_bool "$NO_BUILD"; then
+ install_distro_systemd
+ else
+ install_compiled_systemd
+ fi
+
+ # Remove unneeded documentation
+ rm -fr "${initdir:?}"/usr/share/{man,doc}
+
+ # Enable debug logging in PID1
+ mkdir -p "$initdir/etc/systemd/system.conf.d/"
+ echo -ne "[Manager]\nLogLevel=debug\n" >"$initdir/etc/systemd/system.conf.d/10-log-level.conf"
+ if [[ -n "$TEST_SYSTEMD_LOG_LEVEL" ]]; then
+ echo DefaultEnvironment=SYSTEMD_LOG_LEVEL="$TEST_SYSTEMD_LOG_LEVEL" >>"$initdir/etc/systemd/system.conf.d/99-log-level.conf"
+ fi
+ # Enable debug logging for user instances as well
+ mkdir -p "$initdir/etc/systemd/user.conf.d/"
+ echo -ne "[Manager]\nLogLevel=debug\n" >"$initdir/etc/systemd/user.conf.d/10-log-level.conf"
+ # Store coredumps in journal
+ mkdir -p "$initdir/etc/systemd/coredump.conf.d/"
+ echo -ne "[Coredump]\nStorage=journal\n" >"$initdir/etc/systemd/coredump.conf.d/10-storage-journal.conf"
+ # Propagate SYSTEMD_UNIT_PATH to user systemd managers
+ mkdir -p "$initdir/etc/systemd/system/user@.service.d/"
+ echo -ne "[Service]\nPassEnvironment=SYSTEMD_UNIT_PATH\n" >"$initdir/etc/systemd/system/user@.service.d/99-SYSTEMD_UNIT_PATH.conf"
+
+ # When built with gcov, disable ProtectSystem= and ProtectHome= in the test
+    # images, since it prevents gcov from writing the coverage reports (*.gcda
+ # files)
+ if get_bool "$IS_BUILT_WITH_COVERAGE"; then
+ mkdir -p "$initdir/etc/systemd/system/service.d/"
+ echo -ne "[Service]\nProtectSystem=no\nProtectHome=no\n" >"$initdir/etc/systemd/system/service.d/99-gcov-override.conf"
+ # Similarly, set ReadWritePaths= to the $BUILD_DIR in the test image to make the coverage work with
+        # units using DynamicUser=yes. Do this only for services with the test- prefix and a couple of
+        # services known to use DynamicUser=yes, since setting this system-wide has many undesirable
+        # side effects, as it creates its own namespace.
+ for service in test- systemd-journal-{gatewayd,upload}; do
+ mkdir -p "$initdir/etc/systemd/system/$service.service.d/"
+ echo -ne "[Service]\nReadWritePaths=${BUILD_DIR:?}\n" >"$initdir/etc/systemd/system/$service.service.d/99-gcov-rwpaths-override.conf"
+ done
+ # Ditto, but for the user daemon
+ mkdir -p "$initdir/etc/systemd/user/test-.service.d/"
+ echo -ne "[Service]\nReadWritePaths=${BUILD_DIR:?}\n" >"$initdir/etc/systemd/user/test-.service.d/99-gcov-rwpaths-override.conf"
+ # Bind the $BUILD_DIR into nspawn containers that are executed using
+ # machinectl. Unfortunately, the .nspawn files don't support drop-ins
+ # so we have to inject the bind mount directly into
+ # the systemd-nspawn@.service unit.
+ cp "$initdir/usr/lib/systemd/system/systemd-nspawn@.service" "$initdir/etc/systemd/system/systemd-nspawn@.service"
+ sed -ri "s/^ExecStart=.+$/& --bind=${BUILD_DIR//\//\\\/}/" "$initdir/etc/systemd/system/systemd-nspawn@.service"
+ # Pass the $BUILD_DIR as $COVERAGE_BUILD_DIR env variable to the system
+ # manager, similarly to what we do with $ASAN_RT_PATH during sanitized
+ # builds
+ mkdir -p "$initdir/etc/systemd/system.conf.d/"
+ echo -ne "[Manager]\nDefaultEnvironment=COVERAGE_BUILD_DIR=$BUILD_DIR\n" >"$initdir/etc/systemd/system.conf.d/99-COVERAGE_BUILD_DIR.conf"
+ fi
+
+ # If we're built with -Dportabled=false, tests with systemd-analyze
+ # --profile will fail. Since we need just the profile (text) files, let's
+ # copy them into the image if they don't exist there.
+ local portable_dir="${initdir:?}${ROOTLIBDIR:?}/portable"
+ if [[ ! -d "$portable_dir/profile/strict" ]]; then
+ dinfo "Couldn't find portable profiles in the test image"
+ dinfo "Copying them directly from the source tree"
+ mkdir -p "$portable_dir"
+ cp -frv "${SOURCE_DIR:?}/src/portable/profile" "$portable_dir"
+ fi
+}
+
+get_ldpath() {
+ local rpath
+ rpath="$(objdump -p "${1:?}" 2>/dev/null | awk "/R(UN)?PATH/ { print \"$initdir\" \$2 }" | paste -sd :)"
+
+ if [ -z "$rpath" ] ; then
+ echo "$BUILD_DIR"
+ else
+ echo "$rpath"
+ fi
+}
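+
+# Illustrative note: `objdump -p` prints the dynamic section, where an RPATH or
+# RUNPATH entry looks roughly like (made-up value):
+#
+#   RUNPATH              $ORIGIN/src/shared
+#
+# get_ldpath() prefixes each such entry with $initdir and joins them with ':',
+# falling back to $BUILD_DIR when the binary carries no R(UN)PATH at all.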
+
+install_missing_libraries() {
+ dinfo "Install missing libraries"
+    # install possibly missing libraries
+ for i in "${initdir:?}"{,/usr}/{sbin,bin}/* "$initdir"{,/usr}/lib/systemd/{,tests/unit-tests/{,manual/,unsafe/}}*; do
+ LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$(get_ldpath "$i")" inst_libs "$i"
+ done
+
+ # Install libgcc_s.so if available, since it's dlopen()ed by libpthread
+ # and might cause unexpected failures during pthread_exit()/pthread_cancel()
+ # if not present
+ # See: https://github.com/systemd/systemd/pull/23858
+ while read -r libgcc_s; do
+ [[ -e "$libgcc_s" ]] && inst_library "$libgcc_s"
+ done < <(ldconfig -p | awk '/\/libgcc_s.so.1$/ { print $4 }')
+
+ local lib path
+    # A number of dependencies are now optional via dlopen(), so the install
+    # script will not pick them up, since it only looks at linkage.
+ for lib in libcryptsetup libidn libidn2 pwquality libqrencode tss2-esys tss2-rc tss2-mu tss2-tcti-device libfido2 libbpf libelf libdw xkbcommon p11-kit-1; do
+ ddebug "Searching for $lib via pkg-config"
+ if pkg-config --exists "$lib"; then
+ path="$(pkg-config --variable=libdir "$lib")"
+ if [ -z "${path}" ]; then
+ ddebug "$lib.pc does not contain a libdir variable, skipping"
+ continue
+ fi
+
+ if ! [[ ${lib} =~ ^lib ]]; then
+ lib="lib${lib}"
+ fi
+ # p11-kit-1's .so doesn't have the API level in the name
+ if [[ ${lib} =~ p11-kit-1$ ]]; then
+ lib="libp11-kit"
+ fi
+ # Some pkg-config files are broken and give out the wrong paths
+            # (e.g. libcryptsetup), so just ignore them
+ inst_libs "${path}/${lib}.so" || true
+ inst_library "${path}/${lib}.so" || true
+
+ if [[ "$lib" == "libxkbcommon" ]]; then
+ install_x11_keymaps full
+ fi
+ else
+ ddebug "$lib.pc not found, skipping"
+ continue
+ fi
+ done
+
+ # Install extra openssl 3 stuff
+ path="$(pkg-config --variable=libdir libcrypto)"
+ inst_simple "${path}/ossl-modules/legacy.so" || true
+ inst_simple "${path}/ossl-modules/fips.so" || true
+ inst_simple "${path}/engines-3/afalg.so" || true
+ inst_simple "${path}/engines-3/capi.so" || true
+ inst_simple "${path}/engines-3/loader_attic.so" || true
+ inst_simple "${path}/engines-3/padlock.so" || true
+
+ # Binaries from mtools depend on the gconv modules to translate between codepages. Because there's no
+ # pkg-config file for these, we copy every gconv/ directory we can find in /usr/lib and /usr/lib64.
+ # shellcheck disable=SC2046
+ inst_recursive $(find /usr/lib* -name gconv 2>/dev/null)
+}
+
+cleanup_loopdev() {
+ if [ -n "${LOOPDEV:=}" ]; then
+ ddebug "losetup -d $LOOPDEV"
+ losetup -d "${LOOPDEV}"
+ unset LOOPDEV
+ fi
+}
+
+add_at_exit_handler cleanup_loopdev
+
+create_empty_image() {
+ if [[ -z "${IMAGE_NAME:=}" ]]; then
+ echo "create_empty_image: \$IMAGE_NAME not set"
+ exit 1
+ fi
+
+ # Partition sizes are in MiBs
+ local root_size=768
+ local data_size=100
+ local esp_size=128
+ local boot_size=128
+ local total=
+ if ! get_bool "$NO_BUILD"; then
+ if meson configure "${BUILD_DIR:?}" | grep 'static-lib\|standalone-binaries' | awk '{ print $2 }' | grep -q 'true'; then
+ root_size=$((root_size + 200))
+ fi
+ if meson configure "${BUILD_DIR:?}" | grep 'link-.*-shared' | awk '{ print $2 }' | grep -q 'false'; then
+ root_size=$((root_size + 200))
+ fi
+ if get_bool "$IS_BUILT_WITH_COVERAGE"; then
+ root_size=$((root_size + 250))
+ fi
+ if get_bool "$IS_BUILT_WITH_ASAN"; then
+ root_size=$((root_size * 2))
+ fi
+ fi
+
+ if [[ "${IMAGE_ADDITIONAL_ROOT_SIZE:-0}" -gt 0 ]]; then
+ root_size=$((root_size + IMAGE_ADDITIONAL_ROOT_SIZE))
+ fi
+ if [[ "${IMAGE_ADDITIONAL_DATA_SIZE:-0}" -gt 0 ]]; then
+ data_size=$((data_size + IMAGE_ADDITIONAL_DATA_SIZE))
+ fi
+
+ total=$((root_size + data_size + esp_size + boot_size))
+
+    echo "Setting up ${IMAGE_PUBLIC:?} (${total} MiB)"
+ rm -f "${IMAGE_PRIVATE:?}" "$IMAGE_PUBLIC"
+
+ # Create the blank file to use as a root filesystem
+ truncate -s "${total}M" "$IMAGE_PUBLIC"
+
+ LOOPDEV="$(losetup --show -P -f "$IMAGE_PUBLIC")"
+ [[ -b "$LOOPDEV" ]] || return 1
+ # Create four partitions: an ESP, a root one, an XBOOTLDR one, and a data one (utilized by some tests)
+ sfdisk "$LOOPDEV" <<EOF
+label: gpt
+type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B name=esp size=${esp_size}M
+type=0FC63DAF-8483-4772-8E79-3D69D8477DE4 name=root size=${root_size}M bootable
+type=BC13C2FF-59E6-4262-A352-B275FD6F7172 name=boot size=${boot_size}M
+type=0FC63DAF-8483-4772-8E79-3D69D8477DE4 name=data
+EOF
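+
+ # With the default sizes above, once the mkfs calls below have run, the
+ # resulting layout is roughly (illustrative; the root partition grows
+ # depending on the build options checked earlier):
+ #   p1  esp   ${esp_size}M   vfat
+ #   p2  root  ${root_size}M  $FSTYPE, labelled systemd_boot
+ #   p3  boot  ${boot_size}M  $FSTYPE, labelled xbootldr
+ #   p4  data  remaining space (~${data_size}M), left unformatted, used by some tests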
+
+ udevadm settle
+
+ if ! mkfs -t vfat "${LOOPDEV}p1"; then
+ dfatal "Failed to mkfs -t vfat ${LOOPDEV}p1"
+ exit 1
+ fi
+
+ local label=(-L systemd_boot)
+ # mkfs.reiserfs doesn't know -L, so use --label instead
+ [[ "$FSTYPE" == "reiserfs" ]] && label=(--label systemd_boot)
+ if ! mkfs -t "${FSTYPE}" "${label[@]}" "${LOOPDEV}p2" -q; then
+ dfatal "Failed to mkfs -t ${FSTYPE} ${label[*]} ${LOOPDEV}p2 -q"
+ exit 1
+ fi
+
+ local label=(-L xbootldr)
+ [[ "$FSTYPE" == "reiserfs" ]] && label=(--label xbootldr)
+ if ! mkfs -t "${FSTYPE}" "${label[@]}" "${LOOPDEV}p3" -q; then
+ dfatal "Failed to mkfs -t ${FSTYPE} ${label[*]} ${LOOPDEV}p3 -q"
+ exit 1
+ fi
+}
+
+mount_initdir() {
+ if [ -z "${LOOPDEV:=}" ]; then
+ [ -e "${IMAGE_PRIVATE:?}" ] && image="$IMAGE_PRIVATE" || image="${IMAGE_PUBLIC:?}"
+ LOOPDEV="$(losetup --show -P -f "$image")"
+ [ -b "$LOOPDEV" ] || return 1
+
+ udevadm settle
+ fi
+
+ if ! mountpoint -q "${initdir:?}"; then
+ mkdir -p "$initdir"
+ mount "${LOOPDEV}p2" "$initdir"
+ TEST_SETUP_CLEANUP_ROOTDIR=1
+ fi
+}
+
+cleanup_initdir() {
+ # Unmount only if mount_initdir() mounted the image earlier (i.e. TEST_SETUP_CLEANUP_ROOTDIR was set)
+ if get_bool "$TEST_SETUP_CLEANUP_ROOTDIR"; then
+ _umount_dir "${initdir:?}"
+ fi
+}
+
+umount_loopback() {
+ # unmount the loopback device from all places. Otherwise we risk file
+ # system corruption.
+ for device in $(losetup -l | awk '$6=="'"${IMAGE_PUBLIC:?}"'" {print $1}'); do
+ ddebug "Unmounting all uses of $device"
+ mount | awk '/^'"${device}"'p/{print $1}' | xargs --no-run-if-empty umount -v
+ done
+}
+
+create_empty_image_rootdir() {
+ create_empty_image
+ mount_initdir
+}
+
+check_asan_reports() {
+ local ret=0
+ local root="${1:?}"
+ local log report
+
+ if get_bool "$IS_BUILT_WITH_ASAN"; then
+ ls -l "$root"
+ if [[ -e "$root/systemd.asan.log.1" ]]; then
+ cat "$root/systemd.asan.log.1"
+ ret=$((ret+1))
+ fi
+
+ for log in pid1 journald; do
+ report="$(find "$root" -name "systemd-$log.*san.log*" -exec cat {} \;)"
+ if [[ -n "$report" ]]; then
+ printf "%s\n" "$report"
+ # shellcheck disable=SC2015
+ [[ "$log" == journald ]] && cat "$root/systemd-journald.out" || :
+ ret=$((ret+1))
+ fi
+ done
+
+ # May 08 13:23:31 H testleak[2907148]: SUMMARY: AddressSanitizer: 4 byte(s) leaked in 1 allocation(s).
+ pids="$(
+ "$JOURNALCTL" -D "$root/var/log/journal" --grep 'SUMMARY: .*Sanitizer:' |
+ grep -v -E 'dbus-daemon|dbus-broker-launch' |
+ sed -r -n 's/.* .+\[([0-9]+)\]: SUMMARY:.*/\1/p'
+ )"
+
+ if [[ -n "$pids" ]]; then
+ ret=$((ret+1))
+ for pid in $pids; do
+ "$JOURNALCTL" -D "$root/var/log/journal" _PID="$pid" --no-pager
+ done
+ fi
+ fi
+
+ return $ret
+}
+
+check_coverage_reports() {
+ local root="${1:?}"
+
+ if get_bool "$NO_BUILD"; then
+ return 0
+ fi
+ if ! get_bool "$IS_BUILT_WITH_COVERAGE"; then
+ return 0
+ fi
+
+ if [ -n "${ARTIFACT_DIRECTORY}" ]; then
+ dest="${ARTIFACT_DIRECTORY}/${testname:?}.coverage-info"
+ else
+ dest="${TESTDIR:?}/coverage-info"
+ fi
+
+ if [[ ! -e "${TESTDIR:?}/coverage-base" ]]; then
+ # This shouldn't happen, as the report is generated during the setup
+ # phase (test_setup()).
+ derror "Missing base coverage report"
+ return 1
+ fi
+
+ # Create a coverage report that will later be uploaded. Remove info about system
+ # libraries/headers and generated files, as we don't really care about them.
+ lcov --directory "${root}/${BUILD_DIR:?}" --capture --output-file "${dest}.new"
+ if [[ -f "$dest" ]]; then
+ # If the destination report file already exists, don't overwrite it, but
+ # merge it with the already present one - this usually happens when
+ # running both "parts" of a test in one run (the qemu and the nspawn part).
+ lcov --add-tracefile "${dest}" --add-tracefile "${dest}.new" -o "${dest}"
+ else
+ # If there's no prior coverage report, merge the new one with the base
+ # report we did during the setup phase (see test_setup()).
+ lcov --add-tracefile "${TESTDIR:?}/coverage-base" --add-tracefile "${dest}.new" -o "${dest}"
+ fi
+ lcov --remove "$dest" -o "$dest" '/usr/include/*' '/usr/lib/*' "${BUILD_DIR:?}/*"
+ rm -f "${dest}.new"
+
+ # If the test logs contain lines like:
+ #
+ # ...systemd-resolved[735885]: profiling:/systemd-meson-build/src/shared/libsystemd-shared-250.a.p/base-filesystem.c.gcda:Cannot open
+ #
+ # it means we're possibly missing some coverage since gcov can't write the stats,
+ # usually due to the sandbox being too restrictive (e.g. ProtectSystem=yes,
+ # ProtectHome=yes) or the $BUILD_DIR being inaccessible to non-root users - see
+ # `setfacl` stuff in install_compiled_systemd().
+ #
+ # Note: some tests, like TEST-46, overmount /home with tmpfs, so if your
+ # build dir is under /home/your-user (which is usually the case) you might
+ # get bogus errors and missing coverage.
+ if ! get_bool "${IGNORE_MISSING_COVERAGE:=}" && \
+ "${JOURNALCTL:?}" -q --no-pager -D "${root:?}/var/log/journal" --grep "profiling:.+?gcda:[Cc]annot open"; then
+ derror "Detected possibly missing coverage, check the journal"
+ return 1
+ fi
+
+ return 0
+}
+
+save_journal() {
+ local source_dir="${1:?}"
+ local state="${2:?}"
+ # Default to always saving journal
+ local save="yes"
+ local dest_dir dest_name dest
+
+ if [[ "${TEST_SAVE_JOURNAL:-}" == "no" ]]; then
+ save="no"
+ elif [[ "${TEST_SAVE_JOURNAL:-}" == "fail" && "$state" -eq 0 ]]; then
+ save="no"
+ fi
+
+ if [[ -n "${ARTIFACT_DIRECTORY:-}" ]]; then
+ dest_dir="$ARTIFACT_DIRECTORY"
+ dest_name="${testname:?}.journal"
+ else
+ dest_dir="${TESTDIR:?}"
+ dest_name="system.journal"
+ fi
+
+ # Show messages from the testsuite-XX.service or messages with priority "warning" and higher
+ echo " --- $source_dir ---"
+ "$JOURNALCTL" --all --no-pager --no-hostname -o short-monotonic -D "$source_dir" \
+ _SYSTEMD_UNIT="testsuite-${TESTID:?}.service" + PRIORITY=4 + PRIORITY=3 + PRIORITY=2 + PRIORITY=1 + PRIORITY=0
+
+ if get_bool "$save"; then
+ # If we don't have systemd-journal-remote copy all journals from /var/log/journal/
+ # to $dest_dir/journals/ as is, otherwise merge all journals into a single .journal
+ # file
+ if [[ -z "${SYSTEMD_JOURNAL_REMOTE:-}" ]]; then
+ dest="$dest_dir/journals"
+ mkdir -p "$dest"
+ cp -a "$source_dir/*" "$dest/"
+ else
+ dest="$dest_dir/$dest_name"
+ "$SYSTEMD_JOURNAL_REMOTE" -o "$dest" --getter="$JOURNALCTL -o export -D $source_dir"
+ fi
+
+ if [[ -n "${SUDO_USER:-}" ]]; then
+ setfacl -R -m "user:$SUDO_USER:r-X" "$dest"
+ fi
+
+ # we want to print this sometime later, so save this in a variable
+ JOURNAL_LIST="$(ls -lR "$dest")"
+ fi
+
+ rm -rf "${source_dir:?}"/*
+}
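+
+# TEST_SAVE_JOURNAL=no never saves the journal, TEST_SAVE_JOURNAL=fail saves it
+# only when the test failed; any other value (or unset) saves it unconditionally.
+# E.g. a CI setup that only cares about journals from failing runs could export
+# TEST_SAVE_JOURNAL=fail before invoking the test (illustrative).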
+
+check_result_common() {
+ local workspace="${1:?}"
+ local ret
+
+ if [ -s "$workspace/failed" ]; then
+ # Non-empty …/failed has highest priority
+ cp -a "$workspace/failed" "${TESTDIR:?}/"
+ if [ -n "${SUDO_USER}" ]; then
+ setfacl -m "user:${SUDO_USER:?}:r-X" "${TESTDIR:?}/"failed
+ fi
+ ret=1
+ elif get_bool "$TIMED_OUT"; then
+ echo "(timeout)" >"${TESTDIR:?}/failed"
+ ret=2
+ elif [ -e "$workspace/testok" ]; then
+ # …/testok always counts (but with lower priority than …/failed)
+ ret=0
+ elif [ -e "$workspace/skipped" ]; then
+ # …/skipped always counts (a message is expected)
+ echo "${TESTNAME:?} was skipped:"
+ cat "$workspace/skipped"
+ ret=0
+ else
+ echo "(failed; see logs)" >"${TESTDIR:?}/failed"
+ ret=3
+ fi
+
+ check_asan_reports "$workspace" || ret=4
+
+ check_coverage_reports "$workspace" || ret=5
+
+ save_journal "$workspace/var/log/journal" $ret
+
+ if [ -d "${ARTIFACT_DIRECTORY}" ] && [ -f "$workspace/strace.out" ]; then
+ cp "$workspace/strace.out" "${ARTIFACT_DIRECTORY}/"
+ fi
+
+ if [ ${ret:?} != 0 ] && [ -f "$TESTDIR/failed" ]; then
+ echo -n "${TESTNAME:?}: "
+ cat "$TESTDIR/failed"
+ fi
+ echo "${JOURNAL_LIST:-"No journals were saved"}"
+
+ return ${ret:?}
+}
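+
+# Return values of check_result_common(), as derived from the logic above:
+#   0 - testok/skipped, 1 - non-empty "failed" marker, 2 - timeout,
+#   3 - no result marker at all, 4 - sanitizer report found, 5 - coverage check failed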
+
+check_result_nspawn() {
+ local workspace="${1:?}"
+ local ret=0
+
+ # Run test-specific checks if check_result_nspawn_hook() is defined
+ if declare -F check_result_nspawn_hook >/dev/null; then
+ if ! check_result_nspawn_hook "${workspace}"; then
+ derror "check_result_nspawn_hook() returned with EC > 0"
+ ret=4
+ fi
+ fi
+
+ check_result_common "${workspace}" || ret=$?
+
+ _umount_dir "${initdir:?}"
+
+ return $ret
+}
+
+# can be overridden in specific test
+check_result_qemu() {
+ local ret=0
+ mount_initdir
+
+ # Run test-specific checks if check_result_qemu_hook() is defined
+ if declare -F check_result_qemu_hook >/dev/null; then
+ if ! check_result_qemu_hook "${initdir:?}"; then
+ derror "check_result_qemu_hook() returned with EC > 0"
+ ret=4
+ fi
+ fi
+
+ check_result_common "${initdir:?}" || ret=$?
+
+ _umount_dir "${initdir:?}"
+
+ return $ret
+}
+
+check_result_nspawn_unittests() {
+ local workspace="${1:?}"
+ local ret=1
+
+ [[ -e "$workspace/testok" ]] && ret=0
+
+ if [[ -s "$workspace/failed" ]]; then
+ ret=$((ret + 1))
+ echo "=== Failed test log ==="
+ cat "$workspace/failed"
+ else
+ if [[ -s "$workspace/skipped" ]]; then
+ echo "=== Skipped test log =="
+ cat "$workspace/skipped"
+ # We might have only skipped tests - that should not fail the job
+ ret=0
+ fi
+ if [[ -s "$workspace/testok" ]]; then
+ echo "=== Passed tests ==="
+ cat "$workspace/testok"
+ fi
+ fi
+
+ get_bool "${TIMED_OUT:=}" && ret=1
+ check_coverage_reports "$workspace" || ret=5
+
+ save_journal "$workspace/var/log/journal" $ret
+ echo "${JOURNAL_LIST:-"No journals were saved"}"
+
+ _umount_dir "${initdir:?}"
+
+ return $ret
+}
+
+check_result_qemu_unittests() {
+ local ret=1
+
+ mount_initdir
+ [[ -e "${initdir:?}/testok" ]] && ret=0
+
+ if [[ -s "$initdir/failed" ]]; then
+ ret=$((ret + 1))
+ echo "=== Failed test log ==="
+ cat "$initdir/failed"
+ else
+ if [[ -s "$initdir/skipped" ]]; then
+ echo "=== Skipped test log =="
+ cat "$initdir/skipped"
+ # We might have only skipped tests - that should not fail the job
+ ret=0
+ fi
+ if [[ -s "$initdir/testok" ]]; then
+ echo "=== Passed tests ==="
+ cat "$initdir/testok"
+ fi
+ fi
+
+ get_bool "${TIMED_OUT:=}" && ret=1
+ check_coverage_reports "$initdir" || ret=5
+
+ save_journal "$initdir/var/log/journal" $ret
+ echo "${JOURNAL_LIST:-"No journals were saved"}"
+
+ _umount_dir "$initdir"
+
+ return $ret
+}
+
+create_rc_local() {
+ dinfo "Create rc.local"
+ mkdir -p "${initdir:?}/etc/rc.d"
+ cat >"$initdir/etc/rc.d/rc.local" <<EOF
+#!/usr/bin/env bash
+exit 0
+EOF
+ chmod 0755 "$initdir/etc/rc.d/rc.local"
+}
+
+install_execs() {
+ ddebug "Install executables from the service files"
+
+ local pkg_config_path="${BUILD_DIR:?}/src/core/"
+ local systemunitdir userunitdir exe
+ systemunitdir="$(PKG_CONFIG_PATH="$pkg_config_path" pkg-config --variable=systemdsystemunitdir systemd)"
+ userunitdir="$(PKG_CONFIG_PATH="$pkg_config_path" pkg-config --variable=systemduserunitdir systemd)"
+ while read -r exe; do
+ # Some {rc,halt}.local scripts and programs are allowed to be missing, but the rest should exist.
+ # Also, plymouth is pulled in by rescue.service, but even there its exit code
+ # is ignored; as it's not present on some distros, don't fail if it's missing.
+ dinfo "Attempting to install $exe (based on unit file reference)"
+ inst "$exe" || [ "${exe%.local}" != "$exe" ] || [ "${exe%systemd-update-done}" != "$exe" ] || [ "${exe##*/}" == "plymouth" ]
+ done < <(sed -r -n 's|^Exec[a-zA-Z]*=[@+!-]*([^ ]+).*|\1|gp' "${initdir:?}"/{"$systemunitdir","$userunitdir"}/*.service | sort -u)
+}
+
+generate_module_dependencies() {
+ dinfo "Generate modules dependencies"
+ if [[ -d "${initdir:?}/lib/modules/${KERNEL_VER:?}" ]] && \
+ ! depmod -a -b "$initdir" "$KERNEL_VER"; then
+ dfatal "\"depmod -a $KERNEL_VER\" failed."
+ exit 1
+ fi
+}
+
+install_depmod_files() {
+ dinfo "Install depmod files"
+ inst "/lib/modules/${KERNEL_VER:?}/modules.order"
+ inst "/lib/modules/$KERNEL_VER/modules.builtin"
+}
+
+install_plymouth() {
+ dinfo "Install plymouth"
+ # install plymouth, if found... else remove plymouth service files
+ # if [ -x /usr/libexec/plymouth/plymouth-populate-initrd ]; then
+ # PLYMOUTH_POPULATE_SOURCE_FUNCTIONS="$TEST_BASE_DIR/test-functions" \
+ # /usr/libexec/plymouth/plymouth-populate-initrd -t $initdir
+ # image_install plymouth plymouthd
+ # else
+ rm -f "${initdir:?}"/{usr/lib,lib,etc}/systemd/system/plymouth* "$initdir"/{usr/lib,lib,etc}/systemd/system/*/plymouth*
+ # fi
+}
+
+install_haveged() {
+ # If haveged is installed, it's probably included in initrd and needs to be
+ # installed in the image too.
+ if [ -x /usr/sbin/haveged ]; then
+ dinfo "Install haveged files"
+ inst /usr/sbin/haveged
+ for u in /usr/lib/systemd/system/haveged*; do
+ inst "$u"
+ done
+ fi
+}
+
+install_ld_so_conf() {
+ dinfo "Install /etc/ld.so.conf*"
+ cp -a /etc/ld.so.conf* "${initdir:?}/etc"
+ ldconfig -r "$initdir"
+}
+
+install_testuser() {
+ dinfo "Set up a test user"
+ # create unprivileged user for user manager tests
+ mkdir -p "${initdir:?}/etc/sysusers.d"
+ cat >"$initdir/etc/sysusers.d/testuser.conf" <<EOF
+u testuser 4711 "Test User" /home/testuser
+EOF
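+ # The sysusers.d line above follows the "u <name> <uid> <GECOS> <home>" syntax
+ # from sysusers.d(5); systemd-sysusers creates the matching user (and group)
+ # at boot.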
+
+ mkdir -p "$initdir/home/testuser"
+ chmod 0700 "$initdir/home/testuser"
+ chown 4711:4711 "$initdir/home/testuser"
+}
+
+install_config_files() {
+ dinfo "Install config files"
+ inst /etc/sysconfig/init || :
+ inst /etc/passwd
+ inst /etc/shadow
+ inst_any /etc/login.defs /usr/etc/login.defs
+ inst /etc/group
+ inst /etc/shells
+ inst_any /etc/nsswitch.conf /usr/etc/nsswitch.conf
+ inst /etc/pam.conf || :
+ inst_any /etc/os-release /usr/lib/os-release
+ inst /etc/localtime
+ # we want an empty environment
+ : >"${initdir:?}/etc/environment"
+ : >"$initdir/etc/machine-id"
+ : >"$initdir/etc/resolv.conf"
+
+ # set the hostname
+ echo 'H' >"$initdir/etc/hostname"
+
+ # let's set up just one image with the traditional verbose output
+ if [ "${IMAGE_NAME:?}" != "basic" ]; then
+ mkdir -p "$initdir/etc/systemd/system.conf.d"
+ echo -e '[Manager]\nStatusUnitFormat=name' >"$initdir/etc/systemd/system.conf.d/status.conf"
+ fi
+}
+
+install_basic_tools() {
+ dinfo "Install basic tools"
+ image_install "${BASICTOOLS[@]}"
+ image_install -o sushell
+ # In Debian, ldconfig is just a shell script wrapper around ldconfig.real
+ image_install -o ldconfig.real
+}
+
+install_debug_tools() {
+ dinfo "Install debug tools"
+ image_install -o "${DEBUGTOOLS[@]}"
+
+ if get_bool "$INTERACTIVE_DEBUG"; then
+ # Set default TERM from vt220 to linux, so at least basic key shortcuts work
+ local getty_override="${initdir:?}/etc/systemd/system/serial-getty@.service.d"
+ mkdir -p "$getty_override"
+ echo -e "[Service]\nEnvironment=TERM=linux" >"$getty_override/default-TERM.conf"
+ echo 'export TERM=linux' >>"$initdir/etc/profile"
+
+ if command -v resize >/dev/null; then
+ image_install resize
+ echo "resize" >>"$initdir/etc/profile"
+ fi
+
+ # Sometimes we might end up with plymouthd still running (especially
+ # with the initrd -> asan_wrapper -> systemd transition), which will eat
+ # our inputs and make debugging via tty impossible. Let's fix this by
+ # killing plymouthd explicitly for the interactive sessions.
+ # Note: we can't use pkill/pidof/etc. here due to a bug in libasan, see:
+ # - https://github.com/llvm/llvm-project/issues/49223
+ # - https://bugzilla.redhat.com/show_bug.cgi?id=2098125
+ local plymouth_unit="${initdir:?}/etc/systemd/system/kill-plymouth.service"
+ cat >"$plymouth_unit" <<EOF
+[Unit]
+After=multi-user.target
+
+[Service]
+ExecStart=sh -c 'killall --verbose plymouthd || :'
+
+[Install]
+WantedBy=multi-user.target
+EOF
+ "${SYSTEMCTL:?}" enable --root "${initdir:?}" kill-plymouth.service
+ fi
+}
+
+install_libnss() {
+ dinfo "Install libnss"
+ # install libnss_files for login
+ local NSS_LIBS
+ mapfile -t NSS_LIBS < <(LD_DEBUG=files getent passwd 2>&1 >/dev/null | sed -n '/calling init: .*libnss_/ {s!^.* /!/!; p}')
+ if [[ ${#NSS_LIBS[@]} -gt 0 ]]; then
+ image_install "${NSS_LIBS[@]}"
+ fi
+}
+
+install_dbus() {
+ dinfo "Install dbus"
+ inst "${ROOTLIBDIR:?}/system/dbus.socket"
+
+ # Newer Fedora versions use dbus-broker by default. Let's install it if it's available.
+ if [ -f "$ROOTLIBDIR/system/dbus-broker.service" ]; then
+ inst "$ROOTLIBDIR/system/dbus-broker.service"
+ inst /usr/bin/dbus-broker
+ inst /usr/bin/dbus-broker-launch
+ image_install -o {/etc,/usr/lib}/systemd/system/dbus.service
+ elif [ -f "$ROOTLIBDIR/system/dbus-daemon.service" ]; then
+ # Fedora rawhide replaced dbus.service with dbus-daemon.service
+ inst "$ROOTLIBDIR/system/dbus-daemon.service"
+ # Alias symlink
+ image_install -o {/etc,/usr/lib}/systemd/system/dbus.service
+ else
+ inst "$ROOTLIBDIR/system/dbus.service"
+ fi
+
+ while read -r file; do
+ inst "$file"
+ done < <(find /etc/dbus-1 /usr/share/dbus-1 -xtype f 2>/dev/null)
+
+ # setup policy for Type=dbus test
+ mkdir -p "${initdir:?}/etc/dbus-1/system.d"
+ cat >"$initdir/etc/dbus-1/system.d/systemd.test.ExecStopPost.conf" <<EOF
+<?xml version="1.0"?>
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+ "https://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <policy user="root">
+ <allow own="systemd.test.ExecStopPost"/>
+ </policy>
+</busconfig>
+EOF
+
+ # If we run without KVM, bump the service start timeout
+ if ! get_bool "$QEMU_KVM"; then
+ cat >"$initdir/etc/dbus-1/system.d/service.timeout.conf" <<EOF
+<?xml version="1.0"?>
+<!DOCTYPE busconfig PUBLIC "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+ "https://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+<busconfig>
+ <limit name="service_start_timeout">120000</limit>
+</busconfig>
+EOF
+ # Bump the client-side timeout in sd-bus as well
+ mkdir -p "$initdir/etc/systemd/system.conf.d"
+ echo -e '[Manager]\nDefaultEnvironment=SYSTEMD_BUS_TIMEOUT=120' >"$initdir/etc/systemd/system.conf.d/bus-timeout.conf"
+ fi
+}
+
+install_user_dbus() {
+ dinfo "Install user dbus"
+ local userunitdir
+ if ! userunitdir="$(pkg-config --variable=systemduserunitdir systemd)"; then
+ dwarn "WARNING! Cannot determine userunitdir from pkg-config, assuming /usr/lib/systemd/user"
+ userunitdir=/usr/lib/systemd/user
+ fi
+
+ inst "$userunitdir/dbus.socket"
+ inst_symlink "$userunitdir/sockets.target.wants/dbus.socket" || inst_symlink /etc/systemd/user/sockets.target.wants/dbus.socket
+
+ # Append the After= dependency on dbus in case it isn't already set up
+ mkdir -p "${initdir:?}/etc/systemd/system/user@.service.d/"
+ cat >"$initdir/etc/systemd/system/user@.service.d/dbus.conf" <<EOF
+[Unit]
+After=dbus.service
+EOF
+
+ # Newer Fedora versions use dbus-broker by default. Let's install it if it's available.
+ if [ -f "$userunitdir/dbus-broker.service" ]; then
+ inst "$userunitdir/dbus-broker.service"
+ image_install -o {/etc,/usr/lib}/systemd/user/dbus.service
+ elif [ -f "${ROOTLIBDIR:?}/system/dbus-daemon.service" ]; then
+ # Fedora rawhide replaced dbus.service with dbus-daemon.service
+ inst "$userunitdir/dbus-daemon.service"
+ # Alias symlink
+ image_install -o {/etc,/usr/lib}/systemd/user/dbus.service
+ else
+ inst "$userunitdir/dbus.service"
+ fi
+}
+
+install_pam() {
+ dinfo "Install PAM"
+ local paths=()
+
+ if get_bool "$LOOKS_LIKE_DEBIAN" && type -p dpkg-architecture &>/dev/null; then
+ paths+=("/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/security")
+ else
+ paths+=(/lib*/security)
+ fi
+
+ for d in /etc/pam.d /{usr/,}etc/security /usr/{etc,lib}/pam.d; do
+ [ -d "$d" ] && paths+=("$d")
+ done
+
+ while read -r file; do
+ inst "$file"
+ done < <(find "${paths[@]}" -xtype f)
+
+ # pam_unix depends on unix_chkpwd.
+ # see http://www.linux-pam.org/Linux-PAM-html/sag-pam_unix.html
+ image_install -o unix_chkpwd
+
+ # set empty root password for easy debugging
+ sed -i 's/^root:x:/root::/' "${initdir:?}/etc/passwd"
+
+ # And make sure pam_unix will accept it by adding the nullok option
+ # to the pam_unix auth entries.
+ for d in /etc/pam.d /usr/{etc,lib}/pam.d; do
+ [ -d "$initdir/$d" ] || continue
+ sed -i '/^auth.*pam_unix.so/s/$/ nullok/' "$initdir/$d"/*
+ done
+}
+
+install_locales() {
+ # Install only the C.UTF-8 and English locales (plus a couple of message catalogs)
+ dinfo "Install locales"
+
+ if command -v meson >/dev/null \
+ && (meson configure "${BUILD_DIR:?}" | grep 'localegen-path */') \
+ || get_bool "$LOOKS_LIKE_DEBIAN"; then
+ # locale-gen support
+ image_install -o locale-gen localedef
+ inst /etc/locale.gen || :
+ inst /usr/share/i18n/SUPPORTED || :
+ inst_recursive /usr/share/i18n/charmaps
+ inst_recursive /usr/share/i18n/locales
+ inst_recursive /usr/share/locale/en*
+ inst_recursive /usr/share/locale/de*
+ image_install /usr/share/locale/locale.alias
+ # locale-gen might either generate each locale separately or merge them
+ # into a single archive
+ if ! (inst_recursive /usr/lib/locale/C.*8 /usr/lib/locale/en_*8 ||
+ image_install /usr/lib/locale/locale-archive); then
+ dfatal "Failed to install required locales"
+ exit 1
+ fi
+ else
+ inst_recursive /usr/lib/locale/C.*8 /usr/lib/locale/en_*8
+ fi
+}
+
+# shellcheck disable=SC2120
+install_keymaps() {
+ local i p
+ local -a prefix=(
+ "/usr/lib"
+ "/usr/share"
+ )
+
+ dinfo "Install console keymaps"
+
+ if (( $# == 0 )); then
+ for p in "${prefix[@]}"; do
+ # The first three paths may be deprecated; nowadays the last three seem
+ # to be the ones used by many distributions.
+ for i in \
+ "$p"/kbd/keymaps/include/* \
+ "$p"/kbd/keymaps/i386/include/* \
+ "$p"/kbd/keymaps/i386/qwerty/us.* \
+ "$p"/kbd/keymaps/legacy/include/* \
+ "$p"/kbd/keymaps/legacy/i386/qwerty/us.* \
+ "$p"/kbd/keymaps/xkb/us*; do
+ [[ -f "$i" ]] || continue
+ inst "$i"
+ done
+ done
+ else
+ # When any argument is given, install more keymaps.
+ for p in "${prefix[@]}"; do
+ for i in \
+ "$p"/kbd/keymaps/include/* \
+ "$p"/kbd/keymaps/i386/*/* \
+ "$p"/kbd/keymaps/legacy/i386/*/* \
+ "$p"/kbd/keymaps/xkb/*; do
+ [[ -f "$i" ]] || continue
+ inst "$i"
+ done
+ done
+ fi
+}
+
+install_x11_keymaps() {
+ dinfo "Install x11 keymaps"
+
+ if (( $# == 0 )); then
+ # Install only the keymap list.
+ inst /usr/share/X11/xkb/rules/base.lst
+ else
+ # When any argument is given, install all keymaps.
+ inst_recursive /usr/share/X11/xkb
+ fi
+}
+
+install_zoneinfo() {
+ dinfo "Install time zones"
+ inst_any /usr/share/zoneinfo/Asia/Seoul
+ inst_any /usr/share/zoneinfo/Asia/Vladivostok
+ inst_any /usr/share/zoneinfo/Australia/Sydney
+ inst_any /usr/share/zoneinfo/Europe/Berlin
+ inst_any /usr/share/zoneinfo/Europe/Dublin
+ inst_any /usr/share/zoneinfo/Europe/Kiev
+ inst_any /usr/share/zoneinfo/Pacific/Auckland
+ inst_any /usr/share/zoneinfo/Pacific/Honolulu
+ inst_any /usr/share/zoneinfo/CET
+ inst_any /usr/share/zoneinfo/EET
+ inst_any /usr/share/zoneinfo/UTC
+}
+
+install_fonts() {
+ dinfo "Install system fonts"
+ for i in \
+ /usr/{lib,share}/kbd/consolefonts/eurlatgr* \
+ /usr/{lib,share}/kbd/consolefonts/latarcyrheb-sun16*; do
+ [[ -f "$i" ]] || continue
+ inst "$i"
+ done
+}
+
+install_terminfo() {
+ dinfo "Install terminfo files"
+ local terminfodir
+ for terminfodir in /lib/terminfo /etc/terminfo /usr/share/terminfo; do
+ [ -f "${terminfodir}/l/linux" ] && break
+ done
+ image_install -o "${terminfodir}/l/linux"
+}
+
+has_user_dbus_socket() {
+ if [ -f /usr/lib/systemd/user/dbus.socket ] || [ -f /etc/systemd/user/dbus.socket ]; then
+ return 0
+ else
+ echo "Per-user instances are not supported. Skipping..."
+ return 1
+ fi
+}
+
+setup_nspawn_root_hook() { :;}
+
+setup_nspawn_root() {
+ if [ -z "${initdir}" ]; then
+ dfatal "\$initdir not defined"
+ exit 1
+ fi
+
+ rm -rf "${TESTDIR:?}/unprivileged-nspawn-root"
+
+ if get_bool "$RUN_IN_UNPRIVILEGED_CONTAINER"; then
+ ddebug "cp -ar $initdir $TESTDIR/unprivileged-nspawn-root"
+ cp -ar "$initdir" "$TESTDIR/unprivileged-nspawn-root"
+ fi
+
+ setup_nspawn_root_hook
+}
+
+setup_basic_dirs() {
+ mkdir -p "${initdir:?}/run"
+ mkdir -p "$initdir/etc/systemd/system"
+ mkdir -p "$initdir/var/log/journal"
+
+
+ for d in usr/bin usr/sbin bin etc lib "${libdir:?}" sbin tmp usr var var/log var/tmp dev proc sys sysroot root run run/lock run/initramfs; do
+ if [ -L "/$d" ]; then
+ inst_symlink "/$d"
+ else
+ inst_dir "/$d"
+ fi
+ done
+
+ ln -sfn /run "$initdir/var/run"
+ ln -sfn /run/lock "$initdir/var/lock"
+}
+
+mask_supporting_services() {
+ # mask some services that we do not want to run in these tests
+ ln -fsv /dev/null "${initdir:?}/etc/systemd/system/systemd-hwdb-update.service"
+ ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-journal-catalog-update.service"
+ ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-networkd.service"
+ ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-networkd.socket"
+ ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-resolved.service"
+}
+
+inst_libs() {
+ local bin="${1:?}"
+ local so_regex='([^ ]*/lib[^/]*/[^ ]*\.so[^ ]*)'
+ local file line
+
+ while read -r line; do
+ [[ "$line" = 'not a dynamic executable' ]] && break
+ # Ignore errors about our own stuff missing. This is most likely caused
+ # by ldd attempting to use the unprefixed RPATH.
+ [[ "$line" =~ (libsystemd|libudev).*\ not\ found ]] && continue
+
+ if [[ "$line" =~ not\ found ]]; then
+ dfatal "Missing a shared library required by $bin."
+ dfatal "Run \"ldd $bin\" to find out what it is."
+ dfatal "$line"
+ dfatal "Cannot create a test image."
+ exit 1
+ fi
+
+ if [[ "$line" =~ $so_regex ]]; then
+ file="${BASH_REMATCH[1]}"
+ [[ -e "${initdir:?}/$file" ]] && continue
+ inst_library "$file"
+ fi
+ done < <(LC_ALL=C ldd "$bin" 2>/dev/null)
+}
+
+import_testdir() {
+ # Make sure we don't get a stale LOOPDEV value from a previous run
+ local _LOOPDEV="${LOOPDEV:=}"
+ # We don't want shellcheck to follow & check the $STATEFILE
+ # shellcheck source=/dev/null
+ [[ -e "$STATEFILE" ]] && . "$STATEFILE"
+ LOOPDEV="$_LOOPDEV"
+ if [[ ! -d "$TESTDIR" ]]; then
+ if [[ -z "$TESTDIR" ]]; then
+ TESTDIR="$(mktemp --tmpdir=$WORKDIR -d -t systemd-test.XXXXXX)"
+ else
+ mkdir -p "$TESTDIR"
+ fi
+
+ cat >"$STATEFILE" <<EOF
+TESTDIR="$TESTDIR"
+EOF
+ export TESTDIR
+ fi
+
+ IMAGE_PRIVATE="${TESTDIR}/${IMAGE_NAME:?}.img"
+ IMAGE_PUBLIC="${IMAGESTATEDIR:?}/${IMAGE_NAME}.img"
+}
+
+import_initdir() {
+ initdir="${TESTDIR:?}/root"
+ mkdir -p "$initdir"
+ export initdir
+}
+
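+# Prints the host's cgroup hierarchy: "unified" (cgroup2 mounted directly on
+# /sys/fs/cgroup), "hybrid" (cgroup2 mounted on /sys/fs/cgroup/unified on top
+# of a v1 tmpfs layout), or "legacy" (v1 only).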
+get_cgroup_hierarchy() {
+ case "$(stat -c '%T' -f /sys/fs/cgroup)" in
+ cgroup2fs)
+ echo "unified"
+ ;;
+ tmpfs)
+ if [[ -d /sys/fs/cgroup/unified && "$(stat -c '%T' -f /sys/fs/cgroup/unified)" == cgroup2fs ]]; then
+ echo "hybrid"
+ else
+ echo "legacy"
+ fi
+ ;;
+ *)
+ dfatal "Failed to determine host's cgroup hierarchy"
+ exit 1
+ esac
+}
+
+## @brief Converts numeric logging level to the first letter of level name.
+#
+# @param lvl Numeric logging level in range from 1 to 6.
+# @retval 1 if @a lvl is out of range.
+# @retval 0 if @a lvl is correct.
+# @result Echoes first letter of level name.
+_lvl2char() {
+ case "$1" in
+ 1) echo F;;
+ 2) echo E;;
+ 3) echo W;;
+ 4) echo I;;
+ 5) echo D;;
+ 6) echo T;;
+ *) return 1;;
+ esac
+}
+
+## @brief Internal helper function behind the logging wrappers below
+#
+# @param lvl Numeric logging level.
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+#
+# @note This function is not supposed to be called manually. Please use
+# dtrace(), ddebug(), or others instead which wrap this one.
+#
+# dlog() either logs the message given as parameters or, if none is
+# given, reads standard input and uses every line as a message.
+#
+# This enables:
+# dwarn "This is a warning"
+# echo "This is a warning" | dwarn
+LOG_LEVEL="${LOG_LEVEL:-4}"
+
+dlog() {
+ local lvl lvlc
+
+ [ -z "$LOG_LEVEL" ] && return 0
+ lvl="${1:?}"; shift
+ [ "$lvl" -le "$LOG_LEVEL" ] || return 0
+ lvlc="$(_lvl2char "$lvl")" || return 0
+
+ if [ $# -ge 1 ]; then
+ echo "$lvlc: $*"
+ else
+ while read -r line; do
+ echo "$lvlc: " "$line"
+ done
+ fi
+}
+
+## @brief Logs message at TRACE level (6)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+dtrace() {
+ set +x
+ dlog 6 "$@"
+ if get_bool "${debug:=}"; then
+ set -x
+ fi
+}
+
+## @brief Logs message at DEBUG level (5)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+ddebug() {
+ dlog 5 "$@"
+}
+
+## @brief Logs message at INFO level (4)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+dinfo() {
+ set +x
+ dlog 4 "$@"
+ if get_bool "${debug:=}"; then
+ set -x
+ fi
+}
+
+## @brief Logs message at WARN level (3)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+dwarn() {
+ set +x
+ dlog 3 "$@"
+ if get_bool "${debug:=}"; then
+ set -x
+ fi
+}
+
+## @brief Logs message at ERROR level (2)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+derror() {
+ dlog 2 "$@"
+}
+
+## @brief Logs message at FATAL level (1)
+#
+# @param msg Message.
+# @retval 0 It's always returned, even if logging failed.
+dfatal() {
+ set +x
+ dlog 1 "$@"
+ if get_bool "${debug:=}"; then
+ set -x
+ fi
+}
+
+
+# Generic substring function. If $2 is in $1, return 0.
+strstr() { [ "${1#*"$2"*}" != "$1" ]; }
+
+# normalize_path <path>
+# Prints the normalized path, removing any duplicated
+# and trailing slashes.
+# Example:
+# $ normalize_path ///test/test//
+# /test/test
+normalize_path() {
+ shopt -q -s extglob
+ set -- "${1//+(\/)//}"
+ shopt -q -u extglob
+ echo "${1%/}"
+}
+
+# convert_abs_rel <from> <to>
+# Prints the relative path to use when creating a symlink to <to> from <from>.
+# Example:
+# $ convert_abs_rel /usr/bin/test /bin/test-2
+# ../../bin/test-2
+# $ ln -s $(convert_abs_rel /usr/bin/test /bin/test-2) /usr/bin/test
+convert_abs_rel() {
+ local __current __absolute __abssize __cursize __newpath
+ local -i __i __level
+
+ set -- "$(normalize_path "${1:?}")" "$(normalize_path "${2:?}")"
+
+ # corner case #1 - self looping link
+ [[ "$1" == "$2" ]] && { echo "${1##*/}"; return; }
+
+ # corner case #2 - own dir link
+ [[ "${1%/*}" == "$2" ]] && { echo "."; return; }
+
+ IFS="/" read -ra __current <<< "$1"
+ IFS="/" read -ra __absolute <<< "$2"
+
+ __abssize=${#__absolute[@]}
+ __cursize=${#__current[@]}
+
+ while [[ "${__absolute[__level]}" == "${__current[__level]}" ]]; do
+ (( __level++ ))
+ if (( __level > __abssize || __level > __cursize ))
+ then
+ break
+ fi
+ done
+
+ for ((__i = __level; __i < __cursize-1; __i++)); do
+ if ((__i > __level))
+ then
+ __newpath=$__newpath"/"
+ fi
+ __newpath=$__newpath".."
+ done
+
+ for ((__i = __level; __i < __abssize; __i++)); do
+ if [[ -n $__newpath ]]
+ then
+ __newpath=$__newpath"/"
+ fi
+ __newpath=$__newpath${__absolute[__i]}
+ done
+
+ echo "$__newpath"
+}
+
+
+# Install a directory, keeping symlinks as on the original system.
+# Example: if /lib points to /lib64 on the host, "inst_dir /lib/file"
+# will create ${initdir}/lib64, ${initdir}/lib64/file,
+# and a symlink ${initdir}/lib -> lib64.
+inst_dir() {
+ local dir="${1:?}"
+ local part="${dir%/*}"
+ local file
+
+ [[ -e "${initdir:?}/${dir}" ]] && return 0 # already there
+
+ while [[ "$part" != "${part%/*}" ]] && ! [[ -e "${initdir}/${part}" ]]; do
+ dir="$part $dir"
+ part="${part%/*}"
+ done
+
+ # iterate over parent directories
+ for file in $dir; do
+ [[ -e "${initdir}/$file" ]] && continue
+ if [[ -L $file ]]; then
+ inst_symlink "$file"
+ else
+ # create directory
+ mkdir -m 0755 "${initdir}/$file" || return 1
+ [[ -e "$file" ]] && chmod --reference="$file" "${initdir}/$file"
+ chmod u+w "${initdir}/$file"
+ fi
+ done
+}
+
+# $1 = file to copy to ramdisk
+# $2 (optional) Name for the file on the ramdisk
+# Location of the image dir is assumed to be $initdir
+# We never overwrite the target if it exists.
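+# Example (illustrative): "inst_simple /etc/hosts" copies the file to
+# ${initdir}/etc/hosts; "inst_simple /etc/hosts /etc/hosts.bak" installs it
+# under a different (made-up) name, creating missing parent directories as needed.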
+inst_simple() {
+ [[ -f "${1:?}" ]] || return 1
+ strstr "$1" "/" || return 1
+
+ local src="$1"
+ local target="${2:-$1}"
+ if ! [[ -d ${initdir:?}/$target ]]; then
+ [[ -e ${initdir}/$target ]] && return 0
+ [[ -L ${initdir}/$target ]] && return 0
+ [[ -d "${initdir}/${target%/*}" ]] || inst_dir "${target%/*}"
+ fi
+ # install checksum files also
+ if [[ -e "${src%/*}/.${src##*/}.hmac" ]]; then
+ inst "${src%/*}/.${src##*/}.hmac" "${target%/*}/.${target##*/}.hmac"
+ fi
+ ddebug "Installing $src"
+ cp --sparse=always --force --dereference --preserve=all "$src" "${initdir}/$target"
+}
+
+# find symlinks linked to given library file
+# $1 = library file
+# The function searches for symlinks by stripping version numbers appended to
+# the library filename, checks whether they point to the same target, and
+# finally prints the list of symlinks to stdout.
+#
+# Example:
+# rev_lib_symlinks libfoo.so.8.1
+# output: libfoo.so.8 libfoo.so
+# (Only if libfoo.so.8 and libfoo.so exist on the host system.)
+rev_lib_symlinks() {
+ local fn="${1:?}"
+ local links=""
+ local orig
+ orig="$(readlink -f "$1")"
+
+ [[ "${fn}" =~ .*\.so\..* ]] || return 1
+
+ until [[ "${fn##*.}" == so ]]; do
+ fn="${fn%.*}"
+ [[ -L "${fn}" && "$(readlink -f "${fn}")" == "${orig}" ]] && links+=" ${fn}"
+ done
+
+ echo "${links}"
+}
+
+# Same as above, but specialized to handle dynamic libraries.
+# It handles making symlinks according to how the original library
+# is referenced.
+inst_library() {
+ local src="${1:?}"
+ local dest="${2:-$1}"
+ local reallib symlink
+
+ strstr "$1" "/" || return 1
+ [[ -e ${initdir:?}/$dest ]] && return 0
+ if [[ -L $src ]]; then
+ # install checksum files also
+ if [[ -e "${src%/*}/.${src##*/}.hmac" ]]; then
+ inst "${src%/*}/.${src##*/}.hmac" "${dest%/*}/.${dest##*/}.hmac"
+ fi
+ reallib="$(readlink -f "$src")"
+ inst_simple "$reallib" "$reallib"
+ inst_dir "${dest%/*}"
+ [[ -d "${dest%/*}" ]] && dest="$(readlink -f "${dest%/*}")/${dest##*/}"
+ ddebug "Creating symlink $reallib -> $dest"
+ ln -sfn -- "$(convert_abs_rel "${dest}" "${reallib}")" "${initdir}/${dest}"
+ else
+ inst_simple "$src" "$dest"
+ fi
+
+ # Create additional symlinks. See the rev_lib_symlinks() description.
+ for symlink in $(rev_lib_symlinks "$src") ${reallib:+$(rev_lib_symlinks "$reallib")}; do
+ if [[ ! -e "$initdir/$symlink" ]]; then
+ ddebug "Creating extra symlink: $symlink"
+ inst_symlink "$symlink"
+ fi
+ done
+}
+
+# find a binary. If we were not passed the full path directly,
+# search in the usual places to find the binary.
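+# Example (illustrative, paths are distro-dependent):
+#   find_binary systemctl                    -> /usr/bin/systemctl (via type -P)
+#   find_binary /usr/lib64/libnss_files.so.2 -> prints the path as-is, since it
+#     contains ".so" and ldd accepts it.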
+find_binary() {
+ local bin="${1:?}"
+ if [[ -z ${bin##/*} ]]; then
+ if [[ -x "$bin" ]] || { strstr "$bin" ".so" && ldd "$bin" &>/dev/null; }; then
+ echo "$bin"
+ return 0
+ fi
+ fi
+
+ type -P "$bin"
+}
+
+# Same as above, but specialized to install binary executables.
+# Install binary executable, and all shared library dependencies, if any.
+inst_binary() {
+ local bin="${1:?}"
+ local path target
+
+ # In certain cases we might attempt to install a binary which is already
+ # present in the test image, yet it's missing from the host system.
+ # In such cases, let's check if the binary indeed exists in the image
+ # before doing any other checks. If it does, immediately return with
+ # success.
+ if [[ $# -eq 1 ]]; then
+ for path in "" bin sbin usr/bin usr/sbin; do
+ [[ -e "${initdir:?}${path:+/$path}/${bin}" ]] && return 0
+ done
+ fi
+
+ bin="$(find_binary "$bin")" || return 1
+ target="${2:-$bin}"
+ [[ -e "${initdir:?}/$target" ]] && return 0
+ [[ -L "$bin" ]] && inst_symlink "$bin" "$target" && return 0
+
+ local file line
+ local so_regex='([^ ]*/lib[^/]*/[^ ]*\.so[^ ]*)'
+ # DSOs provided by systemd
+ local systemd_so_regex='/(libudev|libsystemd.*|.+[\-_]systemd([\-_].+)?|libnss_(mymachines|myhostname|resolve)).so'
+ local wrap_binary=0
+ local enable_lsan=0
+ # I love bash!
+ while read -r line; do
+ [[ "$line" = 'not a dynamic executable' ]] && break
+
+ # Ignore errors about our own stuff missing. This is most likely caused
+ # by ldd attempting to use the unprefixed RPATH.
+ [[ "$line" =~ libsystemd.*\ not\ found ]] && continue
+
+ # We're built with ASan and the target binary loads one of the systemd's
+ # DSOs, so we need to tweak the environment before executing the binary
+ if get_bool "$IS_BUILT_WITH_ASAN" && [[ "$line" =~ $systemd_so_regex ]]; then
+ wrap_binary=1
+ fi
+
+ if [[ "$line" =~ $so_regex ]]; then
+ file="${BASH_REMATCH[1]}"
+ [[ -e "${initdir}/$file" ]] && continue
+ inst_library "$file"
+ continue
+ fi
+
+ if [[ "$line" =~ not\ found ]]; then
+ dfatal "Missing a shared library required by $bin."
+ dfatal "Run \"ldd $bin\" to find out what it is."
+ dfatal "$line"
+ dfatal "Cannot create a test image."
+ exit 1
+ fi
+ done < <(LC_ALL=C ldd "$bin" 2>/dev/null)
+
+ # Same as above, but we need to wrap certain libraries unconditionally
+ #
+ # chgrp, chown, getent, login, setfacl, su, useradd, userdel
+ # - dlopen() (not only) systemd's PAM modules
+ # ls, mkfs.*, mksquashfs, mkswap, setpriv, stat
+ # - pull in nss_systemd with certain options (like ls -l) when
+ # nsswitch.conf uses [SUCCESS=merge] (like on Arch Linux)
+ # delv, dig - pull in nss_resolve if `resolve` is in nsswitch.conf
+ # tar - called by machinectl in TEST-25
+ bin_rx='/(agetty|chgrp|chown|curl|delv|dig|getfacl|getent|id|login|ls|mkfs\.[a-z0-9]+|mksquashfs|mkswap|setfacl|setpriv|stat|su|tar|useradd|userdel)$'
+ if get_bool "$IS_BUILT_WITH_ASAN" && [[ "$bin" =~ $bin_rx ]]; then
+ wrap_binary=1
+ # Ugh, so we want to disable LSan in most cases for the wrapped binaries, since
+ # we don't care about memory leaks in such binaries. However, in certain cases
+ # the external binary is the only interface for the systemd code, like for
+ # the systemd NSS modules, where we want to detect memory leaks. So let's
+ # do another check to decide if we want to enable LSan for given binary.
+ if [[ "$bin" =~ /getent$ ]]; then
+ enable_lsan=1
+ fi
+ fi
+
+ # If the target binary is built with ASan support, we don't need to wrap
+ # it, as it should handle everything by itself
+ if get_bool "$wrap_binary" && ! is_built_with_asan "$bin"; then
+ dinfo "Creating ASan-compatible wrapper for binary '$target'"
+ # Install the target binary with a ".orig" suffix
+ inst_simple "$bin" "${target}.orig"
+ # Create a simple shell wrapper in place of the target binary, which
+ # sets necessary ASan-related env variables and then exec()s the
+ # suffixed target binary
+ cat >"$initdir/$target" <<EOF
+#!/bin/bash
+# Preload the ASan runtime DSO, otherwise ASan will complain
+export LD_PRELOAD="$ASAN_RT_PATH"
+# Enable LSan only if requested, since we usually don't care about leak
+# reports from 'external' binaries
+export ASAN_OPTIONS=detect_leaks=$enable_lsan
+# Set argv[0] to the original binary name without the ".orig" suffix
+exec -a "\$0" -- "${target}.orig" "\$@"
+EOF
+ chmod +x "$initdir/$target"
+ else
+ inst_simple "$bin" "$target"
+ fi
+}
+
+# Same as above, except for shell scripts.
+# If your shell script does not start with a shebang, it is not a shell script.
+inst_script() {
+ local bin line shebang_regex
+ bin="$(find_binary "${1:?}")" || return 1
+ shift
+
+ read -r -n 80 line <"$bin"
+ # If debug is set, clean unprintable chars to prevent messing up the term
+ get_bool "${debug:=}" && line="$(echo -n "$line" | tr -c -d '[:print:][:space:]')"
+ shebang_regex='(#! *)(/[^ ]+).*'
+ [[ "$line" =~ $shebang_regex ]] || return 1
+ inst "${BASH_REMATCH[2]}" && inst_simple "$bin" "$@"
+}
+
+# same as above, but specialized for symlinks
+inst_symlink() {
+ local src="${1:?}"
+ local target="${2:-$src}"
+ local realsrc
+
+ strstr "$src" "/" || return 1
+ [[ -L "$src" ]] || return 1
+ [[ -L "${initdir:?}/$target" ]] && return 0
+ realsrc="$(readlink -f "$src")"
+ if ! [[ -e "$initdir/$realsrc" ]]; then
+ if [[ -d "$realsrc" ]]; then
+ inst_dir "$realsrc"
+ else
+ inst "$realsrc"
+ fi
+ fi
+ [[ ! -e "$initdir/${target%/*}" ]] && inst_dir "${target%/*}"
+ [[ -d "${target%/*}" ]] && target="$(readlink -f "${target%/*}")/${target##*/}"
+ ln -sfn -- "$(convert_abs_rel "${target}" "${realsrc}")" "$initdir/$target"
+}
+
+# attempt to install any programs specified in a udev rule
+inst_rule_programs() {
+ local rule="${1:?}"
+ local prog bin
+
+ sed -rn 's/^.*?PROGRAM==?"([^ "]+).*$/\1/p' "$rule" | while read -r prog; do
+ if [ -x "/lib/udev/$prog" ]; then
+ bin="/lib/udev/$prog"
+ else
+ if ! bin="$(find_binary "$prog")"; then
+ dinfo "Skipping program $prog used in udev rule $(basename "$rule") as it cannot be found"
+ continue
+ fi
+ fi
+
+ #dinfo "Installing $_bin due to it's use in the udev rule $(basename $1)"
+ image_install "$bin"
+ done
+}
+
+# udev rules always get installed in the same place, so
+# create a function to install them to make life simpler.
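+# Example (illustrative rule names):
+#   inst_rules 60-persistent-storage.rules ./99-test.rules
+# installs the former from the system rules directories (keeping its path) and
+# the latter from the current directory into /etc/udev/rules.d of the image.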
+inst_rules() {
+ local target=/etc/udev/rules.d
+ local found rule
+
+ inst_dir "/lib/udev/rules.d"
+ inst_dir "$target"
+ for rule in "$@"; do
+ if [ "${rule#/}" = "$rule" ]; then
+ for r in /lib/udev/rules.d /etc/udev/rules.d; do
+ if [[ -f "$r/$rule" ]]; then
+ found="$r/$rule"
+ inst_simple "$found"
+ inst_rule_programs "$found"
+ fi
+ done
+ fi
+ for r in '' ./; do
+ if [[ -f "${r}${rule}" ]]; then
+ found="${r}${rule}"
+ inst_simple "$found" "$target/${found##*/}"
+ inst_rule_programs "$found"
+ fi
+ done
+ [[ $found ]] || dinfo "Skipping udev rule: $rule"
+ found=
+ done
+}
+
+# General purpose installation function, dispatching to the inst_* helpers above.
+# Takes <src> [<dst>]; for dracut compatibility, the image directory may be
+# passed as the second of three arguments.
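+# Illustrative calls (paths are made up):
+#   inst /etc/hosts                             # install under the same path
+#   inst /etc/hosts /etc/hosts.bak              # install under a different name
+#   inst /etc/hosts "$initdir" /etc/hosts.bak   # dracut-style 3-argument form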
+inst() {
+ case $# in
+ 1) ;;
+ 2)
+ [[ ! "$initdir" && -d "$2" ]] && export initdir="$2"
+ [[ "$initdir" = "$2" ]] && set "$1"
+ ;;
+ 3)
+ [[ -z "$initdir" ]] && export initdir="$2"
+ set "$1" "$3"
+ ;;
+ *)
+ dfatal "inst only takes 1 or 2 or 3 arguments"
+ exit 1
+ ;;
+ esac
+
+ local fun
+ for fun in inst_symlink inst_script inst_binary inst_simple; do
+ "$fun" "$@" && return 0
+ done
+
+ dwarn "Failed to install '$1'"
+ return 1
+}
+
+# install any of listed files
+#
+# If the first argument is '-d' and the second a destination path, the first
+# accessible source is installed into that path, otherwise it will be installed
+# under the same path as the source. If none of the listed files could be
+# installed, the function returns 1; on the first successful installation it
+# returns 0.
+#
+# Example:
+#
+# inst_any -d /bin/foo /bin/bar /bin/baz
+#
+# Let's assume that /bin/baz exists, so it will be installed as /bin/foo in
+# the initrd.
+inst_any() {
+ local dest file
+
+ [[ "${1:?}" = '-d' ]] && dest="${2:?}" && shift 2
+
+ for file in "$@"; do
+ if [[ -e "$file" ]]; then
+ [[ -n "$dest" ]] && inst "$file" "$dest" && return 0
+ inst "$file" && return 0
+ fi
+ done
+
+ return 1
+}
+
+inst_recursive() {
+ local p item
+
+ for p in "$@"; do
+ # Make sure the source exists, as the process substitution below
+ # suppresses errors
+ stat "$p" >/dev/null || return 1
+
+ while read -r item; do
+ if [[ -d "$item" ]]; then
+ inst_dir "$item"
+ elif [[ -f "$item" ]]; then
+ inst_simple "$item"
+ fi
+ done < <(find "$p" 2>/dev/null)
+ done
+}
+
+# image_install [-o ] <file> [<file> ... ]
+# Install <file> to the test image
+# -o optionally install the <file>; don't fail if it is not there
+image_install() {
+ local optional=no
+ local prog="${1:?}"
+
+ if [[ "$prog" = '-o' ]]; then
+ optional=yes
+ shift
+ fi
+
+ for prog in "$@"; do
+ if ! inst "$prog" ; then
+ if get_bool "$optional"; then
+ dinfo "Skipping program $prog as it cannot be found and is" \
+ "flagged to be optional"
+ else
+ dfatal "Failed to install $prog"
+ exit 1
+ fi
+ fi
+ done
+}
+
+# Install a single kernel module along with any firmware it may require.
+# $1 = full path to kernel module to install
+install_kmod_with_fw() {
+ local module="${1:?}"
+ # no need to go further if the module is already installed
+ [[ -e "${initdir:?}/lib/modules/${KERNEL_VER:?}/${module##*"/lib/modules/$KERNEL_VER/"}" ]] && return 0
+ [[ -e "$initdir/.kernelmodseen/${module##*/}" ]] && return 0
+
+ [ -d "$initdir/.kernelmodseen" ] && : >"$initdir/.kernelmodseen/${module##*/}"
+
+ inst_simple "$module" "/lib/modules/$KERNEL_VER/${module##*"/lib/modules/$KERNEL_VER/"}" || return $?
+
+ local modname="${module##*/}"
+ local fwdir found fw
+ modname="${modname%.ko*}"
+
+ while read -r fw; do
+ found=
+ for fwdir in /lib/firmware/updates /lib/firmware; do
+ if [[ -d "$fwdir" && -f "$fwdir/$fw" ]]; then
+ inst_simple "$fwdir/$fw" "/lib/firmware/$fw"
+ found=yes
+ fi
+ done
+ if ! get_bool "$found"; then
+ if ! grep -qe "\<${modname//-/_}\>" /proc/modules; then
+ dinfo "Possible missing firmware \"${fw}\" for kernel module" \
+ "\"${modname}.ko\""
+ else
+ dwarn "Possible missing firmware \"${fw}\" for kernel module" \
+ "\"${modname}.ko\""
+ fi
+ fi
+ done < <(modinfo -k "$KERNEL_VER" -F firmware "$module" 2>/dev/null)
+ return 0
+}
+
+# Do something with all the dependencies of a kernel module.
+# Note that with the technique we use (modprobe --show-depends), a module is
+# also listed as a dependency of itself
+# $1 = function to call for each dependency we find
+# It will be passed the full path to the found kernel module
+# $2 = module to get dependencies for
+# rest of args = arguments to modprobe
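+# Illustrative usage (assuming "loop" is built as a module for $KERNEL_VER):
+#   for_each_kmod_dep install_kmod_with_fw loop --set-version "$KERNEL_VER"
+# calls install_kmod_with_fw once per "insmod <path>" line printed by modprobe,
+# i.e. for every dependency and for the module itself.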
+for_each_kmod_dep() {
+ local func="${1:?}"
+ local kmod="${2:?}"
+ local found=0
+ local cmd modpath
+ shift 2
+
+ while read -r cmd modpath _; do
+ [[ "$cmd" = insmod ]] || continue
+ "$func" "$modpath" || return $?
+ found=1
+ done < <(modprobe "$@" --ignore-install --show-depends "$kmod")
+
+ ! get_bool "$found" && return 1
+ return 0
+}
+
+# instmods [-c] <kernel module> [<kernel module> ... ]
+# instmods [-c] <kernel subsystem>
+# install kernel modules along with all their dependencies.
+# <kernel subsystem> can be e.g. "=block" or "=drivers/usb/storage"
+# FIXME(?): dracutdevs/dracut@f4e38c0da8d6bf3764c1ad753d9d52aef63050e5
+instmods() {
+ local check=no
+ if [[ $# -ge 1 && "$1" = '-c' ]]; then
+ check=yes
+ shift
+ fi
+
+ inst1mod() {
+ local mod="${1:?}"
+ local ret=0
+ local mod_dir="/lib/modules/${KERNEL_VER:?}/"
+
+ case "$mod" in
+ =*)
+ if [ -f "${mod_dir}/modules.${mod#=}" ]; then
+ (
+ [[ "$mpargs" ]] && echo "$mpargs"
+ cat "${mod_dir}/modules.${mod#=}"
+ ) | instmods
+ else
+ (
+ [[ "$mpargs" ]] && echo "$mpargs"
+ find "$mod_dir" -path "*/${mod#=}/*" -name "*.ko*" -type f -printf '%f\n'
+ ) | instmods
+ fi
+ ;;
+ --*)
+ mpargs+=" $mod"
+ ;;
+ i2o_scsi)
+ # Do not load this diagnostic-only module
+ return
+ ;;
+ *)
+ mod=${mod##*/}
+ # if we are already installed, skip this module and go on
+ # to the next one.
+ [[ -f "${initdir:?}/.kernelmodseen/${mod%.ko}.ko" ]] && return
+
+ # We use the '-d' option of modprobe only if the modules prefix path
+ # differs from the default '/'. This allows us to use dracut with an
+ # old version of modprobe which doesn't have the '-d' option.
+ local mod_dirname=${mod_dir%%/lib/modules/*}
+ [[ -n ${mod_dirname} ]] && mod_dirname="-d ${mod_dirname}/"
+
+ # ok, load the module, all its dependencies, and any firmware
+ # it may require
+ for_each_kmod_dep install_kmod_with_fw "$mod" \
+ --set-version "$KERNEL_VER" \
+ ${mod_dirname:+"$mod_dirname"} \
+ ${mpargs:+"$mpargs"}
+ ((ret+=$?))
+ ;;
+ esac
+ return "$ret"
+ }
+
+ local mod mpargs
+
+ if [[ $# -eq 0 ]]; then # filenames from stdin
+ while read -r mod; do
+ if ! inst1mod "${mod%.ko*}" && [ "$check" = "yes" ]; then
+ dfatal "Failed to install $mod"
+ return 1
+ fi
+ done
+ fi
+
+ for mod in "$@"; do # filenames as arguments
+ if ! inst1mod "${mod%.ko*}" && [ "$check" = "yes" ]; then
+ dfatal "Failed to install $mod"
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+_umount_dir() {
+ local mountpoint="${1:?}"
+ if mountpoint -q "$mountpoint"; then
+ ddebug "umount $mountpoint"
+ umount "$mountpoint"
+ fi
+}
+
+# can be overridden in specific test
+test_setup_cleanup() {
+ cleanup_initdir
+}
+
+_test_cleanup() {
+ # (post-test) cleanup should always ignore failure and cleanup as much as possible
+ (
+ set +e
+ [[ -n "$initdir" ]] && _umount_dir "$initdir"
+ [[ -n "$IMAGE_PUBLIC" ]] && rm -vf "$IMAGE_PUBLIC"
+ # If multiple setups/cleanups are run in parallel, this can cause a race
+ if [[ -n "$IMAGESTATEDIR" && $TEST_PARALLELIZE -ne 1 ]]; then
+ rm -vf "${IMAGESTATEDIR}/default.img"
+ fi
+ [[ -n "$TESTDIR" ]] && rm -vfr "$TESTDIR"
+ [[ -n "$STATEFILE" ]] && rm -vf "$STATEFILE"
+ ) || :
+}
+
+# can be overridden in specific test
+test_cleanup() {
+ _test_cleanup
+}
+
+test_cleanup_again() {
+ [ -n "$TESTDIR" ] || return
+ rm -rf "$TESTDIR/unprivileged-nspawn-root"
+ [[ -n "$initdir" ]] && _umount_dir "$initdir"
+}
+
+test_create_image() {
+ create_empty_image_rootdir
+
+ # Create what will eventually be our root filesystem in the mounted image
+ (
+ LOG_LEVEL=5
+ setup_basic_environment
+ )
+}
+
+test_setup() {
+ if ! get_bool "$NO_BUILD" && \
+ get_bool "${TEST_REQUIRE_INSTALL_TESTS:?}" && \
+ command -v meson >/dev/null && \
+ [[ "$(meson configure "${BUILD_DIR:?}" | grep install-tests | awk '{ print $2 }')" != "true" ]]; then
+ dfatal "$BUILD_DIR needs to be built with -Dinstall-tests=true"
+ exit 1
+ fi
+
+ if [ -e "${IMAGE_PRIVATE:?}" ]; then
+ echo "Reusing existing image $IMAGE_PRIVATE → $(realpath "$IMAGE_PRIVATE")"
+ mount_initdir
+ else
+ if [ ! -e "${IMAGE_PUBLIC:?}" ]; then
+ # default.img is the base that every test uses and optionally appends to
+ if [ ! -e "${IMAGESTATEDIR:?}/default.img" ] || [ -n "${TEST_FORCE_NEWIMAGE:=}" ]; then
+ # Create the backing public image, but then completely unmount
+ # it and drop the loopback device responsible for it, since we're
+ # going to symlink/copy the image and mount it again from
+ # elsewhere.
+ local image_old="${IMAGE_PUBLIC}"
+ if [ -z "${TEST_FORCE_NEWIMAGE}" ]; then
+ IMAGE_PUBLIC="${IMAGESTATEDIR}/default.img"
+ fi
+ test_create_image
+ test_setup_cleanup
+ umount_loopback
+ cleanup_loopdev
+ IMAGE_PUBLIC="${image_old}"
+ fi
+ if [ "${IMAGE_NAME:?}" != "default" ] && ! get_bool "${TEST_FORCE_NEWIMAGE}"; then
+ cp -v "$(realpath "${IMAGESTATEDIR}/default.img")" "$IMAGE_PUBLIC"
+ fi
+ fi
+
+ local hook_defined
+ declare -f -F test_append_files >/dev/null && hook_defined=yes || hook_defined=no
+
+ echo "Reusing existing cached image $IMAGE_PUBLIC → $(realpath "$IMAGE_PUBLIC")"
+ if get_bool "$TEST_PARALLELIZE" || get_bool "$hook_defined"; then
+ cp -v -- "$(realpath "$IMAGE_PUBLIC")" "$IMAGE_PRIVATE"
+ else
+ ln -sv -- "$(realpath "$IMAGE_PUBLIC")" "$IMAGE_PRIVATE"
+ fi
+
+ mount_initdir
+
+ if get_bool "${TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED}"; then
+ dinfo "Masking supporting services"
+ mask_supporting_services
+ fi
+
+ if get_bool "$IS_BUILT_WITH_COVERAGE"; then
+ # Do an initial coverage capture, to make sure the final report includes
+ # files that the tests didn't touch at all
+ lcov --initial --capture --directory "${initdir}/${BUILD_DIR:?}" --output-file "${TESTDIR:?}/coverage-base"
+ fi
+
+ if get_bool "$hook_defined"; then
+ test_append_files "${initdir:?}"
+ fi
+ fi
+
+ setup_nspawn_root
+}
+
+test_run() {
+ local test_id="${1:?}"
+ mount_initdir
+
+ if ! get_bool "${TEST_NO_QEMU:=}"; then
+ if run_qemu "$test_id"; then
+ check_result_qemu || { echo "qemu test failed"; return 1; }
+ else
+ dwarn "can't run qemu, skipping"
+ fi
+ fi
+ if ! get_bool "${TEST_NO_NSPAWN:=}"; then
+ mount_initdir
+ if run_nspawn "${initdir:?}" "$test_id"; then
+ check_result_nspawn "$initdir" || { echo "nspawn-root test failed"; return 1; }
+ else
+ dwarn "can't run systemd-nspawn, skipping"
+ fi
+
+ if get_bool "${RUN_IN_UNPRIVILEGED_CONTAINER:=}"; then
+ dir="$TESTDIR/unprivileged-nspawn-root"
+ if NSPAWN_ARGUMENTS="-U --private-network ${NSPAWN_ARGUMENTS:-}" run_nspawn "$dir" "$test_id"; then
+ check_result_nspawn "$dir" || { echo "unprivileged-nspawn-root test failed"; return 1; }
+ else
+ dwarn "can't run systemd-nspawn, skipping"
+ fi
+ fi
+ fi
+ return 0
+}
+
+do_test() {
+ if [[ $UID != "0" ]]; then
+ echo "TEST: $TEST_DESCRIPTION [SKIPPED]: not root" >&2
+ exit 0
+ fi
+
+ if get_bool "${TEST_NO_QEMU:=}" && get_bool "${TEST_NO_NSPAWN:=}"; then
+ echo "TEST: $TEST_DESCRIPTION [SKIPPED]: both qemu and nspawn disabled" >&2
+ exit 0
+ fi
+
+ if get_bool "${TEST_QEMU_ONLY:=}" && ! get_bool "$TEST_NO_NSPAWN"; then
+ echo "TEST: $TEST_DESCRIPTION [SKIPPED]: qemu-only tests requested" >&2
+ exit 0
+ fi
+
+ if get_bool "${TEST_PREFER_NSPAWN:=}" && ! get_bool "$TEST_NO_NSPAWN"; then
+ TEST_NO_QEMU=1
+ fi
+
+ # Detect lib paths
+ [[ "$libdir" ]] || for libdir in /lib64 /lib; do
+ [[ -d $libdir ]] && libdirs+=" $libdir" && break
+ done
+
+ [[ "$usrlibdir" ]] || for usrlibdir in /usr/lib64 /usr/lib; do
+ [[ -d $usrlibdir ]] && libdirs+=" $usrlibdir" && break
+ done
+
+ mkdir -p "$WORKDIR"
+ mkdir -p "$STATEDIR"
+
+ import_testdir
+ import_initdir
+
+ if [ -n "${SUDO_USER}" ]; then
+ ddebug "Making ${TESTDIR:?} readable for ${SUDO_USER} (acquired from sudo)"
+ setfacl -m "user:${SUDO_USER:?}:r-X" "${TESTDIR:?}"
+ fi
+
+ testname="$(basename "$PWD")"
+
+ while (($# > 0)); do
+ case $1 in
+ --run)
+ echo "${testname} RUN: $TEST_DESCRIPTION"
+ test_run "$TESTID"
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ echo "${testname} RUN: $TEST_DESCRIPTION [OK]"
+ else
+ echo "${testname} RUN: $TEST_DESCRIPTION [FAILED]"
+ fi
+ exit $ret
+ ;;
+ --setup)
+ echo "${testname} SETUP: $TEST_DESCRIPTION"
+ test_setup
+ test_setup_cleanup
+ ;;
+ --clean)
+ echo "${testname} CLEANUP: $TEST_DESCRIPTION"
+ test_cleanup
+ ;;
+ --clean-again)
+ echo "${testname} CLEANUP AGAIN: $TEST_DESCRIPTION"
+ test_cleanup_again
+ ;;
+ --all)
+ ret=0
+ echo -n "${testname}: $TEST_DESCRIPTION "
+ # Do not use a subshell, otherwise cleanup variables (LOOPDEV) will be lost
+ # and loop devices will leak
+ test_setup </dev/null >"$TESTLOG" 2>&1 || ret=$?
+ if [ $ret -eq 0 ]; then
+ test_setup_cleanup </dev/null >>"$TESTLOG" 2>&1 || ret=$?
+ fi
+ if [ $ret -eq 0 ]; then
+ test_run "$TESTID" </dev/null >>"$TESTLOG" 2>&1 || ret=$?
+ fi
+ test_cleanup
+ if [ $ret -eq 0 ]; then
+ # $TESTLOG is in $STATEDIR, so clean it up only on success
+ [[ -n "$STATEDIR" ]] && rm -vfr "$STATEDIR"
+ echo "[OK]"
+ else
+ echo "[FAILED]"
+ echo "see $TESTLOG"
+ fi
+ exit $ret
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+ done
+}
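+
+# Typical invocations from a test's run script (the script name is illustrative):
+#   ./test.sh --setup        # build and populate the test image
+#   ./test.sh --run          # boot it in qemu and/or nspawn and check the result
+#   ./test.sh --clean        # remove the image and the test directory
+#   ./test.sh --all          # setup + run + cleanup in one go, logging to $TESTLOG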