From b750101eb236130cf056c675997decbac904cc49 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 17:35:18 +0200
Subject: Adding upstream version 252.22.

Signed-off-by: Daniel Baumann
---
 test/TEST-64-UDEV-STORAGE/test.sh | 547 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 547 insertions(+)
 create mode 100755 test/TEST-64-UDEV-STORAGE/test.sh

diff --git a/test/TEST-64-UDEV-STORAGE/test.sh b/test/TEST-64-UDEV-STORAGE/test.sh
new file mode 100755
index 0000000..e329433
--- /dev/null
+++ b/test/TEST-64-UDEV-STORAGE/test.sh
@@ -0,0 +1,547 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: LGPL-2.1-or-later
+# vi: ts=4 sw=4 tw=0 et:
+#
+# TODO:
+#   * SW raid (mdadm)
+#   * MD (mdadm) -> dm-crypt -> LVM
+#   * iSCSI -> dm-crypt -> LVM
+set -e
+
+TEST_DESCRIPTION="systemd-udev storage tests"
+TEST_NO_NSPAWN=1
+# Save only journals of failing test cases by default (to conserve space)
+TEST_SAVE_JOURNAL="${TEST_SAVE_JOURNAL:-fail}"
+
+# shellcheck source=test/test-functions
+. "${TEST_BASE_DIR:?}/test-functions"
+
+USER_QEMU_OPTIONS="${QEMU_OPTIONS:-}"
+USER_KERNEL_APPEND="${KERNEL_APPEND:-}"
+
+_host_has_feature() {(
+    set -e
+
+    case "${1:?}" in
+        btrfs)
+            host_has_btrfs
+            ;;
+        iscsi)
+            # Client/initiator (Open-iSCSI)
+            command -v iscsiadm && command -v iscsid || return $?
+            # Server/target (TGT)
+            command -v tgtadm && command -v tgtd || return $?
+            ;;
+        lvm)
+            command -v lvm || return $?
+            ;;
+        mdadm)
+            host_has_mdadm
+            ;;
+        multipath)
+            command -v multipath && command -v multipathd || return $?
+            ;;
+        *)
+            echo >&2 "ERROR: Unknown feature '$1'"
+            # Make this a hard error to distinguish an invalid feature from
+            # a missing feature
+            exit 1
+    esac
+)}
+
+test_append_files() {(
+    local feature
+    # An associative array of requested (but optional) features and their
+    # respective "handlers" from test/test-functions
+    #
+    # Note: we install cryptsetup unconditionally, hence it's not explicitly
+    # checked for here
+    local -A features=(
+        [btrfs]=install_btrfs
+        [iscsi]=install_iscsi
+        [lvm]=install_lvm
+        [mdadm]=install_mdadm
+        [multipath]=install_multipath
+    )
+
+    instmods "=block" "=md" "=nvme" "=scsi"
+    install_dmevent
+    image_install lsblk swapoff swapon wc wipefs
+
+    # Install the optional features if the host has the respective tooling
+    for feature in "${!features[@]}"; do
+        if _host_has_feature "$feature"; then
+            "${features[$feature]}"
+        fi
+    done
+
+    generate_module_dependencies
+
+    for i in {0..127}; do
+        dd if=/dev/zero of="${TESTDIR:?}/disk$i.img" bs=1M count=1
+        echo "device$i" >"${TESTDIR:?}/disk$i.img"
+    done
+)}
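+
+# Why the "device$i" marker above: each backing image starts with a unique
+# string, which gives code running inside the VM a cheap way to map a block
+# device back to the image it is served from. A minimal guest-side sketch
+# (illustrative only -- /dev/sda is an assumed device node, nothing in this
+# script relies on it):
+#
+#   marker="$(head -n 1 /dev/sda)"
+#   if [[ "$marker" == "device0" ]]; then
+#       echo "/dev/sda is backed by disk0.img"
+#   fi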
+
+_image_cleanup() {
+    mount_initdir
+    # Clean up certain "problematic" files which may be left over by failing tests
+    : >"${initdir:?}/etc/fstab"
+    : >"${initdir:?}/etc/crypttab"
+}
+
+test_run_one() {
+    local test_id="${1:?}"
+
+    if run_qemu "$test_id"; then
+        check_result_qemu || { echo "qemu test failed"; return 1; }
+    fi
+
+    return 0
+}
+
+test_run() {
+    local test_id="${1:?}"
+    local passed=()
+    local failed=()
+    local skipped=()
+    local ec state
+
+    mount_initdir
+
+    if get_bool "${TEST_NO_QEMU:=}" || ! find_qemu_bin; then
+        dwarn "can't run qemu, skipping"
+        return 0
+    fi
+
+    # Execute each currently defined function starting with "testcase_"
+    for testcase in "${TESTCASES[@]}"; do
+        _image_cleanup
+        echo "------ $testcase: BEGIN ------"
+        # Note for my future frustrated self: `fun && xxx` (as well as ||, if, while,
+        # until, etc.) _DISABLES_ the `set -e` behavior in _ALL_ nested function
+        # calls made from `fun()`, i.e. the function _CONTINUES_ even when a called
+        # command returns a non-zero EC. That may unexpectedly hide failing commands
+        # if not handled properly. See: bash(1) man page, `set -e` section.
+        #
+        # So, be careful when adding cleanup snippets in the testcase_*() functions -
+        # if the `test_run_one()` function isn't the last command, you have to
+        # propagate the exit code correctly (e.g. `test_run_one() || return $?`,
+        # see below).
+        ec=0
+        "$testcase" "$test_id" || ec=$?
+        case $ec in
+            0)
+                passed+=("$testcase")
+                state="PASS"
+                ;;
+            77)
+                skipped+=("$testcase")
+                state="SKIP"
+                ;;
+            *)
+                failed+=("$testcase")
+                state="FAIL"
+        esac
+        echo "------ $testcase: END ($state) ------"
+    done
+
+    echo "Passed tests: ${#passed[@]}"
+    printf "    * %s\n" "${passed[@]}"
+    echo "Skipped tests: ${#skipped[@]}"
+    printf "    * %s\n" "${skipped[@]}"
+    echo "Failed tests: ${#failed[@]}"
+    printf "    * %s\n" "${failed[@]}"
+
+    [[ ${#failed[@]} -eq 0 ]] || return 1
+
+    return 0
+}
+
+testcase_megasas2_basic() {
+    if ! "${QEMU_BIN:?}" -device help | grep 'name "megasas-gen2"'; then
+        echo "megasas-gen2 device driver is not available, skipping test..."
+        return 77
+    fi
+
+    local i
+    local qemu_opts=(
+        "-device megasas-gen2,id=scsi0"
+        "-device megasas-gen2,id=scsi1"
+        "-device megasas-gen2,id=scsi2"
+        "-device megasas-gen2,id=scsi3"
+    )
+
+    for i in {0..127}; do
+        # Add 128 drives, 32 per bus
+        qemu_opts+=(
+            "-device scsi-hd,drive=drive$i,bus=scsi$((i / 32)).0,channel=0,scsi-id=$((i % 32)),lun=0"
+            "-drive format=raw,cache=unsafe,file=${TESTDIR:?}/disk$i.img,if=none,id=drive$i"
+        )
+    done
+
+    KERNEL_APPEND="systemd.setenv=TEST_FUNCTION_NAME=${FUNCNAME[0]} ${USER_KERNEL_APPEND:-}"
+    QEMU_OPTIONS="${qemu_opts[*]} ${USER_QEMU_OPTIONS:-}"
+    test_run_one "${1:?}"
+}
+
+testcase_nvme_basic() {
+    if ! "${QEMU_BIN:?}" -device help | grep 'name "nvme"'; then
+        echo "nvme device driver is not available, skipping test..."
+        return 77
+    fi
+
+    local i
+    local qemu_opts=()
+
+    for i in {0..27}; do
+        qemu_opts+=(
+            "-device nvme,drive=nvme$i,serial=deadbeef$i,num_queues=8"
+            "-drive format=raw,cache=unsafe,file=${TESTDIR:?}/disk$i.img,if=none,id=nvme$i"
+        )
+    done
+
+    KERNEL_APPEND="systemd.setenv=TEST_FUNCTION_NAME=${FUNCNAME[0]} ${USER_KERNEL_APPEND:-}"
+    QEMU_OPTIONS="${qemu_opts[*]} ${USER_QEMU_OPTIONS:-}"
+    test_run_one "${1:?}"
+}
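+
+# Each testcase_*() function follows the same pattern as the two above: build
+# qemu_opts, advertise its own name to the guest via `systemd.setenv=` on the
+# kernel command line, and hand off to test_run_one(). A rough sketch of the
+# matching guest-side dispatch (hypothetical -- the real counterpart lives in
+# the in-VM test script, not in this file):
+#
+#   : "${TEST_FUNCTION_NAME:?}"    # injected via systemd.setenv= above
+#   if declare -f "$TEST_FUNCTION_NAME" >/dev/null; then
+#       "$TEST_FUNCTION_NAME"      # run the guest checks for this testcase
+#   else
+#       echo >&2 "No handler defined for $TEST_FUNCTION_NAME"
+#       exit 1
+#   fi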
+
+# Testcase for:
+#   * https://github.com/systemd/systemd/pull/24748
+#   * https://github.com/systemd/systemd/pull/24766
+#   * https://github.com/systemd/systemd/pull/24946
+# Docs: https://qemu.readthedocs.io/en/latest/system/devices/nvme.html#nvm-subsystems
+testcase_nvme_subsystem() {
+    if ! "${QEMU_BIN:?}" -device help | grep 'name "nvme-subsys"'; then
+        echo "nvme-subsystem device driver is not available, skipping test..."
+        return 77
+    fi
+
+    local i
+    local qemu_opts=(
+        # Create an NVM Subsystem Device
+        "-device nvme-subsys,id=nvme-subsys-64,nqn=subsys64"
+        # Attach two NVM controllers to it
+        "-device nvme,subsys=nvme-subsys-64,serial=deadbeef"
+        "-device nvme,subsys=nvme-subsys-64,serial=deadbeef"
+        # And create two shared namespaces attached to both controllers
+        "-device nvme-ns,drive=nvme0,nsid=16,shared=on"
+        "-drive format=raw,cache=unsafe,file=${TESTDIR:?}/disk0.img,if=none,id=nvme0"
+        "-device nvme-ns,drive=nvme1,nsid=17,shared=on"
+        "-drive format=raw,cache=unsafe,file=${TESTDIR:?}/disk1.img,if=none,id=nvme1"
+    )
+
+    KERNEL_APPEND="systemd.setenv=TEST_FUNCTION_NAME=${FUNCNAME[0]} ${USER_KERNEL_APPEND:-}"
+    QEMU_OPTIONS="${qemu_opts[*]} ${USER_QEMU_OPTIONS:-}"
+    test_run_one "${1:?}"
+}
+
+# Test for issue https://github.com/systemd/systemd/issues/20212
+testcase_virtio_scsi_identically_named_partitions() {
+    if ! "${QEMU_BIN:?}" -device help | grep 'name "virtio-scsi-pci"'; then
+        echo "virtio-scsi-pci device driver is not available, skipping test..."
+        return 77
+    fi
+
+    # Create 16 disks, with 8 partitions per disk (all identically named)
+    # and attach them to a virtio-scsi controller
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0,num_queues=4")
+    local diskpath="${TESTDIR:?}/namedpart0.img"
+    local i lodev num_disk num_part qemu_timeout
+
+    if get_bool "${IS_BUILT_WITH_ASAN:=}" || ! get_bool "$QEMU_KVM"; then
+        num_disk=4
+        num_part=4
+    else
+        num_disk=16
+        num_part=8
+    fi
+
+    dd if=/dev/zero of="$diskpath" bs=1M count=18
+    lodev="$(losetup --show -f -P "$diskpath")"
+    sfdisk "${lodev:?}" <