-rw-r--r--  .gitignore  1
-rw-r--r--  .mailmap  6
-rw-r--r--  .perltidyrc  15
-rw-r--r--  CHANGELOG.md  393
-rw-r--r--  README.md  175
-rwxr-xr-x  caching_proxy.py  118
-rwxr-xr-x  coverage.py  457
-rwxr-xr-x  coverage.sh  114
-rw-r--r--  coverage.txt  436
-rw-r--r--  examples/twb/debian-11-minimal.py  100
-rw-r--r--  examples/twb/debian-sid-zfs.py  212
-rwxr-xr-x  gpgvnoexpkeysig  51
-rwxr-xr-x  hooks/busybox/extract00.sh  14
-rwxr-xr-x  hooks/busybox/setup00.sh  17
-rwxr-xr-x  hooks/copy-host-apt-sources-and-preferences/customize00.pl  44
-rwxr-xr-x  hooks/copy-host-apt-sources-and-preferences/setup00.sh  46
-rw-r--r--  hooks/eatmydata/README.txt  5
-rwxr-xr-x  hooks/eatmydata/customize.sh  30
-rwxr-xr-x  hooks/eatmydata/extract.sh  75
-rwxr-xr-x  hooks/file-mirror-automount/customize00.sh  41
-rwxr-xr-x  hooks/file-mirror-automount/setup00.sh  73
-rwxr-xr-x  hooks/jessie-or-older/extract00.sh  16
-rwxr-xr-x  hooks/jessie-or-older/extract01.sh  47
-rwxr-xr-x  hooks/maybe-jessie-or-older/extract00.sh  37
-rwxr-xr-x  hooks/maybe-jessie-or-older/extract01.sh  57
-rwxr-xr-x  hooks/maybe-merged-usr/essential00.sh  36
-rwxr-xr-x  hooks/maybe-merged-usr/extract00.sh  27
-rwxr-xr-x  hooks/maybe-merged-usr/setup00.sh  27
-rwxr-xr-x  hooks/merged-usr/essential00.sh  28
-rwxr-xr-x  hooks/merged-usr/extract00.sh  85
-rwxr-xr-x  hooks/merged-usr/setup00.sh  79
l---------  hooks/no-merged-usr/essential00.sh  1
-rwxr-xr-x  hooks/no-merged-usr/setup00.sh  54
-rwxr-xr-x  ldconfig.fakechroot  131
-rwxr-xr-x  make_mirror.sh  568
-rwxr-xr-x  mmdebstrap  7897
-rwxr-xr-x  mmdebstrap-autopkgtest-build-qemu  444
-rwxr-xr-x  proxysolver  56
-rwxr-xr-x  run_null.sh  40
-rwxr-xr-x  run_qemu.sh  49
-rwxr-xr-x  tarfilter  300
-rw-r--r--  tests/apt-patterns  8
-rw-r--r--  tests/apt-patterns-custom  9
-rw-r--r--  tests/aptopt  9
-rw-r--r--  tests/arm64-without-qemu-support  18
-rw-r--r--  tests/as-debootstrap-unshare-wrapper  133
-rw-r--r--  tests/ascii-armored-keys  21
-rw-r--r--  tests/aspcud-apt-solver  11
-rw-r--r--  tests/auto-mode-as-normal-user  22
-rw-r--r--  tests/auto-mode-without-unshare-capabilities  14
-rw-r--r--  tests/automatic-mirror-from-suite  14
-rw-r--r--  tests/check-against-debootstrap-dist  228
-rw-r--r--  tests/check-for-bit-by-bit-identical-format-output  28
-rw-r--r--  tests/chroot-directory-not-accessible-by-apt-user  8
-rw-r--r--  tests/chrootless  16
-rw-r--r--  tests/chrootless-fakeroot  43
-rw-r--r--  tests/chrootless-foreign  68
-rw-r--r--  tests/compare-output-with-pre-seeded-var-cache-apt-archives  44
-rw-r--r--  tests/copy-mirror  10
-rw-r--r--  tests/create-directory  9
-rw-r--r--  tests/create-directory-dry-run  29
-rw-r--r--  tests/create-foreign-tarball  77
-rw-r--r--  tests/create-gzip-compressed-tarball  20
-rw-r--r--  tests/create-tarball-dry-run  27
-rw-r--r--  tests/create-tarball-with-tmp-mounted-nodev  12
-rw-r--r--  tests/custom-tmpdir  33
-rw-r--r--  tests/customize-hook  16
-rw-r--r--  tests/cwd-directory-not-accessible-by-unshared-user  30
-rw-r--r--  tests/deb822-1-2  45
-rw-r--r--  tests/deb822-2-2  44
-rw-r--r--  tests/debootstrap  10
-rw-r--r--  tests/debootstrap-no-op-options  6
-rw-r--r--  tests/debug  17
-rw-r--r--  tests/debug-output-on-fake-tty  6
-rw-r--r--  tests/dev-ptmx  149
-rw-r--r--  tests/directory-ending-in-tar  12
-rw-r--r--  tests/dist-using-codename  13
-rw-r--r--  tests/dpkgopt  10
-rw-r--r--  tests/eatmydata-via-hook-dir  43
-rw-r--r--  tests/empty-sources.list  8
-rw-r--r--  tests/error-if-stdout-is-tty  12
-rw-r--r--  tests/essential-hook  21
-rw-r--r--  tests/existing-directory-with-lost-found  9
-rw-r--r--  tests/existing-empty-directory  7
-rw-r--r--  tests/fail-installing-to-existing-file  13
-rw-r--r--  tests/fail-installing-to-non-empty-lost-found  13
-rw-r--r--  tests/fail-installing-to-non-empty-target-directory  13
-rw-r--r--  tests/fail-installing-to-root  9
-rw-r--r--  tests/fail-with-missing-lz4  9
-rw-r--r--  tests/fail-with-path-with-quotes  12
-rw-r--r--  tests/fail-without-etc-subuid  16
-rw-r--r--  tests/fail-without-username-in-etc-subuid  17
-rw-r--r--  tests/failing-customize-hook  10
-rw-r--r--  tests/file-mirror  13
-rw-r--r--  tests/file-mirror-automount-hook  20
-rw-r--r--  tests/help  6
-rw-r--r--  tests/hook-directory  49
-rw-r--r--  tests/i386-which-can-be-executed-without-qemu  41
-rw-r--r--  tests/include  12
-rw-r--r--  tests/include-deb-file  40
-rw-r--r--  tests/include-foreign-libmagic-mgc  47
-rw-r--r--  tests/include-foreign-libmagic-mgc-with-multiple-arch-options  48
-rw-r--r--  tests/include-with-multiple-apt-sources  10
-rw-r--r--  tests/install-busybox-based-sub-essential-system  41
-rw-r--r--  tests/install-doc-debian  56
-rw-r--r--  tests/install-doc-debian-and-output-tarball  23
-rw-r--r--  tests/install-doc-debian-and-test-hooks  59
-rw-r--r--  tests/install-libmagic-mgc-on-foreign  69
-rw-r--r--  tests/invalid-mirror  10
-rw-r--r--  tests/jessie-or-older  42
-rw-r--r--  tests/keyring  18
-rw-r--r--  tests/keyring-overwrites  15
-rw-r--r--  tests/logfile  22
-rw-r--r--  tests/man  7
-rw-r--r--  tests/merged-fakechroot-inside-unmerged-chroot  49
-rw-r--r--  tests/mirror-is-deb  6
-rw-r--r--  tests/mirror-is-real-file  9
-rw-r--r--  tests/mirror-is-stdin  6
-rw-r--r--  tests/missing-dev-sys-proc-inside-the-chroot  20
-rw-r--r--  tests/missing-device-nodes-outside-the-chroot  12
-rw-r--r--  tests/mmdebstrap  20
-rw-r--r--  tests/mount-is-missing  13
-rw-r--r--  tests/multiple-include  21
-rw-r--r--  tests/no-sbin-in-path  28
-rw-r--r--  tests/not-having-to-install-apt-in-include-because-a-hook-did-it-before  9
-rw-r--r--  tests/pass-distribution-but-implicitly-write-to-stdout  14
-rw-r--r--  tests/pivot_root  54
-rw-r--r--  tests/preserve-mode-of-etc-resolv-conf-and-etc-hostname  102
-rw-r--r--  tests/progress-bars-on-fake-tty  6
-rw-r--r--  tests/quiet  6
-rw-r--r--  tests/read-from-stdin-write-to-stdout  6
-rw-r--r--  tests/remove-start-stop-daemon-and-policy-rc-d-in-hook  8
-rw-r--r--  tests/root-mode-inside-chroot  28
-rw-r--r--  tests/root-mode-inside-unshare-chroot  40
-rw-r--r--  tests/root-without-cap-sys-admin  17
-rw-r--r--  tests/sigint-during-customize-hook  22
-rw-r--r--  tests/signed-by-with-host-keys  7
-rw-r--r--  tests/signed-by-without-host-keys  17
-rw-r--r--  tests/skip-mount  12
-rw-r--r--  tests/skip-output-dev  35
-rw-r--r--  tests/skip-output-mknod  30
-rw-r--r--  tests/skip-start-stop-daemon-policy-rc  10
-rw-r--r--  tests/skip-tar-in-mknod  28
-rw-r--r--  tests/special-hooks-using-helpers  28
-rw-r--r--  tests/special-hooks-using-helpers-and-env-vars  31
-rw-r--r--  tests/special-hooks-with-mode-mode  148
-rw-r--r--  tests/stable-default-mirror  20
-rw-r--r--  tests/supply-components-manually  7
-rw-r--r--  tests/tarfilter-idshift  58
-rw-r--r--  tests/unpack-doc-debian  57
-rw-r--r--  tests/unshare-as-root-user  9
-rw-r--r--  tests/unshare-as-root-user-inside-chroot  28
-rw-r--r--  tests/unshare-include-deb  49
-rw-r--r--  tests/variant-custom-timeout  11
-rw-r--r--  tests/verbose  17
-rw-r--r--  tests/version  6
-rw-r--r--  tests/without-etc-resolv-conf-and-etc-hostname  14
-rw-r--r--  tests/xz-compressed-tarball  7
158 files changed, 15656 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8a205e8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+shared
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..e0ebcf2
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,6 @@
+Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+Johannes Schauer Marin Rodrigues <josch@mister-muffin.de> <j.schauer@email.de>
+Johannes Schauer Marin Rodrigues <josch@mister-muffin.de> <josch@debian.org>
+Johannes Schauer Marin Rodrigues <josch@mister-muffin.de> <Johannes Schauer Marin Rodrigues josch@debian.org>
+Helmut Grohne <helmut@subdivi.de> <helmut.grohne@intenta.de>
+Benjamin Drung <benjamin.drung@ionos.com> <benjamin.drung@cloud.ionos.com>
diff --git a/.perltidyrc b/.perltidyrc
new file mode 100644
index 0000000..bbc8b84
--- /dev/null
+++ b/.perltidyrc
@@ -0,0 +1,15 @@
+# mmdebstrap is a tool focused on Debian and derivatives (it relies on apt
+# after all). Thus, we use a perl style used in other Debian Perl code. The
+# following options are used in Lintian and devscripts
+
+--break-before-all-operators
+--noblanks-before-comments
+--cuddled-else
+--maximum-line-length=79
+--paren-tightness=2
+--square-bracket-tightness=2
+--space-for-semicolon
+--opening-brace-always-on-right
+--stack-opening-tokens
+--stack-closing-tokens
+--format-skipping
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..fea8b9a
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,393 @@
+1.4.3 (2024-02-01)
+------------------
+
+ - take hard links into account when computing disk usage
+
+1.4.2 (2024-01-29)
+------------------
+
+ - allow for start-stop-daemon to be in either /sbin or /usr/sbin
+ - mmdebstrap-autopkgtest-build-qemu: fix octal mode computation and hostname
+
+1.4.1 (2024-01-09)
+------------------
+
+ - set DPkg::Chroot-Directory in APT_CONFIG to simplify calling apt in hooks
+ - disallow running chrootless as root without fakeroot unless
+ --skip=check/chrootless is used
+ - only print short --help output if wrong args are passed
+ - read files passed as --aptopt and --dpkgopt outside the unshared namespace
+
+1.4.0 (2023-10-24)
+------------------
+
+ - add mmdebstrap-autopkgtest-build-qemu
+ - export container=mmdebstrap-unshare env variable in unshare-mode hooks
+ - add new skip options: output/dev, output/mknod, tar-in/mknod,
+ copy-in/mknod, sync-in/mknod
+ - stop copying qemu-$arch-static binary into the chroot
+ - tarfilter: add --type-exclude option
+ - set MMDEBSTRAP_FORMAT in hooks
+ - do not install priority:required in buildd variant following debootstrap
+
+1.3.8 (2023-08-20)
+------------------
+
+ - hooks/merged-usr: implement post-merging as debootstrap does
+ - exclude ./lost+found from tarball
+
+1.3.7 (2023-06-21)
+------------------
+
+ - add hooks/copy-host-apt-sources-and-preferences
+
+1.3.6 (2023-06-16)
+------------------
+
+ - bugfix release
+
+1.3.5 (2023-03-20)
+------------------
+
+ - bugfix release
+
+1.3.4 (2023-03-16)
+------------------
+
+ - more safeguards before automatically choosing unshare mode
+
+1.3.3 (2023-02-19)
+------------------
+
+ - testsuite improvements
+
+1.3.2 (2023-02-16)
+------------------
+
+ - unshare mode works in privileged docker containers
+
+1.3.1 (2023-01-20)
+------------------
+
+ - bugfix release
+
+1.3.0 (2023-01-16)
+------------------
+
+ - add hooks/maybe-jessie-or-older and hooks/maybe-merged-usr
+ - add --skip=check/signed-by
+ - hooks/jessie-or-older: split into two individual hook files
+ - skip running apt-get update if we are very sure that it was already run
+ - be more verbose when 'apt-get update' failed
+ - warn if a hook is named like one but not executable and if a hook is
+ executable but not named like one
+ - to find signed-by value, run gpg on the individual keys to print better
+ error messages in case it fails (gpg doesn't give an indication which file
+ it was unable to read) and print progress bar
+ - allow empty sources.list entries
+
+1.2.5 (2023-01-04)
+------------------
+
+ - bugfix release
+
+1.2.4 (2022-12-23)
+------------------
+
+ - bugfix release
+ - add jessie-or-older extract hook
+
+1.2.3 (2022-11-16)
+------------------
+
+ - use Text::ParseWords::shellwords instead of spawning a new shell
+ - mount and unmount once instead of for each run_chroot() call
+
+1.2.2 (2022-10-27)
+------------------
+
+ - allow /etc/apt/trusted.gpg.d/ not to exist
+ - always create /var/lib/dpkg/arch to make foreign architecture chrootless
+ tarballs bit-by-bit identical
+ - write an empty /etc/machine-id instead of writing 'uninitialized'
+ - only print progress bars on interactive terminals that are wide enough
+
+1.2.1 (2022-09-08)
+------------------
+
+ - bugfix release
+
+1.2.0 (2022-09-05)
+------------------
+
+ - remove proot mode
+ - error out if stdout is an interactive terminal
+ - replace taridshift by tarfilter --idshift
+ - tarfilter: add --transform option
+ - multiple --skip options can be separated by comma or whitespace
+ - also cleanup the contents of /run
+ - support apt patterns and paths with commas and whitespace in --include
+ - hooks: store the values of the --include option in MMDEBSTRAP_INCLUDE
+ - add new --skip options: chroot/start-stop-daemon, chroot/policy-rc.d
+ chroot/mount, chroot/mount/dev, chroot/mount/proc, chroot/mount/sys,
+ cleanup/run
+
+1.1.0 (2022-07-26)
+------------------
+
+ - mount a new /dev/pts instance into the chroot to make posix_openpt work
+ - adjust merged-/usr hook to work the same way as debootstrap
+ - add no-merged-usr hook
+
+1.0.1 (2022-05-29)
+------------------
+
+ - bugfix release
+
+1.0.0 (2022-05-28)
+------------------
+
+ - all documented interfaces are now considered stable
+ - allow file:// mirrors
+ - /var/cache/apt/archives/ is now allowed to contain *.deb packages
+ - add file-mirror-automount hook-dir
+ - set $MMDEBSTRAP_VERBOSITY in hooks
+ - rewrite coverage with multiple individual and skippable shell scripts
+
+0.8.6 (2022-03-25)
+------------------
+
+ - allow running root mode inside unshare mode
+
+0.8.5 (2022-03-07)
+------------------
+
+ - improve documentation
+
+0.8.4 (2022-02-11)
+------------------
+
+ - tarfilter: add --strip-components option
+ - don't install essential packages in run_install()
+ - remove /var/lib/dbus/machine-id
+
+0.8.3 (2022-01-08)
+------------------
+
+ - allow codenames with apt patterns (requires apt >= 2.3.14)
+ - don't overwrite existing files in setup code
+ - don't copy in qemu-user-static binary if it's not needed
+
+0.8.2 (2021-12-14)
+------------------
+
+ - use apt patterns to select priority variants (requires apt >= 2.3.10)
+
+0.8.1 (2021-10-07)
+------------------
+
+ - enforce dpkg >= 1.20.0 and apt >= 2.3.7
+ - allow the working directory to not be world-readable
+ - do not run xz and zstd with --threads=0 since this is a bad default for
+ machines with more than 100 cores
+ - bit-by-bit identical chrootless mode
+
+0.8.0 (2021-09-21)
+------------------
+
+ - allow running inside chroot in root mode
+ - allow running without /dev, /sys or /proc
+ - new --format=null which gets automatically selected if the output is
+ /dev/null and doesn't produce a tarball or other permanent output
+ - allow ASCII-armored keyrings (requires gnupg >= 2.2.8)
+ - run zstd with --threads=0
+ - tarfilter: add --pax-exclude and --pax-include to strip extended attributes
+ - add --skip=setup, --skip=update and --skip=cleanup
+ - add --skip=cleanup/apt/lists and --skip=cleanup/apt/cache
+ - pass extended attributes (excluding system) to tar2sqfs
+ - use apt-get update --error-on=any (requires apt >= 2.1.16)
+ - support Debian 11 Bullseye
+ - use apt from outside using DPkg::Chroot-Directory (requires apt >= 2.3.7)
+ * build chroots without apt (for example from buildinfo files)
+ * no need to install additional packages like apt-transport-* or
+ ca-certificates inside the chroot
+ * no need for additional key material inside the chroot
+ * possible use of file:// and copy://
+ - use apt pattern to select essential set
+ - write 'uninitialized' to /etc/machine-id
+ - allow running in root mode without mount working, either because of missing
+ CAP_SYS_ADMIN or missing /usr/bin/mount
+ - make /etc/ld.so.cache under fakechroot mode bit-by-bit identical to root
+ and unshare mode
+ - move hooks/setup00-merged-usr.sh to hooks/merged-usr/setup00.sh
+ - add gpgvnoexpkeysig script for very old snapshot.d.o timestamps with expired
+ signature
+
+0.7.5 (2021-02-06)
+------------------
+
+ - skip emulation check for extract variant
+ - add new suite name trixie
+ - unset TMPDIR in hooks because there is no value that works inside as well as
+ outside the chroot
+ - expose hook name to hooks via MMDEBSTRAP_HOOK environment variable
+
+0.7.4 (2021-01-16)
+------------------
+
+ - Optimize mmtarfilter to handle many path exclusions
+ - Set MMDEBSTRAP_APT_CONFIG, MMDEBSTRAP_MODE and MMDEBSTRAP_HOOKSOCK for hook
+ scripts
+ - Do not run an additional env command inside the chroot
+ - Allow unshare mode as root user
+ - Additional checks whether root has the necessary privileges to mount
+ - Make most features work on Debian 10 Buster
+
+0.7.3 (2020-12-02)
+------------------
+
+ - bugfix release
+
+0.7.2 (2020-11-28)
+------------------
+
+ - check whether tools like dpkg and apt are installed at startup
+ - make it possible to seed /var/cache/apt/archives with deb packages
+ - if a suite name was specified, use the matching apt index to figure out the
+ package set to install
+ - use Debian::DistroInfo or /usr/share/distro-info/debian.csv (if available)
+ to figure out the security mirror for bullseye and beyond
+ - use argparse in tarfilter and taridshift for proper --help output
+
+0.7.1 (2020-09-18)
+------------------
+
+ - bugfix release
+
+0.7.0 (2020-08-27)
+------------------
+
+ - the hook system (setup, extract, essential, customize and hook-dir) is made
+ public and is now a documented interface
+ - tarball is also created if the output is a named pipe or character special
+ - add --format option to control the output format independent of the output
+ filename or in cases where output is directed to stdout
+ - generate ext2 filesystems if output file ends with .ext2 or --format=ext2
+ - add --skip option to prevent some automatic actions from being carried out
+ - implement dpkg-realpath in perl so that we don't need to run tar inside the
+ chroot anymore for modes other than fakechroot and proot
+ - add ready-to-use hook scripts for eatmydata, merged-usr and busybox
+ - add tarfilter tool
+ - use distro-info-data and debootstrap to help with suite name and keyring
+ discovery
+ - no longer needs to install twice when --dpkgopt=path-exclude is given
+ - variant=custom and hooks can be used as a debootstrap wrapper
+ - use File::Find instead of "du" to avoid different results on different
+ filesystems
+ - many, many bugfixes and documentation enhancements
+
+0.6.1 (2020-03-08)
+------------------
+
+ - replace /etc/machine-id with an empty file
+ - fix deterministic tar with pax and xattr support
+ - support deb822-style format apt sources
+ - mount /sys and /proc as read-only in root mode
+ - unset TMPDIR environment variable for everything running inside the chroot
+
+0.6.0 (2020-01-16)
+------------------
+
+ - allow multiple --architecture options
+ - allow multiple --include options
+ - enable parallel compression with xz by default
+ - add --man option
+ - add --keyring option overwriting apt's default keyring
+ - preserve extended attributes in tarball
+ - allow running tests on non-amd64 systems
+ - generate squashfs images if output file ends in .sqfs or .squashfs
+ - add --dry-run/--simulate options
+ - add taridshift tool
+
+0.5.1 (2019-10-19)
+------------------
+
+ - minor bugfixes and documentation clarification
+ - the --components option now takes component names as a comma or whitespace
+ separated list or as multiple --components options
+ - make_mirror.sh now has to be invoked manually before calling coverage.sh
+
+0.5.0 (2019-10-05)
+------------------
+
+ - do not unconditionally read sources.list from stdin anymore
+ * if mmdebstrap is used via ssh without a pseudo-terminal, it will stall
+ forever
+ * as this is unexpected, one now has to explicitly request reading
+ sources.list from stdin in situations where it's ambiguous whether
+ that is requested
+ * thus, the following modes of operation don't work anymore:
+ $ mmdebstrap unstable /output/dir < sources.list
+ $ mmdebstrap unstable /output/dir http://mirror < sources.list
+ * instead, one now has to write:
+ $ mmdebstrap unstable /output/dir - < sources.list
+ $ mmdebstrap unstable /output/dir http://mirror - < sources.list
+ - fix binfmt_misc support on docker
+ - do not use qemu for architectures other than the native architecture if
+   they can be executed without it
+ - do not copy /etc/resolv.conf or /etc/hostname if the host system doesn't
+ have them
+ - add --force-check-gpg dummy option
+ - allow hooks to remove start-stop-daemon
+ - add /var/lib/dpkg/arch in chrootless mode when chroot architecture differs
+ - create /var/lib/dpkg/cmethopt for dselect
+ - do not skip package installation in 'custom' variant
+ - fix EDSP output for external solvers so that apt doesn't mark itself as
+ Essential:yes
+ - also re-exec under fakechroot if fakechroot is picked in 'auto' mode
+ - chdir() before 'apt-get update' to accommodate apt << 1.5
+ - add Dir::State::Status to apt config for apt << 1.3
+ - chmod 0755 on qemu-user-static binary
+ - select the right mirror for ubuntu, kali and tanglu
+
+0.4.1 (2019-03-01)
+------------------
+
+ - re-enable fakechroot mode testing
+ - disable apt sandboxing if necessary
+ - keep apt and dpkg lock files
+
+0.4.0 (2019-02-23)
+------------------
+
+ - disable merged-usr
+ - add --verbose option that prints apt and dpkg output instead of progress
+ bars
+ - add --quiet/--silent options which print nothing on stderr
+ - add --debug option for even more output than with --verbose
+ - add some no-op options to make mmdebstrap a drop-in replacement for certain
+ debootstrap wrappers like sbuild-createchroot
+ - add --logfile option which outputs to a file what would otherwise be written
+ to stderr
+ - add --version option
+
+0.3.0 (2018-11-21)
+------------------
+
+ - add chrootless mode
+ - add extract and custom variants
+ - make testsuite unprivileged through qemu and guestfish
+ - allow empty lost+found directory in target
+ - add 54 testcases and fix lots of bugs as a result
+
+0.2.0 (2018-10-03)
+------------------
+
+ - if no MIRROR was specified but there was data on standard input, then use
+ that data as the sources.list instead of falling back to the default mirror
+ - lots of bug fixes
+
+0.1.0 (2018-09-24)
+------------------
+
+ - initial release
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bfb460a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,175 @@
+mmdebstrap
+==========
+
+An alternative to debootstrap which uses apt internally and is thus able to use
+more than one mirror and resolve more complex dependencies.
+
+Usage
+-----
+
+Use like debootstrap:
+
+ sudo mmdebstrap unstable ./unstable-chroot
+
+Without superuser privileges:
+
+ mmdebstrap unstable unstable-chroot.tar
+
+With complex apt options:
+
+ cat /etc/apt/sources.list | mmdebstrap > unstable-chroot.tar
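+
+The `--variant` and `--include` options described in the changelog below can
+be combined with any of the above invocations, for example:
+
+    mmdebstrap --variant=minbase --include=ca-certificates unstable chroot.tar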
+
+For the full documentation use:
+
+ pod2man ./mmdebstrap | man -l -
+
+Or read a HTML version of the man page in either of these locations:
+
+ - https://gitlab.mister-muffin.de/josch/mmdebstrap/wiki
+ - https://manpages.debian.org/unstable/mmdebstrap/mmdebstrap.1.en.html
+
+The sales pitch in comparison to debootstrap
+--------------------------------------------
+
+Summary:
+
+ - more than one mirror possible
+ - security and updates mirror included for Debian stable chroots
+ - twice as fast
+ - chroot with apt in 11 seconds
+ - gzipped tarball with apt is 27M small
+ - bit-by-bit reproducible output
+ - unprivileged operation using Linux user namespaces or fakechroot
+ - can operate on filesystems mounted with nodev
+ - foreign architecture chroots with qemu-user
+ - variant installing only Essential:yes packages and dependencies
+ - temporary chroots by redirecting to /dev/null
+ - chroots without apt inside (for chroot from buildinfo file with debootsnap)
+
+The author believes that a chroot of a Debian stable release should include the
+latest packages including security fixes by default. This has been a wontfix
+with debootstrap since 2009 (See #543819 and #762222). Since mmdebstrap uses
+apt internally, support for multiple mirrors comes for free and stable or
+oldstable **chroots will include security and updates mirrors**.
+
+A side effect of using apt is that mmdebstrap is twice as fast as
+debootstrap. The timings were carried out on a laptop with an Intel Core
+i5-5200U, using a mirror on localhost and a tmpfs.
+
+| variant | mmdebstrap | debootstrap |
+| --------- | ---------- | ------------ |
+| essential | 9.52 s | n.a |
+| apt | 10.98 s | n.a |
+| minbase | 13.54 s | 26.37 s |
+| buildd | 21.31 s | 34.85 s |
+| - | 23.01 s | 48.83 s |
+
+Apt considers itself an `Essential: yes` package. This feature allows one to
+create a chroot containing just the `Essential: yes` packages and apt (and
+their hard dependencies) in **just 11 seconds**.
+
+If desired, a most minimal chroot with just the `Essential: yes` packages and
+their hard dependencies can be created with a gzipped tarball size of just 34M.
+By using dpkg's `--path-exclude` option to exclude documentation, even smaller
+gzipped tarballs of 21M in size are possible. If apt is included, the result is
+a **gzipped tarball of only 27M**.
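+
+A sketch of such an invocation, combining the `essential` variant with dpkg's
+`path-exclude` via the `--dpkgopt` option mentioned in the changelog below:
+
+    mmdebstrap --variant=essential \
+        --dpkgopt='path-exclude=/usr/share/doc/*' unstable small-chroot.tar.gz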
+
+These small sizes are also achieved because apt caches and other cruft are
+stripped from the chroot. This also makes the result **bit-by-bit
+reproducible** if the `$SOURCE_DATE_EPOCH` environment variable is set.
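+
+For example, pinning the timestamp to a fixed value might look like this:
+
+    SOURCE_DATE_EPOCH=1609459200 mmdebstrap unstable chroot.tar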
+
+The author believes that it should not be necessary to have superuser
+privileges to create a file (the chroot tarball) in one's home directory.
+Thus, mmdebstrap provides multiple options to create a chroot tarball with the
+right permissions **without superuser privileges**. This avoids a whole class
+of bugs like #921815. Depending on what is available, it uses either Linux user
+namespaces or fakechroot. Debootstrap supports fakechroot but will not
+create a tarball with the right permissions by itself. Support for Linux user
+namespaces is missing (see #829134).
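+
+A sketch of an explicitly unprivileged invocation (the `--mode=unshare` option
+is also used by the example scripts shipped in this repository):
+
+    mmdebstrap --mode=unshare --variant=apt unstable unstable-chroot.tar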
+
+When creating a chroot tarball with debootstrap, the temporary chroot directory
+cannot be on a filesystem that has been mounted with nodev. In unprivileged
+mode, **mknod is never used**, which means that /tmp can be used as a temporary
+directory location even if it's mounted with nodev as a security measure.
+
+If the chroot architecture cannot be executed by the current machine, qemu-user
+is used to allow one to create a **foreign architecture chroot**.
+
+Limitations in comparison to debootstrap
+----------------------------------------
+
+Debootstrap supports creating a Debian chroot on non-Debian systems but
+mmdebstrap requires apt and is thus limited to Debian and derivatives. This
+means that mmdebstrap can never fully replace debootstrap and debootstrap will
+continue to be relevant in situations where you want to create a Debian chroot
+from a platform without apt and dpkg.
+
+There is no `SCRIPT` argument.
+
+The following options don't exist: `--second-stage`, `--exclude`,
+`--resolve-deps`, `--force-check-gpg`, `--merged-usr` and `--no-merged-usr`.
+
+The quirks from debootstrap are needed to create chroots of Debian unstable
+from snapshot.d.o before timestamp 20141107T220431Z or Debian 8 (Jessie) or
+older.
+
+Tests
+=====
+
+The script `coverage.sh` runs mmdebstrap in all kinds of scenarios to execute
+all code paths of the script. It verifies its output in each scenario and
+displays the results gathered with Devel::Cover. It also compares the output of
+mmdebstrap with debootstrap in several scenarios. To run the testsuite, run:
+
+ ./make_mirror.sh
+ CMD=./mmdebstrap ./coverage.sh
+
+To also generate Perl Devel::Cover data, omit the `CMD` environment variable,
+but note that this will take a lot longer.
+
+The `make_mirror.sh` script will be a no-op if nothing changed in Debian
+unstable. You don't need to run `make_mirror.sh` before every invocation of
+`coverage.sh`. When you make changes to `make_mirror.sh` and want to regenerate
+the cache, run:
+
+ touch -d yesterday shared/cache/debian/dists/unstable/Release
+
+The script `coverage.sh` does not need an active internet connection by
+default. An online connection is only needed by the `make_mirror.sh` script
+which fills a local cache with a few minimal Debian mirror copies.
+
+By default, `coverage.sh` will skip the single test which tries to create an
+Ubuntu Focal chroot. To run that test as well, invoke `coverage.sh` with the
+environment variable `ONLINE=yes`.
+
+If a test fails, you can run individual tests by executing `coverage.py` with
+the test name and optionally limit it to a specific distribution like so:
+
+ CMD=./mmdebstrap ./coverage.py --dist unstable check-against-debootstrap-dist
+
+Bugs
+====
+
+mmdebstrap has bugs. Report them here:
+https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
+
+Contributors
+============
+
+ - Johannes Schauer Marin Rodrigues (main author)
+ - Helmut Grohne
+ - Gioele Barabucci
+ - Benjamin Drung
+ - Jochen Sprickerhof
+ - Josh Triplett
+ - Konstantin Demin
+ - David Kalnischkies
+ - Emilio Pozuelo Monfort
+ - Francesco Poli
+ - Jakub Wilk
+ - Joe Groocock
+ - Nicolas Vigier
+ - Raul Tambre
+ - Steve Dodd
+ - Trent W. Buck
+ - Vagrant Cascadian
diff --git a/caching_proxy.py b/caching_proxy.py
new file mode 100755
index 0000000..e57a851
--- /dev/null
+++ b/caching_proxy.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+
+import sys
+import os
+import time
+import http.client
+import http.server
+from io import StringIO
+import pathlib
+import urllib.parse
+
+oldcachedir = None
+newcachedir = None
+readonly = False
+
+
+class ProxyRequestHandler(http.server.BaseHTTPRequestHandler):
+ def do_GET(self):
+ assert int(self.headers.get("Content-Length", 0)) == 0
+ assert self.headers["Host"]
+ pathprefix = "http://" + self.headers["Host"] + "/"
+ assert self.path.startswith(pathprefix)
+ sanitizedpath = urllib.parse.unquote(self.path.removeprefix(pathprefix))
+ oldpath = oldcachedir / sanitizedpath
+ newpath = newcachedir / sanitizedpath
+
+ if not readonly:
+ newpath.parent.mkdir(parents=True, exist_ok=True)
+
+ # just send back to client
+ if newpath.exists():
+ print(f"proxy cached: {self.path}", file=sys.stderr)
+ self.wfile.write(b"HTTP/1.1 200 OK\r\n")
+ self.send_header("Content-Length", newpath.stat().st_size)
+ self.end_headers()
+ with newpath.open(mode="rb") as new:
+ while True:
+ buf = new.read(64 * 1024) # same as shutil uses
+ if not buf:
+ break
+ self.wfile.write(buf)
+ self.wfile.flush()
+ return
+
+ if readonly:
+ newpath = pathlib.Path("/dev/null")
+
+ # copy from oldpath to newpath and send back to client
+ # Only take files from the old cache if they are .deb files or Packages
+ # files in the by-hash directory as only those are unique by their path
+ # name. Other files like InRelease files have to be downloaded afresh.
+ if oldpath.exists() and (
+ oldpath.suffix == ".deb" or "by-hash" in oldpath.parts
+ ):
+ print(f"proxy cached: {self.path}", file=sys.stderr)
+ self.wfile.write(b"HTTP/1.1 200 OK\r\n")
+ self.send_header("Content-Length", oldpath.stat().st_size)
+ self.end_headers()
+ with oldpath.open(mode="rb") as old, newpath.open(mode="wb") as new:
+ # we are not using shutil.copyfileobj() because we want to
+ # write to two file objects simultaneously
+ while True:
+ buf = old.read(64 * 1024) # same as shutil uses
+ if not buf:
+ break
+ self.wfile.write(buf)
+ new.write(buf)
+ self.wfile.flush()
+ return
+
+ # download fresh copy
+ try:
+ print(f"\rproxy download: {self.path}", file=sys.stderr)
+ conn = http.client.HTTPConnection(self.headers["Host"], timeout=5)
+ conn.request("GET", self.path, None, dict(self.headers))
+ res = conn.getresponse()
+ assert (res.status, res.reason) == (200, "OK"), (res.status, res.reason)
+ self.wfile.write(b"HTTP/1.1 200 OK\r\n")
+ for k, v in res.getheaders():
+ # do not allow a persistent connection
+ if k == "connection":
+ continue
+ self.send_header(k, v)
+ self.end_headers()
+ with newpath.open(mode="wb") as f:
+ # we are not using shutil.copyfileobj() because we want to
+ # write to two file objects simultaneously and throttle the
+ # writing speed to 1024 kB/s
+ while True:
+ buf = res.read(64 * 1024) # same as shutil uses
+ if not buf:
+ break
+ self.wfile.write(buf)
+ f.write(buf)
+ time.sleep(64 / 1024) # 1024 kB/s
+ self.wfile.flush()
+ except Exception as e:
+ self.send_error(502)
+
+
+def main():
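+    # invocation: caching_proxy.py [--readonly] OLDCACHEDIR NEWCACHEDIR
+    # serves files on port 8080 from NEWCACHEDIR if present, otherwise from
+    # OLDCACHEDIR (only .deb and by-hash files), otherwise by downloading them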
+ global oldcachedir, newcachedir, readonly
+ if sys.argv[1] == "--readonly":
+ readonly = True
+ oldcachedir = pathlib.Path(sys.argv[2])
+ newcachedir = pathlib.Path(sys.argv[3])
+ else:
+ oldcachedir = pathlib.Path(sys.argv[1])
+ newcachedir = pathlib.Path(sys.argv[2])
+ print(f"starting caching proxy for {newcachedir}", file=sys.stderr)
+ httpd = http.server.ThreadingHTTPServer(
+ server_address=("", 8080), RequestHandlerClass=ProxyRequestHandler
+ )
+ httpd.serve_forever()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/coverage.py b/coverage.py
new file mode 100755
index 0000000..7e911cf
--- /dev/null
+++ b/coverage.py
@@ -0,0 +1,457 @@
+#!/usr/bin/env python3
+
+from debian.deb822 import Deb822, Release
+import email.utils
+import os
+import sys
+import shutil
+import subprocess
+import argparse
+import time
+from datetime import timedelta
+from collections import defaultdict
+from itertools import product
+
+have_qemu = os.getenv("HAVE_QEMU", "yes") == "yes"
+have_binfmt = os.getenv("HAVE_BINFMT", "yes") == "yes"
+run_ma_same_tests = os.getenv("RUN_MA_SAME_TESTS", "yes") == "yes"
+use_host_apt_config = os.getenv("USE_HOST_APT_CONFIG", "no") == "yes"
+cmd = os.getenv("CMD", "./mmdebstrap")
+
+default_dist = os.getenv("DEFAULT_DIST", "unstable")
+all_dists = ["oldstable", "stable", "testing", "unstable"]
+default_mode = "auto"
+all_modes = ["auto", "root", "unshare", "fakechroot", "chrootless"]
+default_variant = "apt"
+all_variants = [
+ "extract",
+ "custom",
+ "essential",
+ "apt",
+ "minbase",
+ "buildd",
+ "-",
+ "standard",
+]
+default_format = "auto"
+all_formats = ["auto", "directory", "tar", "squashfs", "ext2", "null"]
+
+mirror = os.getenv("mirror", "http://127.0.0.1/debian")
+hostarch = subprocess.check_output(["dpkg", "--print-architecture"]).decode().strip()
+
+release_path = f"./shared/cache/debian/dists/{default_dist}/InRelease"
+if not os.path.exists(release_path):
+ print("path doesn't exist:", release_path, file=sys.stderr)
+ print("run ./make_mirror.sh first", file=sys.stderr)
+ exit(1)
+if os.getenv("SOURCE_DATE_EPOCH") is not None:
+ s_d_e = os.getenv("SOURCE_DATE_EPOCH")
+else:
+ with open(release_path) as f:
+ rel = Release(f)
+ s_d_e = str(email.utils.mktime_tz(email.utils.parsedate_tz(rel["Date"])))
+
+separator = (
+ "------------------------------------------------------------------------------"
+)
+
+
+def skip(condition, dist, mode, variant, fmt):
+ if not condition:
+ return ""
+ for line in condition.splitlines():
+ if not line:
+ continue
+ if eval(line):
+ return line.strip()
+ return ""
+
+
+def parse_config(confname):
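+    # coverage.txt is a deb822-style file with one paragraph per test; apart
+    # from the mandatory Test field, the recognized fields are Dists, Modes,
+    # Variants, Formats, Skip-If, Needs-QEMU, Needs-Root and Needs-APT-Config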
+ config_dict = defaultdict(dict)
+ config_order = list()
+ all_vals = {
+ "Dists": all_dists,
+ "Modes": all_modes,
+ "Variants": all_variants,
+ "Formats": all_formats,
+ }
+ with open(confname) as f:
+ for test in Deb822.iter_paragraphs(f):
+ if "Test" not in test.keys():
+ print("Test without name", file=sys.stderr)
+ exit(1)
+ name = test["Test"]
+ config_order.append(name)
+ for k in test.keys():
+ v = test[k]
+ if k not in [
+ "Test",
+ "Dists",
+ "Modes",
+ "Variants",
+ "Formats",
+ "Skip-If",
+ "Needs-QEMU",
+ "Needs-Root",
+ "Needs-APT-Config",
+ ]:
+ print(f"Unknown field name {k} in test {name}")
+ exit(1)
+ if k in all_vals.keys():
+ if v == "default":
+ print(
+ f"Setting {k} to default in Test {name} is redundant",
+ file=sys.stderr,
+ )
+ exit(1)
+ if v == "any":
+ v = all_vals[k]
+ else:
+ # else, split the value by whitespace
+ v = v.split()
+ for i in v:
+ if i not in all_vals[k]:
+ print(
+ f"{i} is not a valid value for {k}", file=sys.stderr
+ )
+ exit(1)
+ config_dict[name][k] = v
+ return config_order, config_dict
+
+
+def format_test(num, total, name, dist, mode, variant, fmt, config_dict):
+ ret = f"({num}/{total}) {name}"
+ if len(config_dict[name].get("Dists", [])) > 1:
+ ret += f" --dist={dist}"
+ if len(config_dict[name].get("Modes", [])) > 1:
+ ret += f" --mode={mode}"
+ if len(config_dict[name].get("Variants", [])) > 1:
+ ret += f" --variant={variant}"
+ if len(config_dict[name].get("Formats", [])) > 1:
+ ret += f" --format={fmt}"
+ return ret
+
+
+def print_time_per_test(time_per_test, name="test"):
+ print(
+ f"average time per {name}:",
+ sum(time_per_test.values(), start=timedelta()) / len(time_per_test),
+ file=sys.stderr,
+ )
+ print(
+ f"median time per {name}:",
+ sorted(time_per_test.values())[len(time_per_test) // 2],
+ file=sys.stderr,
+ )
+ head_tail_num = 10
+ print(f"{head_tail_num} fastests {name}s:", file=sys.stderr)
+ for k, v in sorted(time_per_test.items(), key=lambda i: i[1])[
+ : min(head_tail_num, len(time_per_test))
+ ]:
+ print(f" {k}: {v}", file=sys.stderr)
+ print(f"{head_tail_num} slowest {name}s:", file=sys.stderr)
+ for k, v in sorted(time_per_test.items(), key=lambda i: i[1], reverse=True)[
+ : min(head_tail_num, len(time_per_test))
+ ]:
+ print(f" {k}: {v}", file=sys.stderr)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("test", nargs="*", help="only run these tests")
+ parser.add_argument(
+ "-x",
+ "--exitfirst",
+ action="store_const",
+ dest="maxfail",
+ const=1,
+ help="exit instantly on first error or failed test.",
+ )
+ parser.add_argument(
+ "--maxfail",
+ metavar="num",
+ action="store",
+ type=int,
+ dest="maxfail",
+ default=0,
+ help="exit after first num failures or errors.",
+ )
+ parser.add_argument(
+ "--mode",
+ metavar="mode",
+ help=f"only run tests with this mode (Default = {default_mode})",
+ )
+ parser.add_argument(
+ "--dist",
+ metavar="dist",
+ help=f"only run tests with this dist (Default = {default_dist})",
+ )
+ parser.add_argument(
+ "--variant",
+ metavar="variant",
+ help=f"only run tests with this variant (Default = {default_variant})",
+ )
+ parser.add_argument(
+ "--format",
+ metavar="format",
+ help=f"only run tests with this format (Default = {default_format})",
+ )
+ parser.add_argument(
+ "--skip", metavar="test", action="append", help="skip this test"
+ )
+ args = parser.parse_args()
+
+ # copy over files from git or as distributed
+ for git, dist, target in [
+ ("./mmdebstrap", "/usr/bin/mmdebstrap", "mmdebstrap"),
+ ("./tarfilter", "/usr/bin/mmtarfilter", "tarfilter"),
+ (
+ "./proxysolver",
+ "/usr/lib/apt/solvers/mmdebstrap-dump-solution",
+ "proxysolver",
+ ),
+ (
+ "./ldconfig.fakechroot",
+ "/usr/libexec/mmdebstrap/ldconfig.fakechroot",
+ "ldconfig.fakechroot",
+ ),
+ ]:
+ if os.path.exists(git):
+ shutil.copy(git, f"shared/{target}")
+ else:
+ shutil.copy(dist, f"shared/{target}")
+ # copy over hooks from git or as distributed
+ if os.path.exists("hooks"):
+ shutil.copytree("hooks", "shared/hooks", dirs_exist_ok=True)
+ else:
+ shutil.copytree(
+ "/usr/share/mmdebstrap/hooks", "shared/hooks", dirs_exist_ok=True
+ )
+
+ # parse coverage.txt
+ config_order, config_dict = parse_config("coverage.txt")
+
+ indirbutnotcovered = set(
+ [d for d in os.listdir("tests") if not d.startswith(".")]
+ ) - set(config_order)
+ if indirbutnotcovered:
+ print(
+ "test(s) missing from coverage.txt: %s"
+ % (", ".join(sorted(indirbutnotcovered))),
+ file=sys.stderr,
+ )
+ exit(1)
+ coveredbutnotindir = set(config_order) - set(
+ [d for d in os.listdir("tests") if not d.startswith(".")]
+ )
+ if coveredbutnotindir:
+ print(
+ "test(s) missing from ./tests: %s"
+ % (", ".join(sorted(coveredbutnotindir))),
+ file=sys.stderr,
+ )
+
+ exit(1)
+
+ # produce the list of tests using the cartesian product of all allowed
+ # dists, modes, variants and formats of a given test
+ tests = []
+ for name in config_order:
+ test = config_dict[name]
+ for dist, mode, variant, fmt in product(
+ test.get("Dists", [default_dist]),
+ test.get("Modes", [default_mode]),
+ test.get("Variants", [default_variant]),
+ test.get("Formats", [default_format]),
+ ):
+ skipreason = skip(test.get("Skip-If"), dist, mode, variant, fmt)
+ if skipreason:
+ tt = ("skip", skipreason)
+ elif (
+ test.get("Needs-APT-Config", "false") == "true" and use_host_apt_config
+ ):
+ tt = ("skip", "test cannot use host apt config")
+ elif have_qemu:
+ tt = "qemu"
+ elif test.get("Needs-QEMU", "false") == "true":
+ tt = ("skip", "test needs QEMU")
+ elif test.get("Needs-Root", "false") == "true":
+ tt = "sudo"
+ elif mode == "root":
+ tt = "sudo"
+ else:
+ tt = "null"
+ tests.append((tt, name, dist, mode, variant, fmt))
+
+ torun = []
+ num_tests = len(tests)
+ if args.test:
+ # check if all given tests are either a valid name or a valid number
+ for test in args.test:
+ if test in [name for (_, name, _, _, _, _) in tests]:
+ continue
+ if not test.isdigit():
+ print(f"cannot find test named {test}", file=sys.stderr)
+ exit(1)
+            if int(test) > len(tests) or int(test) <= 0 or str(int(test)) != test:
+ print(f"test number {test} doesn't exist", file=sys.stderr)
+ exit(1)
+
+ for i, (_, name, _, _, _, _) in enumerate(tests):
+ # if either the number or test name matches, then we use this test,
+ # otherwise we skip it
+ if name in args.test:
+ torun.append(i)
+ if str(i + 1) in args.test:
+ torun.append(i)
+ num_tests = len(torun)
+
+ starttime = time.time()
+ skipped = defaultdict(list)
+ failed = []
+ num_success = 0
+ num_finished = 0
+ time_per_test = {}
+ acc_time_per_test = defaultdict(list)
+ for i, (test, name, dist, mode, variant, fmt) in enumerate(tests):
+ if torun and i not in torun:
+ continue
+ print(separator, file=sys.stderr)
+ print("(%d/%d) %s" % (i + 1, len(tests), name), file=sys.stderr)
+ print("dist: %s" % dist, file=sys.stderr)
+ print("mode: %s" % mode, file=sys.stderr)
+ print("variant: %s" % variant, file=sys.stderr)
+ print("format: %s" % fmt, file=sys.stderr)
+ if num_finished > 0:
+ currenttime = time.time()
+ timeleft = timedelta(
+ seconds=int(
+ (num_tests - num_finished)
+ * (currenttime - starttime)
+ / num_finished
+ )
+ )
+ print("time left: %s" % timeleft, file=sys.stderr)
+ if failed:
+ print("failed: %d" % len(failed), file=sys.stderr)
+ num_finished += 1
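+        # each file in ./tests is a template: placeholders like {{ CMD }},
+        # {{ DIST }} or {{ MODE }} are replaced with the current combination
+        # before the result is written to shared/test.sh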
+ with open("tests/" + name) as fin, open("shared/test.sh", "w") as fout:
+ for line in fin:
+ line = line.replace("{{ CMD }}", cmd)
+ line = line.replace("{{ SOURCE_DATE_EPOCH }}", s_d_e)
+ line = line.replace("{{ DIST }}", dist)
+ line = line.replace("{{ MIRROR }}", mirror)
+ line = line.replace("{{ MODE }}", mode)
+ line = line.replace("{{ VARIANT }}", variant)
+ line = line.replace("{{ FORMAT }}", fmt)
+ line = line.replace("{{ HOSTARCH }}", hostarch)
+ fout.write(line)
+ # ignore:
+ # SC2016 Expressions don't expand in single quotes, use double quotes for that.
+ # SC2050 This expression is constant. Did you forget the $ on a variable?
+ # SC2194 This word is constant. Did you forget the $ on a variable?
+ shellcheck = subprocess.run(
+ [
+ "shellcheck",
+ "--exclude=SC2050,SC2194,SC2016",
+ "-f",
+ "gcc",
+ "shared/test.sh",
+ ],
+ check=False,
+ stdout=subprocess.PIPE,
+ ).stdout.decode()
+ argv = None
+ match test:
+ case "qemu":
+ argv = ["./run_qemu.sh"]
+ case "sudo":
+ argv = ["./run_null.sh", "SUDO"]
+ case "null":
+ argv = ["./run_null.sh"]
+ case ("skip", reason):
+ skipped[reason].append(
+ format_test(
+ i + 1, len(tests), name, dist, mode, variant, fmt, config_dict
+ )
+ )
+ print(f"skipped because of {reason}", file=sys.stderr)
+ continue
+ print(separator, file=sys.stderr)
+ if args.skip and name in args.skip:
+ print(f"skipping because of --skip={name}", file=sys.stderr)
+ continue
+ if args.dist and args.dist != dist:
+ print(f"skipping because of --dist={args.dist}", file=sys.stderr)
+ continue
+ if args.mode and args.mode != mode:
+ print(f"skipping because of --mode={args.mode}", file=sys.stderr)
+ continue
+ if args.variant and args.variant != variant:
+ print(f"skipping because of --variant={args.variant}", file=sys.stderr)
+ continue
+ if args.format and args.format != fmt:
+ print(f"skipping because of --format={args.format}", file=sys.stderr)
+ continue
+ before = time.time()
+ proc = subprocess.Popen(argv)
+ try:
+ proc.wait()
+ except KeyboardInterrupt:
+ proc.terminate()
+ proc.wait()
+ break
+ after = time.time()
+ walltime = timedelta(seconds=int(after - before))
+ formated_test_name = format_test(
+ i + 1, len(tests), name, dist, mode, variant, fmt, config_dict
+ )
+ time_per_test[formated_test_name] = walltime
+ acc_time_per_test[name].append(walltime)
+ print(separator, file=sys.stderr)
+ print(f"duration: {walltime}", file=sys.stderr)
+ if proc.returncode != 0 or shellcheck != "":
+ if shellcheck != "":
+ print(shellcheck)
+ failed.append(formated_test_name)
+ print("result: FAILURE", file=sys.stderr)
+ else:
+ print("result: SUCCESS", file=sys.stderr)
+ num_success += 1
+ if args.maxfail and len(failed) >= args.maxfail:
+ break
+ print(separator, file=sys.stderr)
+ print(
+ "successfully ran %d tests" % num_success,
+ file=sys.stderr,
+ )
+ if skipped:
+ print("skipped %d:" % sum([len(v) for v in skipped.values()]), file=sys.stderr)
+ for reason, l in skipped.items():
+ print(f"skipped because of {reason}:", file=sys.stderr)
+ for t in l:
+ print(f" {t}", file=sys.stderr)
+ if len(time_per_test) > 1:
+ print_time_per_test(time_per_test)
+ if len(acc_time_per_test) > 1:
+ print_time_per_test(
+ {
+ f"{len(v)}x {k}": sum(v, start=timedelta())
+ for k, v in acc_time_per_test.items()
+ },
+ "accumulated test",
+ )
+ if failed:
+ print("failed %d:" % len(failed), file=sys.stderr)
+ for f in failed:
+ print(f, file=sys.stderr)
+ currenttime = time.time()
+ walltime = timedelta(seconds=int(currenttime - starttime))
+ print(f"total runtime: {walltime}", file=sys.stderr)
+ if failed:
+ exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/coverage.sh b/coverage.sh
new file mode 100755
index 0000000..c555c8e
--- /dev/null
+++ b/coverage.sh
@@ -0,0 +1,114 @@
+#!/bin/sh
+
+set -eu
+
+# by default, use the mmdebstrap executable in the current directory together
+# with perl Devel::Cover but allow to overwrite this
+: "${CMD:=perl -MDevel::Cover=-silent,-nogcov ./mmdebstrap}"
+
+case "$CMD" in
+ "mmdebstrap "*|mmdebstrap|*" mmdebstrap"|*" mmdebstrap "*)
+ MMSCRIPT="$(command -v mmdebstrap 2>/dev/null)";;
+ *) MMSCRIPT=./mmdebstrap;;
+esac
+
+if [ -e "$MMSCRIPT" ]; then
+ TMPFILE=$(mktemp)
+ perltidy < "$MMSCRIPT" > "$TMPFILE"
+ ret=0
+ diff -u "$MMSCRIPT" "$TMPFILE" || ret=$?
+ if [ "$ret" -ne 0 ]; then
+ echo "perltidy failed" >&2
+ rm "$TMPFILE"
+ exit 1
+ fi
+ rm "$TMPFILE"
+
+ if [ "$(sed -e '/^__END__$/,$d' "$MMSCRIPT" | wc --max-line-length)" -gt 79 ]; then
+ echo "exceeded maximum line length of 79 characters" >&2
+ exit 1
+ fi
+
+ perlcritic --severity 4 --verbose 8 "$MMSCRIPT"
+
+ pod2man "$MMSCRIPT" >/dev/null
+fi
+
+for f in tarfilter coverage.py caching_proxy.py; do
+ [ -e "./$f" ] || continue
+ black --check "./$f"
+done
+
+shellcheck --exclude=SC2016 coverage.sh make_mirror.sh run_null.sh run_qemu.sh gpgvnoexpkeysig mmdebstrap-autopkgtest-build-qemu hooks/*/*.sh
+
+mirrordir="./shared/cache/debian"
+
+if [ ! -e "$mirrordir" ]; then
+ echo "run ./make_mirror.sh before running $0" >&2
+ exit 1
+fi
+
+# we use -f because the file might not exist
+rm -f shared/cover_db.img
+
+: "${DEFAULT_DIST:=unstable}"
+: "${HAVE_QEMU:=yes}"
+: "${RUN_MA_SAME_TESTS:=yes}"
+
+if [ "$HAVE_QEMU" = "yes" ]; then
+ # prepare image for cover_db
+ fallocate -l 64M shared/cover_db.img
+ /usr/sbin/mkfs.vfat shared/cover_db.img
+
+ if [ ! -e "./shared/cache/debian-$DEFAULT_DIST.ext4" ]; then
+ echo "./shared/cache/debian-$DEFAULT_DIST.ext4 does not exist" >&2
+ exit 1
+ fi
+fi
+
+# choose the timestamp of the unstable Release file, so that we get
+# reproducible results for the same mirror timestamp
+SOURCE_DATE_EPOCH=$(date --date="$(grep-dctrl -s Date -n '' "$mirrordir/dists/$DEFAULT_DIST/Release")" +%s)
+
+# for traditional sort order that uses native byte values
+export LC_ALL=C.UTF-8
+
+: "${HAVE_BINFMT:=yes}"
+
+mirror="http://127.0.0.1/debian"
+
+export HAVE_QEMU HAVE_BINFMT RUN_MA_SAME_TESTS DEFAULT_DIST SOURCE_DATE_EPOCH CMD mirror
+
+./coverage.py "$@"
+
+if [ -e shared/cover_db.img ]; then
+ # produce report inside the VM to make sure that the versions match or
+ # otherwise we might get:
+ # Can't read shared/cover_db/runs/1598213854.252.64287/cover.14 with Sereal: Sereal: Error: Bad Sereal header: Not a valid Sereal document. at offset 1 of input at srl_decoder.c line 600 at /usr/lib/x86_64-linux-gnu/perl5/5.30/Devel/Cover/DB/IO/Sereal.pm line 34, <$fh> chunk 1.
+ cat << END > shared/test.sh
+cover -nogcov -report html_basic cover_db >&2
+mkdir -p report
+for f in common.js coverage.html cover.css css.js mmdebstrap--branch.html mmdebstrap--condition.html mmdebstrap.html mmdebstrap--subroutine.html standardista-table-sorting.js; do
+ cp -a cover_db/\$f report
+done
+cover -delete cover_db >&2
+END
+ if [ "$HAVE_QEMU" = "yes" ]; then
+ ./run_qemu.sh
+ else
+ ./run_null.sh
+ fi
+
+ echo
+ echo "open file://$(pwd)/shared/report/coverage.html in a browser"
+ echo
+fi
+
+# check if the wiki has to be updated with pod2markdown output
+if [ "${DEBEMAIL:-}" = "josch@debian.org" ]; then
+ bash -exc "diff -u <(curl --silent https://gitlab.mister-muffin.de/josch/mmdebstrap/wiki/raw/Home | dos2unix) <(pod2markdown < mmdebstrap)" || :
+fi
+
+rm -f shared/test.sh shared/tar1.txt shared/tar2.txt shared/pkglist.txt shared/doc-debian.tar.list shared/mmdebstrap shared/tarfilter shared/proxysolver
+
+echo "$0 finished successfully" >&2
diff --git a/coverage.txt b/coverage.txt
new file mode 100644
index 0000000..fb09b19
--- /dev/null
+++ b/coverage.txt
@@ -0,0 +1,436 @@
+Test: debootstrap
+Dists: any
+Variants: minbase buildd -
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: check-against-debootstrap-dist
+Dists: any
+Variants: minbase buildd -
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: as-debootstrap-unshare-wrapper
+Modes: unshare
+Needs-Root: true
+Variants: minbase -
+Needs-APT-Config: true
+
+Test: help
+
+Test: man
+
+Test: version
+
+Test: create-directory
+Needs-Root: true
+
+Test: unshare-as-root-user
+Needs-Root: true
+
+Test: dist-using-codename
+Dists: any
+Needs-APT-Config: true
+
+Test: fail-without-etc-subuid
+Needs-QEMU: true
+
+Test: fail-without-username-in-etc-subuid
+Needs-QEMU: true
+
+Test: unshare-as-root-user-inside-chroot
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: root-mode-inside-chroot
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: root-mode-inside-unshare-chroot
+Modes: unshare
+Needs-APT-Config: true
+
+Test: root-without-cap-sys-admin
+Needs-Root: true
+
+Test: mount-is-missing
+Needs-QEMU: true
+
+Test: mmdebstrap
+Needs-Root: true
+Modes: root
+Formats: tar squashfs ext2
+Variants: essential apt minbase buildd - standard
+Skip-If:
+ variant == "standard" and dist == "oldstable" # #864082, #1004557, #1004558
+ mode == "fakechroot" and variant in ["-", "standard"] # no extended attributes
+ variant == "standard" and hostarch in ["armel", "armhf", "mipsel"] # #1031276
+
+Test: check-for-bit-by-bit-identical-format-output
+Modes: unshare fakechroot
+Formats: tar squashfs ext2
+Variants: essential apt minbase buildd - standard
+Skip-If:
+ variant == "standard" and dist == "oldstable" # #864082, #1004557, #1004558
+ mode == "fakechroot" and variant in ["-", "standard"] # no extended attributes
+ variant == "standard" and hostarch in ["armel", "armhf", "mipsel"] # #1031276
+
+Test: tarfilter-idshift
+Needs-QEMU: true
+
+Test: progress-bars-on-fake-tty
+
+Test: debug-output-on-fake-tty
+
+Test: existing-empty-directory
+Needs-Root: true
+
+Test: existing-directory-with-lost-found
+Needs-Root: true
+
+Test: fail-installing-to-non-empty-lost-found
+
+Test: fail-installing-to-non-empty-target-directory
+
+Test: missing-device-nodes-outside-the-chroot
+Needs-QEMU: true
+
+Test: missing-dev-sys-proc-inside-the-chroot
+Modes: unshare
+Variants: custom
+
+Test: chroot-directory-not-accessible-by-apt-user
+Needs-Root: true
+
+Test: cwd-directory-not-accessible-by-unshared-user
+Needs-Root: true
+Modes: unshare
+
+Test: create-gzip-compressed-tarball
+
+Test: custom-tmpdir
+Needs-Root: true
+Modes: unshare
+
+Test: xz-compressed-tarball
+
+Test: directory-ending-in-tar
+Modes: root
+Needs-Root: true
+
+Test: auto-mode-without-unshare-capabilities
+Needs-QEMU: true
+
+Test: fail-with-missing-lz4
+
+Test: fail-with-path-with-quotes
+
+Test: create-tarball-with-tmp-mounted-nodev
+Needs-QEMU: true
+
+Test: read-from-stdin-write-to-stdout
+
+Test: supply-components-manually
+Modes: root
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: stable-default-mirror
+Needs-QEMU: true
+
+Test: pass-distribution-but-implicitly-write-to-stdout
+Needs-QEMU: true
+
+Test: aspcud-apt-solver
+
+Test: mirror-is-stdin
+
+Test: copy-mirror
+Needs-QEMU: true
+
+Test: file-mirror
+Needs-QEMU: true
+
+Test: file-mirror-automount-hook
+Modes: root unshare fakechroot
+Needs-QEMU: true
+
+Test: mirror-is-deb
+
+Test: mirror-is-real-file
+Needs-APT-Config: true
+
+Test: deb822-1-2
+Modes: root
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: deb822-2-2
+Modes: root
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: automatic-mirror-from-suite
+Needs-QEMU: true
+
+Test: invalid-mirror
+Needs-APT-Config: true
+
+Test: fail-installing-to-root
+Modes: root
+Needs-Root: true
+
+Test: fail-installing-to-existing-file
+Modes: root
+Needs-Root: true
+
+Test: arm64-without-qemu-support
+Needs-QEMU: true
+Skip-If: hostarch != "amd64"
+
+Test: i386-which-can-be-executed-without-qemu
+Needs-QEMU: true
+Skip-If:
+ hostarch != "amd64"
+ not run_ma_same_tests
+
+Test: include-foreign-libmagic-mgc
+Needs-Root: true
+Needs-APT-Config: true
+Skip-If:
+ hostarch not in ["amd64", "arm64"]
+ not run_ma_same_tests
+
+Test: include-foreign-libmagic-mgc-with-multiple-arch-options
+Needs-Root: true
+Needs-APT-Config: true
+Skip-If:
+ hostarch not in ["amd64", "arm64"]
+ not run_ma_same_tests
+
+Test: aptopt
+Needs-Root: true
+
+Test: keyring
+Needs-QEMU: true
+
+Test: keyring-overwrites
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: signed-by-without-host-keys
+Needs-QEMU: true
+
+Test: ascii-armored-keys
+Needs-QEMU: true
+
+Test: signed-by-with-host-keys
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: dpkgopt
+Needs-Root: true
+
+Test: include
+Needs-Root: true
+
+Test: multiple-include
+Needs-Root: true
+
+Test: include-with-multiple-apt-sources
+Needs-Root: true
+
+Test: essential-hook
+Needs-Root: true
+
+Test: customize-hook
+Needs-Root: true
+
+Test: failing-customize-hook
+Needs-Root: true
+
+Test: sigint-during-customize-hook
+Needs-Root: true
+
+Test: hook-directory
+Needs-Root: true
+
+Test: eatmydata-via-hook-dir
+Needs-Root: true
+
+Test: special-hooks-using-helpers
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: special-hooks-using-helpers-and-env-vars
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: special-hooks-with-mode-mode
+Modes: root unshare fakechroot
+
+Test: debootstrap-no-op-options
+Needs-Root: true
+
+Test: verbose
+Variants: - standard
+Skip-If:
+ variant == "-" and hostarch not in ["armel", "armhf", "mipsel"] # #1031276
+ variant == "standard" and hostarch in ["armel", "armhf", "mipsel"] # #1031276
+ variant == "standard" and dist == "oldstable" # #864082, #1004557, #1004558
+
+Test: debug
+Variants: - standard
+Skip-If:
+ variant == "-" and hostarch not in ["armel", "armhf", "mipsel"] # #1031276
+ variant == "standard" and hostarch in ["armel", "armhf", "mipsel"] # #1031276
+ variant == "standard" and dist == "oldstable" # #864082, #1004557, #1004558
+
+Test: quiet
+Needs-Root: true
+
+Test: logfile
+Needs-Root: true
+Needs-APT-Config: true
+
+Test: without-etc-resolv-conf-and-etc-hostname
+Needs-QEMU: true
+
+Test: preserve-mode-of-etc-resolv-conf-and-etc-hostname
+Modes: root
+Needs-QEMU: true
+
+Test: not-having-to-install-apt-in-include-because-a-hook-did-it-before
+
+Test: remove-start-stop-daemon-and-policy-rc-d-in-hook
+
+Test: skip-start-stop-daemon-policy-rc
+
+Test: skip-mount
+Modes: unshare
+
+Test: compare-output-with-pre-seeded-var-cache-apt-archives
+Needs-QEMU: true
+Variants: any
+Skip-If:
+ variant == "standard" and dist == "oldstable" # #864082, #1004557, #1004558
+
+Test: create-directory-dry-run
+Modes: root
+
+Test: create-tarball-dry-run
+Variants: any
+Modes: any
+
+Test: unpack-doc-debian
+Modes: root fakechroot
+Variants: extract
+Needs-APT-Config: true
+
+Test: install-doc-debian
+Modes: chrootless
+Variants: custom
+Needs-APT-Config: true
+
+Test: chrootless
+Variants: essential
+Modes: chrootless
+Needs-Root: true
+Skip-If:
+ dist == "oldstable"
+
+Test: chrootless-fakeroot
+Variants: essential
+Modes: chrootless
+Skip-If:
+ dist == "oldstable"
+ hostarch in ["i386", "armel", "armhf", "mipsel"] # #1023286
+
+Test: chrootless-foreign
+Variants: essential
+Modes: chrootless
+Skip-If:
+ dist == "oldstable"
+ hostarch not in ["amd64", "arm64"]
+ not run_ma_same_tests
+Needs-QEMU: true
+
+Test: install-doc-debian-and-output-tarball
+Variants: custom
+Modes: chrootless
+Needs-APT-Config: true
+
+Test: install-doc-debian-and-test-hooks
+Variants: custom
+Modes: chrootless
+Needs-APT-Config: true
+
+Test: install-libmagic-mgc-on-foreign
+Variants: custom
+Modes: chrootless
+Skip-If:
+ hostarch not in ["amd64", "arm64"]
+ not have_binfmt
+
+Test: install-busybox-based-sub-essential-system
+Needs-Root: true
+
+Test: create-foreign-tarball
+Modes: root unshare fakechroot
+Skip-If:
+ hostarch not in ["amd64", "arm64"]
+ mode == "fakechroot" and not run_ma_same_tests
+ mode == "fakechroot" and hostarch == "arm64" # usrmerge postinst under fakechroot wants to copy /lib/ld-linux-x86-64.so.2 (which does not exist) instead of /lib64/ld-linux-x86-64.so.2
+ not have_binfmt
+
+Test: no-sbin-in-path
+Modes: fakechroot
+
+Test: dev-ptmx
+Modes: root unshare
+
+Test: error-if-stdout-is-tty
+
+Test: variant-custom-timeout
+
+Test: include-deb-file
+Modes: root unshare fakechroot
+Needs-APT-Config: true
+
+Test: unshare-include-deb
+Modes: unshare
+
+Test: pivot_root
+Modes: root unshare
+Needs-APT-Config: true
+
+Test: jessie-or-older
+Needs-Root: true
+Modes: root unshare fakechroot
+Variants: essential apt minbase
+Skip-If: mode == "fakechroot" and hostarch in ["i386", "armel", "armhf", "mipsel"] # #1023286
+
+Test: apt-patterns
+
+Test: apt-patterns-custom
+
+Test: empty-sources.list
+
+Test: merged-fakechroot-inside-unmerged-chroot
+Needs-Root: true
+Needs-APT-Config: true
+Skip-If:
+ hostarch in ["i386", "armel", "armhf", "mipsel"] # #1023286
+ dist in ["testing", "unstable"] # #1053671
+
+Test: auto-mode-as-normal-user
+Modes: auto
+
+Test: skip-output-dev
+Modes: root unshare
+
+Test: skip-output-mknod
+Modes: root unshare
+
+Test: skip-tar-in-mknod
+Modes: unshare
diff --git a/examples/twb/debian-11-minimal.py b/examples/twb/debian-11-minimal.py
new file mode 100644
index 0000000..fac7b9b
--- /dev/null
+++ b/examples/twb/debian-11-minimal.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python3
+import argparse
+import pathlib
+import subprocess
+import tempfile
+
+__author__ = "Trent W. Buck"
+__copyright__ = "Copyright © 2020 Trent W. Buck"
+__license__ = "expat"
+
+__doc__ = """ build the simplest Debian Live image that can boot
+
+This uses mmdebstrap to do the heavy lifting;
+it can run entirely without root privileges.
+It emits a USB key disk image that contains a bootable EFI ESP,
+which in turn includes a bootloader (refind), kernel, ramdisk, and filesystem.squashfs.
+
+NOTE: this is the simplest config possible.
+    It lacks packages that are CRITICAL to SECURITY AND DATA-LOSS prevention, such as amd64-microcode and smartd.
+"""
+
+parser = argparse.ArgumentParser(description=__doc__)
+parser.add_argument(
+ "output_file", nargs="?", default=pathlib.Path("filesystem.img"), type=pathlib.Path
+)
+args = parser.parse_args()
+
+
+filesystem_img_size = "256M" # big enough to include filesystem.squashfs + about 64M of bootloader, kernel, and ramdisk.
+esp_offset = 1024 * 1024 # 1MiB
+esp_label = "UEFI-ESP" # max 8 bytes for FAT32
+live_media_path = "debian-live"
+
+with tempfile.TemporaryDirectory(prefix="debian-live-bullseye-amd64-minimal.") as td:
+ td = pathlib.Path(td)
+ subprocess.check_call(
+ [
+ "mmdebstrap",
+ "--mode=unshare",
+ "--variant=apt",
+ '--aptopt=Acquire::http::Proxy "http://apt-cacher-ng.cyber.com.au:3142"',
+ '--aptopt=Acquire::https::Proxy "DIRECT"',
+ "--dpkgopt=force-unsafe-io",
+ "--include=linux-image-amd64 init initramfs-tools live-boot netbase",
+ "--include=dbus", # https://bugs.debian.org/814758
+ "--include=live-config iproute2 keyboard-configuration locales sudo user-setup",
+ "--include=ifupdown isc-dhcp-client", # live-config doesn't support systemd-networkd yet.
+ # Do the **BARE MINIMUM** to make a USB key that can boot on X86_64 UEFI.
+ # We use mtools so we do not ever need root privileges.
+ # We can't use mkfs.vfat, as that needs kpartx or losetup (i.e. root).
+ # We can't use mkfs.udf, as that needs mount (i.e. root).
+ # We can't use "refind-install --usedefault" as that runs mount(8) (i.e. root).
+ # We don't use genisoimage because
+ # 1) ISO9660 must die;
+ # 2) incomplete UDF 1.5+ support;
+ # 3) resulting filesystem can't be tweaked after flashing (e.g. debian-live/site.dir/etc/systemd/network/up.network).
+ #
+ # We use refind because 1) I hate grub; and 2) I like refind.
+ # If you want aarch64 or ia32 you need to install their BOOTxxx.EFI files.
+ # If you want kernel+initrd on something other than FAT, you need refind/drivers_xxx/xxx_xxx.EFI.
+ #
+ # FIXME: with qemu in UEFI mode (OVMF), I get dumped into startup.nsh (UEFI REPL).
+ # From there, I can manually type in "FS0:\EFI\BOOT\BOOTX64.EFI" to start refind, tho.
+ # So WTF is its problem? Does it not support fallback bootloader?
+ "--include=refind parted mtools",
+ "--essential-hook=echo refind refind/install_to_esp boolean false | chroot $1 debconf-set-selections",
+ "--customize-hook=echo refind refind/install_to_esp boolean true | chroot $1 debconf-set-selections",
+ "--customize-hook=chroot $1 mkdir -p /boot/USB /boot/EFI/BOOT",
+ "--customize-hook=chroot $1 cp /usr/share/refind/refind/refind_x64.efi /boot/EFI/BOOT/BOOTX64.EFI",
+ f"--customize-hook=chroot $1 truncate --size={filesystem_img_size} /boot/USB/filesystem.img",
+ f"--customize-hook=chroot $1 parted --script --align=optimal /boot/USB/filesystem.img mklabel gpt mkpart {esp_label} {esp_offset}b 100% set 1 esp on",
+ f"--customize-hook=chroot $1 mformat -i /boot/USB/filesystem.img@@{esp_offset} -F -v {esp_label}",
+ f"--customize-hook=chroot $1 mmd -i /boot/USB/filesystem.img@@{esp_offset} ::{live_media_path}",
+ f"""--customize-hook=echo '"Boot with default options" "boot=live live-media-path={live_media_path}"' >$1/boot/refind_linux.conf""",
+ # NOTE: find sidesteps the "glob expands before chroot applies" problem.
+ f"""--customize-hook=chroot $1 find -O3 /boot/ -xdev -mindepth 1 -maxdepth 1 -regextype posix-egrep -iregex '.*/(EFI|refind_linux.conf|vmlinuz.*|initrd.img.*)' -exec mcopy -vsbpm -i /boot/USB/filesystem.img@@{esp_offset} {{}} :: ';'""",
+ # FIXME: copy-out doesn't handle sparseness, so is REALLY slow (about 50 seconds).
+ # Therefore instead leave it in the squashfs, and extract it later.
+ # f'--customize-hook=copy-out /boot/USB/filesystem.img /tmp/',
+ # f'--customize-hook=chroot $1 rm /boot/USB/filesystem.img',
+ "bullseye",
+ td / "filesystem.squashfs",
+ ]
+ )
+
+ with args.output_file.open("wb") as f:
+ subprocess.check_call(
+ ["rdsquashfs", "--cat=boot/USB/filesystem.img", td / "filesystem.squashfs"],
+ stdout=f,
+ )
+ subprocess.check_call(
+ [
+ "mcopy",
+ "-i",
+ f"{args.output_file}@@{esp_offset}",
+ td / "filesystem.squashfs",
+ f"::{live_media_path}/filesystem.squashfs",
+ ]
+ )
diff --git a/examples/twb/debian-sid-zfs.py b/examples/twb/debian-sid-zfs.py
new file mode 100644
index 0000000..1d2e9ce
--- /dev/null
+++ b/examples/twb/debian-sid-zfs.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python3
+import argparse
+import pathlib
+import subprocess
+import tempfile
+
+__author__ = "Trent W. Buck"
+__copyright__ = "Copyright © 2020 Trent W. Buck"
+__license__ = "expat"
+
+__doc__ = """ build a Debian Live image that can install Debian 11 on ZFS 2
+
+This uses mmdebstrap to do the heavy lifting;
+it can run entirely without root privileges.
+It emits a USB key disk image that contains a bootable EFI ESP,
+which in turn includes a bootloader (refind), kernel, ramdisk, and filesystem.squashfs.
+
+"""
+
+parser = argparse.ArgumentParser(description=__doc__)
+parser.add_argument(
+ "output_file", nargs="?", default=pathlib.Path("filesystem.img"), type=pathlib.Path
+)
+parser.add_argument(
+ "--timezone",
+ default="Australia/Melbourne",
+ type=lambda s: s.split("/"),
+ help='NOTE: MUST be "Area/Zone" not e.g. "UTC", for now',
+)
+parser.add_argument(
+ "--locale", default="en_AU.UTF-8", help='NOTE: MUST end in ".UTF-8", for now'
+)
+args = parser.parse_args()
+
+
+filesystem_img_size = "512M" # big enough to include filesystem.squashfs + about 64M of bootloader, kernel, and ramdisk.
+esp_offset = 1024 * 1024 # 1MiB
+esp_label = "UEFI-ESP" # max 8 bytes for FAT32
+live_media_path = "debian-live"
+
+with tempfile.TemporaryDirectory(prefix="debian-sid-zfs.") as td:
+ td = pathlib.Path(td)
+ subprocess.check_call(
+ [
+ "mmdebstrap",
+ "--mode=unshare",
+ "--variant=apt",
+ '--aptopt=Acquire::http::Proxy "http://apt-cacher-ng.cyber.com.au:3142"',
+ '--aptopt=Acquire::https::Proxy "DIRECT"',
+ "--dpkgopt=force-unsafe-io",
+ "--components=main contrib non-free", # needed for CPU security patches
+ "--include=init initramfs-tools xz-utils live-boot netbase",
+ "--include=dbus", # https://bugs.debian.org/814758
+ "--include=linux-image-amd64 firmware-linux",
+ # Have ZFS 2.0 support.
+ "--include=zfs-dkms zfsutils-linux zfs-zed build-essential linux-headers-amd64", # ZFS 2 support
+        # Make the initrd a little smaller (41MB -> 20MB), at the expense of significantly slower image build time.
+ "--include=zstd",
+ "--essential-hook=mkdir -p $1/etc/initramfs-tools/conf.d",
+ "--essential-hook=>$1/etc/initramfs-tools/conf.d/zstd echo COMPRESS=zstd",
+ # Be the equivalent of Debian Live GNOME
+ # '--include=live-task-gnome',
+ #'--include=live-task-xfce',
+ # FIXME: enable this? It makes live-task-xfce go from 1G to 16G... so no.
+ #'--aptopt=Apt::Install-Recommends "true"',
+ # ...cherry-pick instead
+ # UPDATE: debian-installer-launcher DOES NOT WORK because we don't load crap SPECIFICALLY into /live/installer, in the ESP.
+ # UPDATE: network-manager-gnome DOES NOT WORK, nor is systemd-networkd auto-started... WTF?
+ # end result is no networking.
+ #'--include=live-config user-setup sudo firmware-linux haveged',
+ #'--include=calamares-settings-debian udisks2', # 300MB weirdo Qt GUI debian installer
+ #'--include=xfce4-terminal',
+ # x86_64 CPUs are undocumented proprietary RISC chips that EMULATE a documented x86_64 CISC ISA.
+ # The emulator is called "microcode", and is full of security vulnerabilities.
+ # Make sure security patches for microcode for *ALL* CPUs are included.
+ # By default, it tries to auto-detect the running CPU, so only patches the CPU of the build server.
+ "--include=intel-microcode amd64-microcode iucode-tool",
+ "--essential-hook=>$1/etc/default/intel-microcode echo IUCODE_TOOL_INITRAMFS=yes IUCODE_TOOL_SCANCPUS=no",
+ "--essential-hook=>$1/etc/default/amd64-microcode echo AMD64UCODE_INITRAMFS=yes",
+ "--dpkgopt=force-confold", # Work around https://bugs.debian.org/981004
+ # DHCP/DNS/SNTP clients...
+ # FIXME: use live-config ?
+ "--include=libnss-resolve libnss-myhostname systemd-timesyncd",
+ "--customize-hook=chroot $1 cp -alf /lib/systemd/resolv.conf /etc/resolv.conf", # This probably needs to happen LAST
+ # FIXME: fix resolv.conf to point to resolved, not "copy from the build-time OS"
+ # FIXME: fix hostname & hosts to not exist, not "copy from the build-time OS"
+ "--customize-hook=systemctl --root=$1 enable systemd-networkd systemd-timesyncd", # is this needed?
+ # Run a DHCP client on *ALL* ifaces.
+ # Consider network "up" (start sshd and local login prompt) when *ANY* (not ALL) ifaces are up.
+ "--customize-hook=>$1/etc/systemd/network/up.network printf '%s\n' '[Match]' Name='en*' '[Network]' DHCP=yes", # try DHCP on all ethernet ifaces
+ "--customize-hook=mkdir $1/etc/systemd/system/systemd-networkd-wait-online.service.d",
+ "--customize-hook=>$1/etc/systemd/system/systemd-networkd-wait-online.service.d/any-not-all.conf printf '%s\n' '[Service]' 'ExecStart=' 'ExecStart=/lib/systemd/systemd-networkd-wait-online --any'",
+ # Hope there's a central smarthost SMTP server called "mail" in the local search domain.
+ # FIXME: can live-config do this?
+ "--include=msmtp-mta",
+ "--customize-hook=>$1/etc/msmtprc printf '%s\n' 'account default' 'syslog LOG_MAIL' 'host mail' 'auto_from on'",
+ # Hope there's a central RELP logserver called "logserv" in the local domain.
+ # FIXME: can live-config do this?
+ "--include=rsyslog-relp",
+ """--customize-hook=>$1/etc/rsyslog.conf printf '%s\n' 'module(load="imuxsock")' 'module(load="imklog")' 'module(load="omrelp")' 'action(type="omrelp" target="logserv" port="2514" template="RSYSLOG_SyslogProtocol23Format")'""",
+ # Run self-tests on all discoverable hard disks, and (try to) email if something goes wrong.
+ "--include=smartmontools bsd-mailx",
+ "--customize-hook=>$1/etc/smartd.conf echo 'DEVICESCAN -n standby,15 -a -o on -S on -s (S/../../7/00|L/../01/./01) -t -H -m root -M once'",
+ # For rarely-updated, rarely-rebooted SOEs, apply what security updates we can into transient tmpfs COW.
+ # This CANNOT apply kernel security updates (though it will download them).
+ # This CANNOT make the upgrades persistent across reboots (they re-download each boot).
+ # FIXME: Would it be cleaner to set Environment=NEEDRESTART_MODE=a in
+ # apt-daily-upgrade.service and/or
+ # unattended-upgrades.service, so
+ # needrestart is noninteractive only when apt is noninteractive?
+ "--include=unattended-upgrades needrestart",
+ "--customize-hook=echo 'unattended-upgrades unattended-upgrades/enable_auto_updates boolean true' | chroot $1 debconf-set-selections",
+ """--customize-hook=>$1/etc/needrestart/conf.d/unattended-needrestart.conf echo '$nrconf{restart} = "a";'""", # https://bugs.debian.org/894444
+ # Do an apt update & apt upgrade at boot time (as well as @daily).
+ # The lack of /etc/machine-id causes these to be implicitly enabled.
+ # FIXME: use dropin in /etc.
+ "--customize-hook=>>$1/lib/systemd/system/apt-daily.service printf '%s\n' '[Install]' 'WantedBy=multi-user.target'",
+ "--customize-hook=>>$1/lib/systemd/system/apt-daily-upgrade.service printf '%s\n' '[Install]' 'WantedBy=multi-user.target'",
+ # FIXME: add support for this stuff (for the non-live final install this happens via ansible):
+ #
+ # unattended-upgrades
+ # smartd
+ # networkd (boot off ANY NIC, not EVERY NIC -- https://github.com/systemd/systemd/issues/9714)
+ # refind (bootloader config)
+ # misc safety nets
+ # double-check that mmdebstrap's machine-id support works properly
+ # Bare minimum to let me SSH in.
+ # FIXME: make this configurable.
+ # FIXME: trust a CA certificate instead -- see Zero Trust SSH, Jeremy Stott, LCA 2020 <https://youtu.be/lYzklWPTbsQ>
+ # WARNING: tinysshd does not support RSA, nor MaxStartups, nor sftp (unless you also install openssh-client, which is huge).
+ # FIXME: double-check no host keys are baked into the image (openssh-server and dropbear do this).
+ "--include=tinysshd rsync",
+ "--essential-hook=install -dm700 $1/root/.ssh",
+ '--essential-hook=echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIapAZ0E0353DaY6xBnasvu/DOvdWdKQ6RQURwq4l6Wu twb@cyber.com.au (Trent W. Buck)" >$1/root/.ssh/authorized_keys',
+ # Bare minimum to let me log in locally.
+ # DO NOT use this on production builds!
+ "--essential-hook=chroot $1 passwd --delete root",
+ # Configure language (not needed to boot).
+        # Purging unneeded locales saves a **LOT** of space -- something like 2GB for Debian Live images.
+ # FIXME: use live-config instead?
+ "--include=locales localepurge",
+ f"--essential-hook=echo locales locales/default_environment_locale select {args.locale} | chroot $1 debconf-set-selections",
+ f"--essential-hook=echo locales locales/locales_to_be_generated multiselect {args.locale} UTF-8 | chroot $1 debconf-set-selections",
+ # FIXME: https://bugs.debian.org/603700
+ "--customize-hook=chroot $1 sed -i /etc/locale.nopurge -e 's/^USE_DPKG/#ARGH#&/'",
+ "--customize-hook=chroot $1 localepurge",
+ "--customize-hook=chroot $1 sed -i /etc/locale.nopurge -e 's/^#ARGH#//'",
+ # Removing documentation also saves a LOT of space.
+ "--dpkgopt=path-exclude=/usr/share/doc/*",
+ "--dpkgopt=path-exclude=/usr/share/info/*",
+ "--dpkgopt=path-exclude=/usr/share/man/*",
+ "--dpkgopt=path-exclude=/usr/share/omf/*",
+ "--dpkgopt=path-exclude=/usr/share/help/*",
+ "--dpkgopt=path-exclude=/usr/share/gnome/help/*",
+        # Configure timezone (not needed to boot).
+ # FIXME: use live-config instead?
+ "--include=tzdata",
+ f"--essential-hook=echo tzdata tzdata/Areas select {args.timezone[0]} | chroot $1 debconf-set-selections",
+ f"--essential-hook=echo tzdata tzdata/Zones/{args.timezone[0]} select {args.timezone[1]} | chroot $1 debconf-set-selections",
+ # Do the **BARE MINIMUM** to make a USB key that can boot on X86_64 UEFI.
+ # We use mtools so we do not ever need root privileges.
+ # We can't use mkfs.vfat, as that needs kpartx or losetup (i.e. root).
+ # We can't use mkfs.udf, as that needs mount (i.e. root).
+ # We can't use "refind-install --usedefault" as that runs mount(8) (i.e. root).
+ # We don't use genisoimage because
+ # 1) ISO9660 must die;
+ # 2) incomplete UDF 1.5+ support;
+ # 3) resulting filesystem can't be tweaked after flashing (e.g. debian-live/site.dir/etc/systemd/network/up.network).
+ #
+ # We use refind because 1) I hate grub; and 2) I like refind.
+ # If you want aarch64 or ia32 you need to install their BOOTxxx.EFI files.
+ # If you want kernel+initrd on something other than FAT, you need refind/drivers_xxx/xxx_xxx.EFI.
+ #
+ # FIXME: with qemu in UEFI mode (OVMF), I get dumped into startup.nsh (UEFI REPL).
+ # From there, I can manually type in "FS0:\EFI\BOOT\BOOTX64.EFI" to start refind, tho.
+ # So WTF is its problem? Does it not support fallback bootloader?
+ "--include=refind parted mtools",
+ "--essential-hook=echo refind refind/install_to_esp boolean false | chroot $1 debconf-set-selections",
+ "--customize-hook=echo refind refind/install_to_esp boolean true | chroot $1 debconf-set-selections",
+ "--customize-hook=chroot $1 mkdir -p /boot/USB /boot/EFI/BOOT",
+ "--customize-hook=chroot $1 cp /usr/share/refind/refind/refind_x64.efi /boot/EFI/BOOT/BOOTX64.EFI",
+ "--customize-hook=chroot $1 cp /usr/share/refind/refind/refind.conf-sample /boot/EFI/BOOT/refind.conf",
+ f"--customize-hook=chroot $1 truncate --size={filesystem_img_size} /boot/USB/filesystem.img",
+ f"--customize-hook=chroot $1 parted --script --align=optimal /boot/USB/filesystem.img mklabel gpt mkpart {esp_label} {esp_offset}b 100% set 1 esp on",
+ f"--customize-hook=chroot $1 mformat -i /boot/USB/filesystem.img@@{esp_offset} -F -v {esp_label}",
+ f"--customize-hook=chroot $1 mmd -i /boot/USB/filesystem.img@@{esp_offset} ::{live_media_path}",
+ f"""--customize-hook=echo '"Boot with default options" "boot=live live-media-path={live_media_path}"' >$1/boot/refind_linux.conf""",
+ f"""--customize-hook=chroot $1 find /boot/ -xdev -mindepth 1 -maxdepth 1 -not -name filesystem.img -not -name USB -exec mcopy -vsbpm -i /boot/USB/filesystem.img@@{esp_offset} {{}} :: ';'""",
+ # FIXME: copy-out doesn't handle sparseness, so is REALLY slow (about 50 seconds).
+ # Therefore instead leave it in the squashfs, and extract it later.
+ # f'--customize-hook=copy-out /boot/USB/filesystem.img /tmp/',
+ # f'--customize-hook=chroot $1 rm /boot/USB/filesystem.img',
+ "sid",
+ td / "filesystem.squashfs",
+ ]
+ )
+
+ with args.output_file.open("wb") as f:
+ subprocess.check_call(
+ ["rdsquashfs", "--cat=boot/USB/filesystem.img", td / "filesystem.squashfs"],
+ stdout=f,
+ )
+ subprocess.check_call(
+ [
+ "mcopy",
+ "-i",
+ f"{args.output_file}@@{esp_offset}",
+ td / "filesystem.squashfs",
+ f"::{live_media_path}/filesystem.squashfs",
+ ]
+ )
diff --git a/gpgvnoexpkeysig b/gpgvnoexpkeysig
new file mode 100755
index 0000000..f528ee4
--- /dev/null
+++ b/gpgvnoexpkeysig
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# No copyright is claimed. This code is in the public domain; do with
+# it what you wish.
+#
+# Author: Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+#
+# This is a wrapper around gpgv as invoked by apt. It turns EXPKEYSIG results
+# from gpgv into GOODSIG results. This is necessary for apt to access very old
+# timestamps from snapshot.debian.org for which the GPG key is already expired:
+#
+# Get:1 http://snapshot.debian.org/archive/debian/20150106T000000Z unstable InRelease [242 kB]
+# Err:1 http://snapshot.debian.org/archive/debian/20150106T000000Z unstable InRelease
+# The following signatures were invalid: EXPKEYSIG 8B48AD6246925553 Debian Archive Automatic Signing Key (7.0/wheezy) <ftpmaster@debian.org>
+# Reading package lists...
+# W: GPG error: http://snapshot.debian.org/archive/debian/20150106T000000Z unstable InRelease: The following signatures were invalid: EXPKEYSIG 8B48AD6246925553 Debian Archive Automatic Signing Key (7.0/wheezy) <ftpmaster@debian.org>
+# E: The repository 'http://snapshot.debian.org/archive/debian/20150106T000000Z unstable InRelease' is not signed.
+#
+# To use this script, call apt with
+#
+# -o Apt::Key::gpgvcommand=/usr/libexec/mmdebstrap/gpgvnoexpkeysig
+#
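+# With mmdebstrap, the same option can be passed via --aptopt (the following
+# invocation is illustrative only; adjust paths and the snapshot timestamp):
+#
+#   mmdebstrap --variant=apt \
+#     --aptopt='Apt::Key::gpgvcommand "/usr/libexec/mmdebstrap/gpgvnoexpkeysig"' \
+#     unstable chroot.tar \
+#     http://snapshot.debian.org/archive/debian/20150106T000000Z/
+#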
+# Scripts doing similar things can be found here:
+#
+# * debuerreotype as /usr/share/debuerreotype/scripts/.gpgv-ignore-expiration.sh
+# * derivative census: salsa.d.o/deriv-team/census/-/blob/master/bin/fakegpgv
+
+set -eu
+
+find_gpgv_status_fd() {
+ while [ "$#" -gt 0 ]; do
+ if [ "$1" = '--status-fd' ]; then
+ echo "$2"
+ return 0
+ fi
+ shift
+ done
+ # default fd is stdout
+ echo 1
+}
+GPGSTATUSFD="$(find_gpgv_status_fd "$@")"
+
+case $GPGSTATUSFD in
+ ''|*[!0-9]*)
+ echo "invalid --status-fd argument" >&2
+ exit 1
+ ;;
+esac
+
+# we need eval because we cannot redirect a variable fd
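+# for example, when apt invokes this script with "--status-fd 3", the eval
+# below expands to roughly:
+#   exec gpgv "$@" 3>&1 | sed "s/^\[GNUPG:\] EXPKEYSIG /[GNUPG:] GOODSIG /" >&3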
+eval 'exec gpgv "$@" '"$GPGSTATUSFD"'>&1 | sed "s/^\[GNUPG:\] EXPKEYSIG /[GNUPG:] GOODSIG /" >&'"$GPGSTATUSFD"
diff --git a/hooks/busybox/extract00.sh b/hooks/busybox/extract00.sh
new file mode 100755
index 0000000..7d9b6ec
--- /dev/null
+++ b/hooks/busybox/extract00.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+# Run busybox using an absolute path so that this script also works in case
+# /proc is not mounted. Busybox uses /proc/self/exe to figure out the path
+# to its executable.
+chroot "$rootdir" /bin/busybox --install -s
diff --git a/hooks/busybox/setup00.sh b/hooks/busybox/setup00.sh
new file mode 100755
index 0000000..fc65e12
--- /dev/null
+++ b/hooks/busybox/setup00.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+mkdir -p "$rootdir/bin"
+echo root:x:0:0:root:/root:/bin/sh > "$rootdir/etc/passwd"
+cat << END > "$rootdir/etc/group"
+root:x:0:
+mail:x:8:
+utmp:x:43:
+END
diff --git a/hooks/copy-host-apt-sources-and-preferences/customize00.pl b/hooks/copy-host-apt-sources-and-preferences/customize00.pl
new file mode 100755
index 0000000..53f6059
--- /dev/null
+++ b/hooks/copy-host-apt-sources-and-preferences/customize00.pl
@@ -0,0 +1,44 @@
+#!/usr/bin/perl
+#
+# This script makes sure that all packages that are installed both locally and
+# inside the chroot have the same version.
+#
+# It is implemented in Perl because there are no associative arrays in POSIX
+# shell.
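+#
+# A hedged usage sketch (the hook directory path depends on where this
+# repository is checked out; the empty suite string assumes that the copied
+# host configuration should be the only package source):
+#
+#   mmdebstrap --variant=apt \
+#     --hook-dir=./hooks/copy-host-apt-sources-and-preferences \
+#     '' chroot.tar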
+
+use strict;
+use warnings;
+
+sub get_pkgs {
+ my $root = shift;
+ my %pkgs = ();
+ open(my $fh, '-|', 'dpkg-query', "--root=$root", '--showformat',
+ '${binary:Package}=${Version}\n', '--show')
+ // die "cannot exec dpkg-query";
+ while (my $line = <$fh>) {
+ my ($pkg, $ver) = split(/=/, $line, 2);
+ $pkgs{$pkg} = $ver;
+ }
+ close $fh;
+ if ($? != 0) { die "failed to run dpkg-query" }
+ return %pkgs;
+}
+
+my %pkgs_local = get_pkgs('/');
+my %pkgs_chroot = get_pkgs($ARGV[0]);
+
+my @diff = ();
+foreach my $pkg (keys %pkgs_chroot) {
+ next unless exists $pkgs_local{$pkg};
+ if ($pkgs_local{$pkg} ne $pkgs_chroot{$pkg}) {
+ push @diff, $pkg;
+ }
+}
+
+if (scalar @diff > 0) {
+ print STDERR "E: packages from the host and the chroot differ:\n";
+ foreach my $pkg (@diff) {
+ print STDERR "E: $pkg $pkgs_local{$pkg} $pkgs_chroot{$pkg}\n";
+ }
+ exit 1;
+}
diff --git a/hooks/copy-host-apt-sources-and-preferences/setup00.sh b/hooks/copy-host-apt-sources-and-preferences/setup00.sh
new file mode 100755
index 0000000..07caa78
--- /dev/null
+++ b/hooks/copy-host-apt-sources-and-preferences/setup00.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+if [ -n "${MMDEBSTRAP_SUITE:-}" ]; then
+ if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 1 ]; then
+ echo "W: using a non-empty suite name $MMDEBSTRAP_SUITE does not make sense with this hook and might select the wrong Essential:yes package set" >&2
+ fi
+fi
+
+rootdir="$1"
+
+SOURCELIST="/etc/apt/sources.list"
+eval "$(apt-config shell SOURCELIST Dir::Etc::SourceList/f)"
+SOURCEPARTS="/etc/apt/sources.list.d/"
+eval "$(apt-config shell SOURCEPARTS Dir::Etc::SourceParts/d)"
+PREFERENCES="/etc/apt/preferences"
+eval "$(apt-config shell PREFERENCES Dir::Etc::Preferences/f)"
+PREFERENCESPARTS="/etc/apt/preferences.d/"
+eval "$(apt-config shell PREFERENCESPARTS Dir::Etc::PreferencesParts/d)"
+
+for f in "$SOURCELIST" \
+ "$SOURCEPARTS"/*.list \
+ "$SOURCEPARTS"/*.sources \
+ "$PREFERENCES" \
+ "$PREFERENCESPARTS"/*; do
+ [ -e "$f" ] || continue
+ mkdir --parents "$(dirname "$rootdir/$f")"
+ if [ -e "$rootdir/$f" ]; then
+ if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 2 ]; then
+ echo "I: $f already exists in chroot, appending..." >&2
+ fi
+ # Add extra newline between old content and new content.
+ # This is required in case of deb822 files.
+ echo >> "$rootdir/$f"
+ fi
+ cat "$f" >> "$rootdir/$f"
+ if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ echo "D: contents of $f inside the chroot:" >&2
+ cat "$rootdir/$f" >&2
+ fi
+done
diff --git a/hooks/eatmydata/README.txt b/hooks/eatmydata/README.txt
new file mode 100644
index 0000000..84659e9
--- /dev/null
+++ b/hooks/eatmydata/README.txt
@@ -0,0 +1,5 @@
+Adding this directory with --hook-directory will result in mmdebstrap using
+dpkg inside an eatmydata wrapper script. This results in speed-ups on
+systems where sync() takes some time. Using --dpkgopt=force-unsafe-io has a
+lesser effect than eatmydata. See:
+https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=613428
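+
+Example invocation (illustrative; adjust the hook directory path to wherever
+this repository or the installed hooks live):
+
+    mmdebstrap --variant=apt --hook-dir=./hooks/eatmydata unstable debian-unstable.tar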
diff --git a/hooks/eatmydata/customize.sh b/hooks/eatmydata/customize.sh
new file mode 100755
index 0000000..c675848
--- /dev/null
+++ b/hooks/eatmydata/customize.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+if [ -e "$rootdir/var/lib/dpkg/arch" ]; then
+ chrootarch=$(head -1 "$rootdir/var/lib/dpkg/arch")
+else
+ chrootarch=$(dpkg --print-architecture)
+fi
+libdir="/usr/lib/$(dpkg-architecture -a "$chrootarch" -q DEB_HOST_MULTIARCH)"
+
+# if eatmydata was actually installed properly, then we are not removing
+# anything here
+if ! chroot "$rootdir" dpkg-query --show eatmydata; then
+ rm "$rootdir/usr/bin/eatmydata"
+fi
+if ! chroot "$rootdir" dpkg-query --show libeatmydata1; then
+ rm "$rootdir$libdir"/libeatmydata.so*
+fi
+
+rm "$rootdir/usr/bin/dpkg"
+chroot "$rootdir" dpkg-divert --local --rename --remove /usr/bin/dpkg
+
+sync
diff --git a/hooks/eatmydata/extract.sh b/hooks/eatmydata/extract.sh
new file mode 100755
index 0000000..7ffe8e5
--- /dev/null
+++ b/hooks/eatmydata/extract.sh
@@ -0,0 +1,75 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+if [ -e "$rootdir/var/lib/dpkg/arch" ]; then
+ chrootarch=$(head -1 "$rootdir/var/lib/dpkg/arch")
+else
+ chrootarch=$(dpkg --print-architecture)
+fi
+
+trusted=
+eval "$(apt-config shell trusted Dir::Etc::trusted/f)"
+trustedparts=
+eval "$(apt-config shell trustedparts Dir::Etc::trustedparts/d)"
+tmpfile=$(mktemp --tmpdir="$rootdir/tmp")
+cat << END > "$tmpfile"
+Apt::Architecture "$chrootarch";
+Apt::Architectures "$chrootarch";
+Dir "$rootdir";
+Dir::Etc::Trusted "$trusted";
+Dir::Etc::TrustedParts "$trustedparts";
+END
+# we run "apt-get download --print-uris" in a temporary directory, to make sure
+# that the packages do not already exist in the current directory, or otherwise
+# nothing will be printed for them
+tmpdir=$(mktemp --directory --tmpdir="$rootdir/tmp")
+env --chdir="$tmpdir" APT_CONFIG="$tmpfile" apt-get download --print-uris eatmydata libeatmydata1 \
+ | sed -ne "s/^'\([^']\+\)'\s\+\(\S\+\)\s\+\([0-9]\+\)\s\+\(SHA256:[a-f0-9]\+\)$/\1 \2 \3 \4/p" \
+ | while read -r uri fname size hash; do
+ echo "processing $fname" >&2
+ if [ -e "$tmpdir/$fname" ]; then
+ echo "$tmpdir/$fname already exists" >&2
+ exit 1
+ fi
+ [ -z "$hash" ] && hash="Checksum-FileSize:$size"
+ env --chdir="$tmpdir" APT_CONFIG="$tmpfile" /usr/lib/apt/apt-helper download-file "$uri" "$fname" "$hash"
+ case "$fname" in
+ eatmydata_*_all.deb)
+ mkdir -p "$rootdir/usr/bin"
+ dpkg-deb --fsys-tarfile "$tmpdir/$fname" \
+ | tar --directory="$rootdir/usr/bin" --strip-components=3 --extract --verbose ./usr/bin/eatmydata
+ ;;
+ libeatmydata1_*_$chrootarch.deb)
+ libdir="/usr/lib/$(dpkg-architecture -a "$chrootarch" -q DEB_HOST_MULTIARCH)"
+ mkdir -p "$rootdir$libdir"
+ dpkg-deb --fsys-tarfile "$tmpdir/$fname" \
+ | tar --directory="$rootdir$libdir" --strip-components=4 --extract --verbose --wildcards ".$libdir/libeatmydata.so*"
+ ;;
+ *)
+ echo "unexpected filename: $fname" >&2
+ exit 1
+ ;;
+ esac
+ rm "$tmpdir/$fname"
+done
+rm "$tmpfile"
+rmdir "$tmpdir"
+
+mv "$rootdir/usr/bin/dpkg" "$rootdir/usr/bin/dpkg.distrib"
+cat << END > "$rootdir/usr/bin/dpkg"
+#!/bin/sh
+exec /usr/bin/eatmydata /usr/bin/dpkg.distrib "\$@"
+END
+chmod +x "$rootdir/usr/bin/dpkg"
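+# register the manual diversion in dpkg's database; each diversion is stored as
+# three lines: the diverted path, the divert-to path and the diverting package,
+# where ":" denotes a local diversion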
+cat << END >> "$rootdir/var/lib/dpkg/diversions"
+/usr/bin/dpkg
+/usr/bin/dpkg.distrib
+:
+END
diff --git a/hooks/file-mirror-automount/customize00.sh b/hooks/file-mirror-automount/customize00.sh
new file mode 100755
index 0000000..b6b9b46
--- /dev/null
+++ b/hooks/file-mirror-automount/customize00.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+# shellcheck disable=SC2086
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+if [ ! -e "$rootdir/run/mmdebstrap/file-mirror-automount" ]; then
+ exit 0
+fi
+
+xargsopts="--null --no-run-if-empty -I {} --max-args=1"
+
+case $MMDEBSTRAP_MODE in
+ root|unshare)
+ echo "unmounting the following mountpoints:" >&2 ;;
+ *)
+ echo "removing the following directories:" >&2 ;;
+esac
+
+< "$rootdir/run/mmdebstrap/file-mirror-automount" \
+ xargs $xargsopts echo " $rootdir/{}"
+
+case $MMDEBSTRAP_MODE in
+ root|unshare)
+ < "$rootdir/run/mmdebstrap/file-mirror-automount" \
+ xargs $xargsopts umount "$rootdir/{}"
+ ;;
+ *)
+ < "$rootdir/run/mmdebstrap/file-mirror-automount" \
+ xargs $xargsopts rm -r "$rootdir/{}"
+ ;;
+esac
+
+rm "$rootdir/run/mmdebstrap/file-mirror-automount"
+rmdir --ignore-fail-on-non-empty "$rootdir/run/mmdebstrap"
diff --git a/hooks/file-mirror-automount/setup00.sh b/hooks/file-mirror-automount/setup00.sh
new file mode 100755
index 0000000..61f60f2
--- /dev/null
+++ b/hooks/file-mirror-automount/setup00.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+rootdir="$1"
+
+# process all configured apt repositories
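+# e.g. a (hypothetical) local mirror configured as "deb [trusted=yes] file:///srv/repo ./"
+# shows up in the indextargets output as a file: URI and is reduced to the bare
+# path "srv/repo" by the sed expression below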
+env APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-get indextargets --no-release-info --format '$(REPO_URI)' \
+ | sed -ne 's/^file:\/\+//p' \
+ | sort -u \
+ | while read -r path; do
+ mkdir -p "$rootdir/run/mmdebstrap"
+ if [ ! -d "/$path" ]; then
+ echo "/$path is not an existing directory" >&2
+ continue
+ fi
+ case $MMDEBSTRAP_MODE in
+ root|unshare)
+ echo "bind-mounting /$path into the chroot" >&2
+ mkdir -p "$rootdir/$path"
+ mount -o ro,bind "/$path" "$rootdir/$path"
+ ;;
+ *)
+ echo "copying /$path into the chroot" >&2
+ mkdir -p "$rootdir/$path"
+ "$MMDEBSTRAP_ARGV0" --hook-helper "$rootdir" "$MMDEBSTRAP_MODE" "$MMDEBSTRAP_HOOK" env "$MMDEBSTRAP_VERBOSITY" sync-in "/$path" "/$path" <&"$MMDEBSTRAP_HOOKSOCK" >&"$MMDEBSTRAP_HOOKSOCK"
+ ;;
+ esac
+ printf '/%s\0' "$path" >> "$rootdir/run/mmdebstrap/file-mirror-automount"
+ done
+
+# process all files given via --include
+set -f # turn off pathname expansion
+IFS=',' # split by comma
+for pkg in $MMDEBSTRAP_INCLUDE; do
+ set +f; unset IFS
+ case $pkg in
+ ./*|../*|/*) : ;; # we are interested in this case
+ *) continue ;; # not a file
+ esac
+ # undo escaping
+ pkg="$(printf '%s' "$pkg" | sed 's/%2C/,/g; s/%25/%/g')"
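+    # e.g. a hypothetical --include entry "./foo%2Cbar.deb" becomes "./foo,bar.deb"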
+    # check for existence
+ if [ ! -f "$pkg" ]; then
+ echo "$pkg does not exist" >&2
+ continue
+ fi
+ # make path absolute
+ pkg="$(realpath "$pkg")"
+ case "$pkg" in
+ /*) : ;;
+ *) echo "path for $pkg is not absolute" >&2; continue;;
+ esac
+ mkdir -p "$rootdir/run/mmdebstrap"
+ mkdir -p "$rootdir/$(dirname "$pkg")"
+ case $MMDEBSTRAP_MODE in
+ root|unshare)
+ echo "bind-mounting $pkg into the chroot" >&2
+ touch "$rootdir/$pkg"
+ mount -o bind "$pkg" "$rootdir/$pkg"
+ ;;
+ *)
+ echo "copying $pkg into the chroot" >&2
+ "$MMDEBSTRAP_ARGV0" --hook-helper "$rootdir" "$MMDEBSTRAP_MODE" "$MMDEBSTRAP_HOOK" env "$MMDEBSTRAP_VERBOSITY" upload "$pkg" "$pkg" <&"$MMDEBSTRAP_HOOKSOCK" >&"$MMDEBSTRAP_HOOKSOCK"
+ ;;
+ esac
+ printf '/%s\0' "$pkg" >> "$rootdir/run/mmdebstrap/file-mirror-automount"
+done
+set +f; unset IFS
diff --git a/hooks/jessie-or-older/extract00.sh b/hooks/jessie-or-older/extract00.sh
new file mode 100755
index 0000000..f327052
--- /dev/null
+++ b/hooks/jessie-or-older/extract00.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+# not needed since dpkg 1.17.11
+for f in available diversions cmethopt; do
+ if [ ! -e "$TARGET/var/lib/dpkg/$f" ]; then
+ touch "$TARGET/var/lib/dpkg/$f"
+ fi
+done
diff --git a/hooks/jessie-or-older/extract01.sh b/hooks/jessie-or-older/extract01.sh
new file mode 100755
index 0000000..43f1540
--- /dev/null
+++ b/hooks/jessie-or-older/extract01.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# needed until init 1.33 which pre-depends on systemd-sysv
+# starting with init 1.34, init is not Essential:yes anymore
+#
+# jessie has init 1.22
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+if [ -z "${MMDEBSTRAP_ESSENTIAL+x}" ]; then
+ MMDEBSTRAP_ESSENTIAL=
+ for f in "$TARGET/var/cache/apt/archives/"*.deb; do
+ [ -f "$f" ] || continue
+ f="${f#"$TARGET"}"
+ MMDEBSTRAP_ESSENTIAL="$MMDEBSTRAP_ESSENTIAL $f"
+ done
+fi
+
+fname_base_passwd=
+fname_base_files=
+fname_dpkg=
+for pkg in $MMDEBSTRAP_ESSENTIAL; do
+ pkgname=$(dpkg-deb --show --showformat='${Package}' "$TARGET/$pkg")
+ # shellcheck disable=SC2034
+ case $pkgname in
+ base-passwd) fname_base_passwd=$pkg;;
+ base-files) fname_base_files=$pkg;;
+ dpkg) fname_dpkg=$pkg;;
+ esac
+done
+
+for var in base_passwd base_files dpkg; do
+ eval 'val=$fname_'"$var"
+ [ -z "$val" ] && continue
+ chroot "$TARGET" dpkg --install --force-depends "$val"
+done
+
+# shellcheck disable=SC2086
+chroot "$TARGET" dpkg --unpack --force-depends $MMDEBSTRAP_ESSENTIAL
+
+chroot "$TARGET" dpkg --configure --pending
diff --git a/hooks/maybe-jessie-or-older/extract00.sh b/hooks/maybe-jessie-or-older/extract00.sh
new file mode 100755
index 0000000..9a82fbf
--- /dev/null
+++ b/hooks/maybe-jessie-or-older/extract00.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+set -eu
+
+# we need to check the version of dpkg
+# since at this point packages are just extracted but not installed, we cannot use dpkg-query
+# since we want to support chrootless, we cannot run dpkg --version inside the chroot
+# to avoid this hook depending on dpkg-dev being installed, we do not parse the extracted changelog with dpkg-parsechangelog
+# we also want to avoid parsing the changelog because /usr/share/doc might've been added to dpkg --path-exclude
+# instead, we just ask apt about the latest version of dpkg it knows of
+# this should only fail in situations where there are multiple versions of dpkg in different suites
+ver=$(env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-cache show --no-all-versions dpkg 2>/dev/null | sed -ne 's/^Version: \(.*\)$/\1/p' || printf '')
+if [ -z "$ver" ]; then
+ echo "no package called dpkg can be installed -- not running jessie-or-older extract00 hook" >&2
+ exit 0
+fi
+
+if dpkg --compare-versions "$ver" ge 1.17.11; then
+ echo "dpkg version $ver is >= 1.17.11 -- not running jessie-or-older extract00 hook" >&2
+ exit 0
+else
+ echo "dpkg version $ver is << 1.17.11 -- running jessie-or-older extract00 hook" >&2
+fi
+
+# resolve the script path using several methods in order:
+# 1. using dirname -- "$0"
+# 2. using ./hooks
+# 3. using /usr/share/mmdebstrap/hooks/
+for p in "$(dirname -- "$0")/.." ./hooks /usr/share/mmdebstrap/hooks; do
+ if [ -x "$p/jessie-or-older/extract00.sh" ] && [ -x "$p/jessie-or-older/extract01.sh" ]; then
+ "$p/jessie-or-older/extract00.sh" "$1"
+ exit 0
+ fi
+done
+
+echo "cannot find jessie-or-older hook anywhere" >&2
+exit 1
diff --git a/hooks/maybe-jessie-or-older/extract01.sh b/hooks/maybe-jessie-or-older/extract01.sh
new file mode 100755
index 0000000..9a92d6d
--- /dev/null
+++ b/hooks/maybe-jessie-or-older/extract01.sh
@@ -0,0 +1,57 @@
+#!/bin/sh
+
+set -eu
+
+# The jessie-or-older extract01 hook has to be run for distributions up to the
+# point where the Essential:yes field was removed from the init package (with
+# init-system-helpers 1.34). Since the essential packages have only been
+# extracted but not installed, we cannot use dpkg-query to find out its
+# version. Since /usr/share/doc might be missing due to dpkg --path-exclude, we
+# also cannot check whether /usr/share/doc/init/copyright exists. There also
+# was a time (before init-system-helpers 1.20) where there was no init package
+# at all where we also want to apply this hook. So we just ask apt about the
+# candidate version for init-system-helpers. This should only fail in
+# situations where there are multiple versions of init-system-helpers in
+# different suites.
+ver=$(env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-cache show --no-all-versions init-system-helpers 2>/dev/null | sed -ne 's/^Version: \(.*\)$/\1/p' || printf '')
+if [ -z "$ver" ]; then
+ # there is no package called init-system-helpers, so either:
+ # - this is so old that init-system-helpers didn't exist yet
+ # - we are in a future where init-system-helpers doesn't exist anymore
+ # - something strange is going on
+ # we should only call the hook in the first case
+ ver=$(env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-cache show --no-all-versions base-files 2>/dev/null | sed -ne 's/^Version: \(.*\)$/\1/p' || printf '')
+ if [ -z "$ver" ]; then
+ echo "neither init-system-helpers nor base-files can be installed -- not running jessie-or-older extract01 hook" >&2
+ exit 0
+ fi
+
+ # Jessie is Debian 8
+ if dpkg --compare-versions "$ver" ge 8; then
+ echo "there is no init-system-helpers but base-files version $ver is >= 8 -- not running jessie-or-older extract01 hook" >&2
+ exit 0
+ else
+ echo "there is no init-system-helpers but base-files version $ver is << 8 -- running jessie-or-older extract01 hook" >&2
+ fi
+else
+ if dpkg --compare-versions "$ver" ge 1.34; then
+ echo "init-system-helpers version $ver is >= 1.34 -- not running jessie-or-older extract01 hook" >&2
+ exit 0
+ else
+ echo "init-system-helpers version $ver is << 1.34 -- running jessie-or-older extract01 hook" >&2
+ fi
+fi
+
+# resolve the script path using several methods in order:
+# 1. using dirname -- "$0"
+# 2. using ./hooks
+# 3. using /usr/share/mmdebstrap/hooks/
+for p in "$(dirname -- "$0")/.." ./hooks /usr/share/mmdebstrap/hooks; do
+ if [ -x "$p/jessie-or-older/extract00.sh" ] && [ -x "$p/jessie-or-older/extract01.sh" ]; then
+ "$p/jessie-or-older/extract01.sh" "$1"
+ exit 0
+ fi
+done
+
+echo "cannot find jessie-or-older hook anywhere" >&2
+exit 1
diff --git a/hooks/maybe-merged-usr/essential00.sh b/hooks/maybe-merged-usr/essential00.sh
new file mode 100755
index 0000000..a23f2f7
--- /dev/null
+++ b/hooks/maybe-merged-usr/essential00.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+set -eu
+
+ver=$(dpkg-query --root="$1" -f '${db:Status-Status} ${Source} ${Version}' --show usr-is-merged 2>/dev/null || printf '')
+case "$ver" in
+ '')
+ echo "no package called usr-is-merged is installed -- not running merged-usr essential hook" >&2
+ exit 0
+ ;;
+ 'installed mmdebstrap-dummy-usr-is-merged 1')
+ echo "dummy usr-is-merged package installed -- running merged-usr essential hook" >&2
+ ;;
+ 'installed usrmerge '*)
+ echo "usr-is-merged package from src:usrmerge installed -- not running merged-usr essential hook" >&2
+ exit 0
+ ;;
+ *)
+ echo "unexpected situation for package usr-is-merged: $ver" >&2
+ exit 1
+ ;;
+esac
+
+# resolve the script path using several methods in order:
+# 1. using dirname -- "$0"
+# 2. using ./hooks
+# 3. using /usr/share/mmdebstrap/hooks/
+for p in "$(dirname -- "$0")/.." ./hooks /usr/share/mmdebstrap/hooks; do
+ if [ -x "$p/merged-usr/setup00.sh" ] && [ -x "$p/merged-usr/extract00.sh" ] && [ -x "$p/merged-usr/essential00.sh" ]; then
+ "$p/merged-usr/essential00.sh" "$1"
+ exit 0
+ fi
+done
+
+echo "cannot find merged-usr hook anywhere" >&2
+exit 1
diff --git a/hooks/maybe-merged-usr/extract00.sh b/hooks/maybe-merged-usr/extract00.sh
new file mode 100755
index 0000000..dc88450
--- /dev/null
+++ b/hooks/maybe-merged-usr/extract00.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -eu
+
+env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-get update --error-on=any
+
+# if the usr-is-merged package cannot be installed with apt, do nothing
+if ! env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-cache show --no-all-versions usr-is-merged > /dev/null 2>&1; then
+ echo "no package called usr-is-merged found -- not running merged-usr extract hook" >&2
+ exit 0
+else
+ echo "package usr-is-merged found -- running merged-usr extract hook" >&2
+fi
+
+# resolve the script path using several methods in order:
+# 1. using dirname -- "$0"
+# 2. using ./hooks
+# 3. using /usr/share/mmdebstrap/hooks/
+for p in "$(dirname -- "$0")/.." ./hooks /usr/share/mmdebstrap/hooks; do
+ if [ -x "$p/merged-usr/setup00.sh" ] && [ -x "$p/merged-usr/extract00.sh" ] && [ -x "$p/merged-usr/essential00.sh" ]; then
+ "$p/merged-usr/extract00.sh" "$1"
+ exit 0
+ fi
+done
+
+echo "cannot find merged-usr hook anywhere" >&2
+exit 1
diff --git a/hooks/maybe-merged-usr/setup00.sh b/hooks/maybe-merged-usr/setup00.sh
new file mode 100755
index 0000000..a6bd712
--- /dev/null
+++ b/hooks/maybe-merged-usr/setup00.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -eu
+
+env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-get update --error-on=any
+
+# if the usr-is-merged package cannot be installed with apt, do nothing
+if ! env --chdir="$1" APT_CONFIG="$MMDEBSTRAP_APT_CONFIG" apt-cache show --no-all-versions usr-is-merged > /dev/null 2>&1; then
+ echo "no package called usr-is-merged found -- not running merged-usr setup hook" >&2
+ exit 0
+else
+ echo "package usr-is-merged found -- running merged-usr setup hook" >&2
+fi
+
+# resolve the script path using several methods in order:
+# 1. using dirname -- "$0"
+# 2. using ./hooks
+# 3. using /usr/share/mmdebstrap/hooks/
+for p in "$(dirname -- "$0")/.." ./hooks /usr/share/mmdebstrap/hooks; do
+ if [ -x "$p/merged-usr/setup00.sh" ] && [ -x "$p/merged-usr/extract00.sh" ] && [ -x "$p/merged-usr/essential00.sh" ]; then
+ "$p/merged-usr/setup00.sh" "$1"
+ exit 0
+ fi
+done
+
+echo "cannot find merged-usr hook anywhere" >&2
+exit 1
diff --git a/hooks/merged-usr/essential00.sh b/hooks/merged-usr/essential00.sh
new file mode 100755
index 0000000..d9a8130
--- /dev/null
+++ b/hooks/merged-usr/essential00.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+if [ "${MMDEBSTRAP_MODE:-}" = "chrootless" ]; then
+ APT_CONFIG=$MMDEBSTRAP_APT_CONFIG apt-get --yes install \
+ -oDPkg::Chroot-Directory= \
+ -oDPkg::Options::=--force-not-root \
+ -oDPkg::Options::=--force-script-chrootless \
+ -oDPkg::Options::=--root="$TARGET" \
+ -oDPkg::Options::=--log="$TARGET/var/log/dpkg.log" \
+ usr-is-merged
+ export DPKG_ROOT="$TARGET"
+ dpkg-query --showformat '${db:Status-Status}\n' --show usr-is-merged | grep -q '^installed$'
+ dpkg-query --showformat '${Source}\n' --show usr-is-merged | grep -q '^usrmerge$'
+ dpkg --compare-versions "1" "lt" "$(dpkg-query --showformat '${Version}\n' --show usr-is-merged)"
+else
+ APT_CONFIG=$MMDEBSTRAP_APT_CONFIG apt-get --yes install usr-is-merged
+ chroot "$TARGET" dpkg-query --showformat '${db:Status-Status}\n' --show usr-is-merged | grep -q '^installed$'
+ chroot "$TARGET" dpkg-query --showformat '${Source}\n' --show usr-is-merged | grep -q '^usrmerge$'
+ dpkg --compare-versions "1" "lt" "$(chroot "$TARGET" dpkg-query --showformat '${Version}\n' --show usr-is-merged)"
+fi
diff --git a/hooks/merged-usr/extract00.sh b/hooks/merged-usr/extract00.sh
new file mode 100755
index 0000000..7334191
--- /dev/null
+++ b/hooks/merged-usr/extract00.sh
@@ -0,0 +1,85 @@
+#!/bin/sh
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+# can_usrmerge_symlink() and merge_usr_entry() are
+# Copyright 2023 Helmut Grohne <helmut@subdivi.de>
+# and part of the debootstrap source in /usr/share/debootstrap/functions
+# https://salsa.debian.org/installer-team/debootstrap/-/merge_requests/96
+# https://bugs.debian.org/104989
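+#
+# Illustrative examples, assuming a hypothetical symlink at /lib/foo in the
+# chroot:
+#   target "libbar.so.1"  -> ok (relative, stays within the same directory)
+#   target "/usr/lib/bar" -> ok (absolute links can simply be relocated)
+#   target "../etc/bar"   -> rejected (relative link crossing the top-level directory)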
+can_usrmerge_symlink() {
+ # Absolute symlinks can be relocated without problems.
+ test "${2#/}" = "$2" || return 0
+ while :; do
+ if test "${2#/}" != "$2"; then
+ # Handle double-slashes.
+ set -- "$1" "${2#/}"
+ elif test "${2#./}" != "$2"; then
+ # Handle ./ inside a link target.
+ set -- "$1" "${2#./}"
+ elif test "$2" = ..; then
+ # A parent directory symlink is ok if it does not
+ # cross the top level directory.
+ test "${1%/*/*}" != "$1" -a -n "${1%/*/*}"
+ return $?
+ elif test "${2#../}" != "$2"; then
+ # Symbolic link crossing / cannot be moved safely.
+ # This is prohibited by Debian Policy 10.5.
+ test "${1%/*/*}" = "$1" -o -z "${1%/*/*}" && return 1
+ set -- "${1%/*}" "${2#../}"
+ else
+ # Consider the symlink ok if its target does not
+ # contain a parent directory. When we fail here,
+ # the link target is non-minimal and doesn't happen
+ # in the archive.
+ test "${2#*/../}" = "$2"
+ return $?
+ fi
+ done
+}
+
+merge_usr_entry() {
+ # shellcheck disable=SC3043
+ local entry canon
+ canon="$TARGET/usr/${1#"$TARGET/"}"
+ test -h "$canon" &&
+ error 1 USRMERGEFAIL "cannot move %s as its destination exists as a symlink" "${1#"$TARGET"}"
+ if ! test -e "$canon"; then
+ mv "$1" "$canon"
+ return 0
+ fi
+ test -d "$1" ||
+ error 1 USRMERGEFAIL "cannot move non-directory %s as its destination exists" "${1#"$TARGET"}"
+ test -d "$canon" ||
+ error 1 USRMERGEFAIL "cannot move directory %s as its destination is not a directory" "${1#"$TARGET"}"
+ for entry in "$1/"* "$1/."*; do
+ # Some shells return . and .. on dot globs.
+ test "${entry%/.}" != "${entry%/..}" && continue
+ if test -h "$entry" && ! can_usrmerge_symlink "${entry#"$TARGET"}" "$(readlink "$entry")"; then
+ error 1 USRMERGEFAIL "cannot move relative symlink crossing top-level directory" "${entry#"$TARGET"}"
+ fi
+ # Ignore glob match failures
+ if test "${entry%'/*'}" != "${entry%'/.*'}" && ! test -e "$entry"; then
+ continue
+ fi
+ merge_usr_entry "$entry"
+ done
+ rmdir "$1"
+}
+
+# This list includes all possible multilib directories. It must be
+# updated when new multilib directories are added. Hopefully,
+# all new architectures use multiarch instead, so we never get to
+# update this.
+for dir in bin lib lib32 lib64 libo32 libx32 sbin; do
+ test -h "$TARGET/$dir" && continue
+ test -e "$TARGET/$dir" || continue
+ merge_usr_entry "$TARGET/$dir"
+ ln -s "usr/$dir" "$TARGET/$dir"
+done
diff --git a/hooks/merged-usr/setup00.sh b/hooks/merged-usr/setup00.sh
new file mode 100755
index 0000000..a6b08d2
--- /dev/null
+++ b/hooks/merged-usr/setup00.sh
@@ -0,0 +1,79 @@
+#!/bin/sh
+#
+# mmdebstrap does have a --merged-usr option but only as a no-op for
+# debootstrap compatibility
+#
+# Using this hook script, you can emulate what debootstrap does to set up
+# merged /usr via directory symlinks, even using the exact same shell function
+# that debootstrap uses by running mmdebstrap with:
+#
+# --setup-hook=/usr/share/mmdebstrap/hooks/merged-usr/setup00.sh
+#
+# Alternatively, you can setup merged-/usr by installing the usrmerge package:
+#
+# --include=usrmerge
+#
+# mmdebstrap will not include this functionality via a --merged-usr option
+# because there are many reasons against implementing merged-/usr that way:
+#
+# https://wiki.debian.org/Teams/Dpkg/MergedUsr
+# https://wiki.debian.org/Teams/Dpkg/FAQ#Q:_Does_dpkg_support_merged-.2Fusr-via-aliased-dirs.3F
+# https://lists.debian.org/20190219044924.GB21901@gaara.hadrons.org
+# https://lists.debian.org/YAkLOMIocggdprSQ@thunder.hadrons.org
+# https://lists.debian.org/20181223030614.GA8788@gaara.hadrons.org
+#
+# In addition, the merged-/usr-via-aliased-dirs approach violates an important
+# principle of component-based software engineering and one of the core design
+# ideas/goals of mmdebstrap: all the information needed to create a chroot of a
+# Debian-based distribution should be contained in its packages and their
+# metadata. Using directory symlinks as debootstrap does contradicts this
+# principle: the information whether a distribution uses this approach to
+# merged-/usr is no longer contained in its packages but in an outside tool.
+#
+# Example real world problem: I'm using debbisect to bisect Debian unstable
+# between 2015 and today. For which snapshot.d.o timestamp should a merged-/usr
+# chroot be created and for which ones not?
+#
+# The problem is not the idea of merged-/usr but the way it got implemented in
+# debootstrap via directory symlinks. That way of rolling out merged-/usr is
+# bad from the dpkg point-of-view and completely opposite of the vision with
+# which I wrote mmdebstrap.
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+# now install an empty "usr-is-merged" package to avoid installing the
+# usrmerge package on this system even after init-system-helpers starts
+# depending on "usrmerge | usr-is-merged".
+#
+# This package will not end up in the final chroot because the essential
+# hook replaces it with the actual usr-is-merged package from src:usrmerge.
+
+tmpdir=$(mktemp --directory --tmpdir="$TARGET/tmp")
+mkdir -p "$tmpdir/usr-is-merged/DEBIAN"
+
+cat << END > "$tmpdir/usr-is-merged/DEBIAN/control"
+Package: usr-is-merged
+Priority: optional
+Section: oldlibs
+Maintainer: Johannes Schauer Marin Rodrigues <josch@debian.org>
+Architecture: all
+Multi-Arch: foreign
+Source: mmdebstrap-dummy-usr-is-merged
+Version: 1
+Description: dummy package created by mmdebstrap merged-usr setup hook
+ This package was generated and installed by the mmdebstrap merged-usr
+ setup hook at /usr/share/mmdebstrap/hooks/merged-usr.
+ .
+ If this package is installed in the final chroot, then this is a bug
+ in mmdebstrap. Please report: https://gitlab.mister-muffin.de/josch/mmdebstrap
+END
+dpkg-deb --build "$tmpdir/usr-is-merged" "$tmpdir/usr-is-merged.deb"
+dpkg --root="$TARGET" --log="$TARGET/var/log/dpkg.log" --install "$tmpdir/usr-is-merged.deb"
+rm "$tmpdir/usr-is-merged.deb" "$tmpdir/usr-is-merged/DEBIAN/control"
+rmdir "$tmpdir/usr-is-merged/DEBIAN" "$tmpdir/usr-is-merged" "$tmpdir"
diff --git a/hooks/no-merged-usr/essential00.sh b/hooks/no-merged-usr/essential00.sh
new file mode 120000
index 0000000..5360c8f
--- /dev/null
+++ b/hooks/no-merged-usr/essential00.sh
@@ -0,0 +1 @@
+../merged-usr/essential00.sh
\ No newline at end of file
diff --git a/hooks/no-merged-usr/setup00.sh b/hooks/no-merged-usr/setup00.sh
new file mode 100755
index 0000000..df50499
--- /dev/null
+++ b/hooks/no-merged-usr/setup00.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# mmdebstrap does have a --no-merged-usr option but only as a no-op for
+# debootstrap compatibility
+#
+# Using this hook script, you can emulate what debootstrap does to set up
+# a system without merged-/usr even after the essential init-system-helpers
+# package added a dependency on "usrmerge | usr-is-merged". By installing
+# a dummy usr-is-merged package, it avoids pulling in the dependencies of
+# the usrmerge package.
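+#
+# A hedged usage sketch (assuming the hooks are installed under
+# /usr/share/mmdebstrap/hooks and that a pre-bookworm suite is used, since
+# newer releases no longer support unmerged /usr):
+#
+#   mmdebstrap --hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr bullseye chroot.tar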
+
+set -eu
+
+if [ "${MMDEBSTRAP_VERBOSITY:-1}" -ge 3 ]; then
+ set -x
+fi
+
+TARGET="$1"
+
+echo "Warning: starting with Debian 12 (Bookworm), systems without merged-/usr are not supported anymore" >&2
+echo "Warning: starting with Debian 13 (Trixie), merged-/usr symlinks are shipped by packages in the essential-set making this hook ineffective" >&2
+
+echo "this system will not be supported in the future" > "$TARGET/etc/unsupported-skip-usrmerge-conversion"
+
+# now install an empty "usr-is-merged" package to avoid installing the
+# usrmerge package on this system even after init-system-helpers starts
+# depending on "usrmerge | usr-is-merged".
+#
+# This package will not end up in the final chroot because the essential
+# hook replaces it with the actual usr-is-merged package from src:usrmerge.
+
+tmpdir=$(mktemp --directory --tmpdir="$TARGET/tmp")
+mkdir -p "$tmpdir/usr-is-merged/DEBIAN"
+
+cat << END > "$tmpdir/usr-is-merged/DEBIAN/control"
+Package: usr-is-merged
+Priority: optional
+Section: oldlibs
+Maintainer: Johannes Schauer Marin Rodrigues <josch@debian.org>
+Architecture: all
+Multi-Arch: foreign
+Source: mmdebstrap-dummy-usr-is-merged
+Version: 1
+Description: dummy package created by mmdebstrap no-merged-usr setup hook
+ This package was generated and installed by the mmdebstrap no-merged-usr
+ setup hook at /usr/share/mmdebstrap/hooks/no-merged-usr.
+ .
+ If this package is installed in the final chroot, then this is a bug
+ in mmdebstrap. Please report: https://gitlab.mister-muffin.de/josch/mmdebstrap
+END
+dpkg-deb --build "$tmpdir/usr-is-merged" "$tmpdir/usr-is-merged.deb"
+dpkg --root="$TARGET" --log="$TARGET/var/log/dpkg.log" --install "$tmpdir/usr-is-merged.deb"
+rm "$tmpdir/usr-is-merged.deb" "$tmpdir/usr-is-merged/DEBIAN/control"
+rmdir "$tmpdir/usr-is-merged/DEBIAN" "$tmpdir/usr-is-merged" "$tmpdir"
diff --git a/ldconfig.fakechroot b/ldconfig.fakechroot
new file mode 100755
index 0000000..c3455af
--- /dev/null
+++ b/ldconfig.fakechroot
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+#
+# This script is in the public domain
+#
+# Author: Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+#
+# This is a command substitution for ldconfig under fakechroot:
+#
+# export FAKECHROOT_CMD_SUBST=/sbin/ldconfig=/path/to/ldconfig.fakechroot
+#
+# Statically linked binaries cannot work with fakechroot and thus have to be
+# replaced by either /bin/true or a more clever solution like this one. The
+# ldconfig command supports the -r option which allows passing a chroot
+# directory for ldconfig to work in. This can be used to run ldconfig without
+# fakechroot but still let it create /etc/ld.so.cache inside the chroot.
+#
+# Since absolute symlinks are broken without fakechroot to translate them,
+# we read /etc/ld.so.conf and turn all absolute symlink shared libraries into
+# relative ones. At program exit, the original state is restored.
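+#
+# For example, a plain "ldconfig" call inside the fakechroot environment ends
+# up being executed (roughly) as:
+#
+#   "$FAKECHROOT_BASE_ORIG"/sbin/ldconfig -r "$FAKECHROOT_BASE_ORIG"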
+
+
+import os
+import sys
+import subprocess
+import atexit
+import glob
+from pathlib import Path
+
+symlinks = []
+
+
+def restore_symlinks():
+ for (link, target, atime, mtime) in symlinks:
+ link.unlink()
+ link.symlink_to(target)
+ os.utime(link, times=None, ns=(atime, mtime), follow_symlinks=False)
+
+
+atexit.register(restore_symlinks)
+
+
+def get_libdirs(chroot, configs):
+ res = []
+ for conf in configs:
+ for line in (Path(conf)).read_text().splitlines():
+ line = line.strip()
+ if not line:
+ continue
+ if line.startswith("#"):
+ continue
+ if line.startswith("include "):
+ assert line.startswith("include /")
+ res.extend(
+ get_libdirs(chroot, chroot.glob(line.removeprefix("include /")))
+ )
+ continue
+ assert line.startswith("/"), line
+ line = line.lstrip("/")
+ if not (chroot / Path(line)).is_dir():
+ continue
+ for f in (chroot / Path(line)).iterdir():
+ if not f.is_symlink():
+ continue
+ linktarget = f.readlink()
+ # make sure that the linktarget is an absolute path inside the
+ # chroot
+ if not str(linktarget).startswith("/"):
+ continue
+ if chroot not in linktarget.parents:
+ continue
+ # store original link so that we can restore it later
+ symlinks.append(
+ (f, linktarget, f.lstat().st_atime_ns, f.lstat().st_mtime_ns)
+ )
+ # replace absolute symlink by relative link
+ relative = os.path.relpath(linktarget, f.parent)
+ f.unlink()
+ f.symlink_to(relative)
+ return res
+
+
+def main():
+ if "FAKECHROOT_BASE_ORIG" not in os.environ:
+ print("FAKECHROOT_BASE_ORIG is not set", file=sys.stderr)
+ print(
+ "must be executed under fakechroot using FAKECHROOT_CMD_SUBST",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ chroot = Path(os.environ["FAKECHROOT_BASE_ORIG"])
+
+ # if chrootless mode is used from within a fakechroot chroot, then
+ # FAKECHROOT_BASE_ORIG will point at the outer chroot. We want to use
+ # the path from DPKG_ROOT inside of that instead
+ if os.environ.get("DPKG_ROOT", "") not in ["", "/"]:
+ chroot /= os.environ["DPKG_ROOT"].lstrip("/")
+
+ if not (chroot / "sbin" / "ldconfig").exists():
+ sys.exit(0)
+
+ (chroot / "var" / "cache" / "ldconfig").mkdir(
+ mode=0o700, parents=True, exist_ok=True
+ )
+
+    # get_libdirs() rewrites absolute library symlinks into relative ones as a
+    # side effect while walking ld.so.conf and its includes; its return value
+    # is not used here
+    get_libdirs(chroot, [chroot / "etc" / "ld.so.conf"])
+
+ rootarg = chroot
+ argv = sys.argv[1:]
+ for arg in sys.argv[1:]:
+ if arg == "-r":
+ rootarg = None
+ elif rootarg is None:
+ argpath = Path(arg)
+ if argpath.is_absolute():
+ rootarg = chroot / argpath.relative_to("/")
+ else:
+ rootarg = Path.cwd() / argpath
+ if rootarg is None:
+ rootarg = chroot
+
+ # we add any additional arguments before "-r" such that any other "-r"
+ # option will be overwritten by the one we set
+ subprocess.check_call(
+ [chroot / "sbin" / "ldconfig"] + sys.argv[1:] + ["-r", rootarg]
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/make_mirror.sh b/make_mirror.sh
new file mode 100755
index 0000000..8849ee3
--- /dev/null
+++ b/make_mirror.sh
@@ -0,0 +1,568 @@
+#!/bin/sh
+
+set -eu
+
+# This script fills either cache.A or cache.B with new content and then
+# atomically switches the cache symlink from one to the other at the end.
+# This way, at no point will the cache be in a non-working state, even if
+# this script gets canceled at any point.
+# Working with two directories also automatically prunes old packages in
+# the local repository.
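+#
+# The final switch is just a symlink rename, roughly (see the end of this
+# script):
+#
+#   ln -s cache.B ./shared/cache.tmp
+#   mv --no-target-directory ./shared/cache.tmp ./shared/cache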
+
+deletecache() {
+ dir="$1"
+ echo "running deletecache $dir">&2
+ if [ ! -e "$dir" ]; then
+ return
+ fi
+ if [ ! -e "$dir/mmdebstrapcache" ]; then
+ echo "$dir cannot be the mmdebstrap cache" >&2
+ return 1
+ fi
+ # be very careful with removing the old directory
+ # experimental is pulled in with USE_HOST_APT_CONFIG=yes on debci
+ # when testing a package from experimental
+ for dist in oldstable stable testing unstable experimental; do
+ # deleting artifacts from test "debootstrap"
+ for variant in minbase buildd -; do
+ if [ -e "$dir/debian-$dist-$variant.tar" ]; then
+ rm "$dir/debian-$dist-$variant.tar"
+ else
+ echo "does not exist: $dir/debian-$dist-$variant.tar" >&2
+ fi
+ done
+ # deleting artifacts from test "mmdebstrap"
+ for variant in essential apt minbase buildd - standard; do
+ for format in tar ext2 squashfs; do
+ if [ -e "$dir/mmdebstrap-$dist-$variant.$format" ]; then
+ # attempt to delete for all dists because DEFAULT_DIST might've been different the last time
+ rm "$dir/mmdebstrap-$dist-$variant.$format"
+ elif [ "$dist" = "$DEFAULT_DIST" ]; then
+ # only warn about non-existence when it's expected to exist
+ echo "does not exist: $dir/mmdebstrap-$dist-$variant.$format" >&2
+ fi
+ done
+ done
+ if [ -e "$dir/debian/dists/$dist" ]; then
+ rm --one-file-system --recursive "$dir/debian/dists/$dist"
+ else
+ echo "does not exist: $dir/debian/dists/$dist" >&2
+ fi
+ case "$dist" in oldstable|stable)
+ if [ -e "$dir/debian/dists/$dist-updates" ]; then
+ rm --one-file-system --recursive "$dir/debian/dists/$dist-updates"
+ else
+ echo "does not exist: $dir/debian/dists/$dist-updates" >&2
+ fi
+ ;;
+ esac
+ case "$dist" in oldstable|stable)
+ if [ -e "$dir/debian-security/dists/$dist-security" ]; then
+ rm --one-file-system --recursive "$dir/debian-security/dists/$dist-security"
+ else
+ echo "does not exist: $dir/debian-security/dists/$dist-security" >&2
+ fi
+ ;;
+ esac
+ done
+ for f in "$dir/debian-"*.ext4; do
+ if [ -e "$f" ]; then
+ rm --one-file-system "$f"
+ fi
+ done
+ # on i386 and amd64, the intel-microcode and amd64-microcode packages
+ # from non-free-firmware get pulled in because they are
+ # priority:standard with USE_HOST_APT_CONFIG=yes
+ for c in main non-free-firmware; do
+ if [ -e "$dir/debian/pool/$c" ]; then
+ rm --one-file-system --recursive "$dir/debian/pool/$c"
+ else
+ echo "does not exist: $dir/debian/pool/$c" >&2
+ fi
+ done
+ if [ -e "$dir/debian-security/pool/updates/main" ]; then
+ rm --one-file-system --recursive "$dir/debian-security/pool/updates/main"
+ else
+ echo "does not exist: $dir/debian-security/pool/updates/main" >&2
+ fi
+ for i in $(seq 1 6); do
+ if [ ! -e "$dir/debian$i" ]; then
+ continue
+ fi
+ rm "$dir/debian$i"
+ done
+ rm "$dir/mmdebstrapcache"
+ # remove all symlinks
+ find "$dir" -type l -delete
+
+ # now the rest should only be empty directories
+ if [ -e "$dir" ]; then
+ find "$dir" -depth -print0 | xargs -0 --no-run-if-empty rmdir
+ else
+ echo "does not exist: $dir" >&2
+ fi
+}
+
+cleanup_newcachedir() {
+ echo "running cleanup_newcachedir"
+ deletecache "$newcachedir"
+}
+
+cleanupapt() {
+ echo "running cleanupapt" >&2
+ if [ ! -e "$rootdir" ]; then
+ return
+ fi
+ for f in \
+ "$rootdir/var/cache/apt/archives/"*.deb \
+ "$rootdir/var/cache/apt/archives/partial/"*.deb \
+ "$rootdir/var/cache/apt/"*.bin \
+ "$rootdir/var/lib/apt/lists/"* \
+ "$rootdir/var/lib/dpkg/status" \
+ "$rootdir/var/lib/dpkg/lock-frontend" \
+ "$rootdir/var/lib/dpkg/lock" \
+ "$rootdir/var/lib/apt/lists/lock" \
+ "$rootdir/etc/apt/apt.conf" \
+ "$rootdir/etc/apt/sources.list.d/"* \
+ "$rootdir/etc/apt/preferences.d/"* \
+ "$rootdir/etc/apt/sources.list" \
+ "$rootdir/var/cache/apt/archives/lock"; do
+ if [ ! -e "$f" ]; then
+ echo "does not exist: $f" >&2
+ continue
+ fi
+ if [ -d "$f" ]; then
+ rmdir "$f"
+ else
+ rm "$f"
+ fi
+ done
+ find "$rootdir" -depth -print0 | xargs -0 --no-run-if-empty rmdir
+}
+
+# note: this function body uses parentheses instead of curly braces, so that
+# it runs in its own subshell and we can handle traps independently of the
+# surrounding script
+update_cache() (
+ dist="$1"
+ nativearch="$2"
+
+ # use a subdirectory of $newcachedir so that we can use
+ # hardlinks
+ rootdir="$newcachedir/apt"
+ mkdir -p "$rootdir"
+
+ # we only set this trap here and overwrite the previous trap, because
+ # the update_cache function is run as part of a pipe and thus in its
+ # own process which will EXIT after it finishes
+ trap 'kill "$PROXYPID" || :;cleanupapt' EXIT INT TERM
+
+ for p in /etc/apt/apt.conf.d /etc/apt/sources.list.d /etc/apt/preferences.d /var/cache/apt/archives /var/lib/apt/lists/partial /var/lib/dpkg; do
+ mkdir -p "$rootdir/$p"
+ done
+
+ # read sources.list content from stdin
+ cat > "$rootdir/etc/apt/sources.list"
+
+ cat << END > "$rootdir/etc/apt/apt.conf"
+Apt::Architecture "$nativearch";
+Apt::Architectures "$nativearch";
+Dir::Etc "$rootdir/etc/apt";
+Dir::State "$rootdir/var/lib/apt";
+Dir::Cache "$rootdir/var/cache/apt";
+Apt::Install-Recommends false;
+Apt::Get::Download-Only true;
+Acquire::Languages "none";
+Dir::Etc::Trusted "/etc/apt/trusted.gpg";
+Dir::Etc::TrustedParts "/etc/apt/trusted.gpg.d";
+Acquire::http::Proxy "http://127.0.0.1:8080/";
+END
+
+ : > "$rootdir/var/lib/dpkg/status"
+
+ if [ "$dist" = "$DEFAULT_DIST" ] && [ "$nativearch" = "$HOSTARCH" ] && [ "$USE_HOST_APT_CONFIG" = "yes" ]; then
+ # we append the host's sources and settings after an empty line
+ # instead of overwriting the files created above
+ for f in /etc/apt/sources.list /etc/apt/sources.list.d/*; do
+ [ -e "$f" ] || continue
+ [ -e "$rootdir/$f" ] && echo >> "$rootdir/$f"
+ # Filter out file:// repositories as they are added
+ # to each mmdebstrap call verbatim by
+ # debian/tests/copy_host_apt_config
+ # Also filter out all mirrors that are not of suite
+ # $DEFAULT_DIST, except experimental if the suite
+ # is unstable. This prevents packages from
+ # unstable entering a testing mirror.
+ if [ "$dist" = unstable ]; then
+ grep -v ' file://' "$f" \
+ | grep -E " (unstable|experimental) " \
+ >> "$rootdir/$f" || :
+ else
+ grep -v ' file://' "$f" \
+ | grep " $DEFAULT_DIST " \
+ >> "$rootdir/$f" || :
+ fi
+ done
+ for f in /etc/apt/preferences.d/*; do
+ [ -e "$f" ] || continue
+ [ -e "$rootdir/$f" ] && echo >> "$rootdir/$f"
+ cat "$f" >> "$rootdir/$f"
+ done
+ fi
+
+ echo "creating mirror for $dist" >&2
+ for f in /etc/apt/sources.list /etc/apt/sources.list.d/* /etc/apt/preferences.d/*; do
+ [ -e "$rootdir/$f" ] || continue
+ echo "contents of $f:" >&2
+ cat "$rootdir/$f" >&2
+ done
+
+ APT_CONFIG="$rootdir/etc/apt/apt.conf" apt-get update --error-on=any
+
+ pkgs=$(APT_CONFIG="$rootdir/etc/apt/apt.conf" apt-get indextargets \
+ --format '$(FILENAME)' 'Created-By: Packages' "Architecture: $nativearch" \
+ | xargs --delimiter='\n' /usr/lib/apt/apt-helper cat-file \
+ | grep-dctrl --no-field-names --show-field=Package --exact-match \
+ \( --field=Essential yes --or --field=Priority required \
+ --or --field=Priority important --or --field=Priority standard \
+ \))
+
+ pkgs="$pkgs build-essential busybox gpg eatmydata fakechroot fakeroot"
+
+ # we need usr-is-merged to simulate debootstrap behaviour for all dists
+ # starting from Debian 12 (Bookworm)
+ case "$dist" in
+ oldstable) : ;;
+ *) pkgs="$pkgs usr-is-merged usrmerge" ;;
+ esac
+
+ # shellcheck disable=SC2086
+ APT_CONFIG="$rootdir/etc/apt/apt.conf" apt-get --yes install $pkgs
+
+ rm "$rootdir/var/cache/apt/archives/lock"
+ rmdir "$rootdir/var/cache/apt/archives/partial"
+ APT_CONFIG="$rootdir/etc/apt/apt.conf" apt-get --option Dir::Etc::SourceList=/dev/null update
+ APT_CONFIG="$rootdir/etc/apt/apt.conf" apt-get clean
+
+ cleanupapt
+
+ # this function is run in its own process, so we unset all traps before
+ # returning
+ trap "-" EXIT INT TERM
+)
+
+check_proxy_running() {
+ if timeout 1 bash -c 'exec 3<>/dev/tcp/127.0.0.1/8080 && printf "GET http://deb.debian.org/debian/dists/'"$DEFAULT_DIST"'/InRelease HTTP/1.1\nHost: deb.debian.org\n\n" >&3 && grep "Suite: '"$DEFAULT_DIST"'" <&3 >/dev/null' 2>/dev/null; then
+ return 0
+ elif timeout 1 env http_proxy="http://127.0.0.1:8080/" wget --quiet -O - "http://deb.debian.org/debian/dists/$DEFAULT_DIST/InRelease" | grep "Suite: $DEFAULT_DIST" >/dev/null; then
+ return 0
+ elif timeout 1 curl --proxy "http://127.0.0.1:8080/" --silent "http://deb.debian.org/debian/dists/$DEFAULT_DIST/InRelease" | grep "Suite: $DEFAULT_DIST" >/dev/null; then
+ return 0
+ fi
+ return 1
+}
+
+if [ -e "./shared/cache.A" ] && [ -e "./shared/cache.B" ]; then
+ echo "both ./shared/cache.A and ./shared/cache.B exist" >&2
+ echo "was a former run of the script aborted?" >&2
+ if [ -e ./shared/cache ]; then
+ echo "cache symlink points to $(readlink ./shared/cache)" >&2
+ case "$(readlink ./shared/cache)" in
+ cache.A)
+ echo "removing ./shared/cache.B" >&2
+ rm -r ./shared/cache.B
+ ;;
+ cache.B)
+ echo "removing ./shared/cache.A" >&2
+ rm -r ./shared/cache.A
+ ;;
+ *)
+ echo "unexpected" >&2
+ exit 1
+ ;;
+ esac
+ else
+ echo "./shared/cache doesn't exist" >&2
+ exit 1
+ fi
+fi
+
+if [ -e "./shared/cache.A" ]; then
+ oldcache=cache.A
+ newcache=cache.B
+else
+ oldcache=cache.B
+ newcache=cache.A
+fi
+
+oldcachedir="./shared/$oldcache"
+newcachedir="./shared/$newcache"
+
+oldmirrordir="$oldcachedir/debian"
+newmirrordir="$newcachedir/debian"
+
+mirror="http://deb.debian.org/debian"
+security_mirror="http://security.debian.org/debian-security"
+components=main
+
+: "${DEFAULT_DIST:=unstable}"
+: "${ONLY_DEFAULT_DIST:=no}"
+: "${ONLY_HOSTARCH:=no}"
+: "${HAVE_QEMU:=yes}"
+: "${RUN_MA_SAME_TESTS:=yes}"
+# by default, use the mmdebstrap executable in the current directory
+: "${CMD:=./mmdebstrap}"
+: "${USE_HOST_APT_CONFIG:=no}"
+: "${FORCE_UPDATE:=no}"
+
+if [ "$FORCE_UPDATE" != "yes" ] && [ -e "$oldmirrordir/dists/$DEFAULT_DIST/InRelease" ]; then
+ http_code=$(curl --output /dev/null --silent --location --head --time-cond "$oldmirrordir/dists/$DEFAULT_DIST/InRelease" --write-out '%{http_code}' "$mirror/dists/$DEFAULT_DIST/InRelease")
+ case "$http_code" in
+ 200) ;; # need update
+ 304) echo up-to-date; exit 0;;
+ *) echo "unexpected status: $http_code"; exit 1;;
+ esac
+fi
+
+./caching_proxy.py "$oldcachedir" "$newcachedir" &
+PROXYPID=$!
+trap 'kill "$PROXYPID" || :' EXIT INT TERM
+
+for i in $(seq 10); do
+ check_proxy_running && break
+ sleep 1
+done
+if [ ! -s "$newmirrordir/dists/$DEFAULT_DIST/InRelease" ]; then
+ echo "failed to start proxy" >&2
+ kill $PROXYPID
+ exit 1
+fi
+
+trap 'kill "$PROXYPID" || :;cleanup_newcachedir' EXIT INT TERM
+
+mkdir -p "$newcachedir"
+touch "$newcachedir/mmdebstrapcache"
+
+HOSTARCH=$(dpkg --print-architecture)
+arches="$HOSTARCH"
+if [ "$HOSTARCH" = amd64 ]; then
+ arches="$arches arm64 i386"
+elif [ "$HOSTARCH" = arm64 ]; then
+ arches="$arches amd64 armhf"
+fi
+
+# we need the split_inline_sig() function
+# shellcheck disable=SC1091
+. /usr/share/debootstrap/functions
+
+for dist in oldstable stable testing unstable; do
+ for nativearch in $arches; do
+ # non-host architectures are only downloaded for $DEFAULT_DIST
+ if [ "$nativearch" != "$HOSTARCH" ] && [ "$DEFAULT_DIST" != "$dist" ]; then
+ continue
+ fi
+ # if ONLY_DEFAULT_DIST is set, only download DEFAULT_DIST
+ if [ "$ONLY_DEFAULT_DIST" = "yes" ] && [ "$DEFAULT_DIST" != "$dist" ]; then
+ continue
+ fi
+ if [ "$ONLY_HOSTARCH" = "yes" ] && [ "$nativearch" != "$HOSTARCH" ]; then
+ continue
+ fi
+ # we need a first pass without updates and security patches
+ # because otherwise, old package versions needed by
+ # debootstrap will not get included
+ echo "deb [arch=$nativearch] $mirror $dist $components" | update_cache "$dist" "$nativearch"
+ # we need to include the base mirror again or otherwise
+ # packages like build-essential will be missing
+ case "$dist" in oldstable|stable)
+ cat << END | update_cache "$dist" "$nativearch"
+deb [arch=$nativearch] $mirror $dist $components
+deb [arch=$nativearch] $mirror $dist-updates main
+deb [arch=$nativearch] $security_mirror $dist-security main
+END
+ ;;
+ esac
+ done
+ codename=$(awk '/^Codename: / { print $2; }' < "$newmirrordir/dists/$dist/InRelease")
+ ln -s "$dist" "$newmirrordir/dists/$codename"
+
+ # split the InRelease file into Release and Release.gpg not because apt
+ # or debootstrap need it that way but because grep-dctrl does
+ split_inline_sig \
+ "$newmirrordir/dists/$dist/InRelease" \
+ "$newmirrordir/dists/$dist/Release" \
+ "$newmirrordir/dists/$dist/Release.gpg"
+ touch --reference="$newmirrordir/dists/$dist/InRelease" "$newmirrordir/dists/$dist/Release" "$newmirrordir/dists/$dist/Release.gpg"
+done
+
+kill $PROXYPID
+
+# Create some symlinks so that we can trick apt into accepting multiple apt
+# lines that point to the same repository but look different. This is to
+# avoid the warning:
+# W: Target Packages (main/binary-all/Packages) is configured multiple times...
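+# This way (illustration only, mirror URL made up), a test can list several
+# sources.list lines such as
+#   deb http://127.0.0.1/debian unstable main
+#   deb http://127.0.0.1/debian1 unstable main
+# which apt treats as distinct repositories even though they serve the same
+# files.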
+for i in $(seq 1 6); do
+ ln -s debian "$newcachedir/debian$i"
+done
+
+tmpdir=""
+
+cleanuptmpdir() {
+ if [ -z "$tmpdir" ]; then
+ return
+ fi
+ if [ ! -e "$tmpdir" ]; then
+ return
+ fi
+ for f in "$tmpdir/worker.sh" "$tmpdir/mmdebstrap.service"; do
+ if [ ! -e "$f" ]; then
+ echo "does not exist: $f" >&2
+ continue
+ fi
+ rm "$f"
+ done
+ rmdir "$tmpdir"
+}
+
+SOURCE_DATE_EPOCH="$(date --date="$(grep-dctrl -s Date -n '' "$newmirrordir/dists/$DEFAULT_DIST/Release")" +%s)"
+export SOURCE_DATE_EPOCH
+
+if [ "$HAVE_QEMU" = "yes" ]; then
+ # we use the caching proxy again when building the qemu image
+ # - we can re-use the packages that were already downloaded earlier
+ # - we make sure that the qemu image uses the same Release file even
+ # if a mirror push happened between now and earlier
+ # - we avoid polluting the mirror with the additional packages by
+ # using --readonly
+ ./caching_proxy.py --readonly "$oldcachedir" "$newcachedir" &
+ PROXYPID=$!
+
+ for i in $(seq 10); do
+ check_proxy_running && break
+ sleep 1
+ done
+ if [ ! -s "$newmirrordir/dists/$DEFAULT_DIST/InRelease" ]; then
+ echo "failed to start proxy" >&2
+ kill $PROXYPID
+ exit 1
+ fi
+
+ tmpdir="$(mktemp -d)"
+ trap 'kill "$PROXYPID" || :;cleanuptmpdir; cleanup_newcachedir' EXIT INT TERM
+
+ pkgs=perl-doc,systemd-sysv,perl,arch-test,fakechroot,fakeroot,mount,uidmap,qemu-user-static,qemu-user,dpkg-dev,mini-httpd,libdevel-cover-perl,libtemplate-perl,debootstrap,procps,apt-cudf,aspcud,python3,libcap2-bin,gpg,debootstrap,distro-info-data,iproute2,ubuntu-keyring,apt-utils,squashfs-tools-ng,genext2fs,linux-image-generic,passwd
+ if [ ! -e ./mmdebstrap ]; then
+ pkgs="$pkgs,mmdebstrap"
+ fi
+ arches=$HOSTARCH
+ if [ "$RUN_MA_SAME_TESTS" = "yes" ]; then
+ case "$HOSTARCH" in
+ amd64)
+ arches=amd64,arm64
+ pkgs="$pkgs,libfakechroot:arm64,libfakeroot:arm64"
+ ;;
+ arm64)
+ arches=arm64,amd64
+ pkgs="$pkgs,libfakechroot:amd64,libfakeroot:amd64"
+ ;;
+ esac
+ fi
+
+ cat << END > "$tmpdir/mmdebstrap.service"
+[Unit]
+Description=mmdebstrap worker script
+
+[Service]
+Type=oneshot
+ExecStart=/worker.sh
+
+[Install]
+WantedBy=multi-user.target
+END
+ # here is something crazy:
+ # as we run mmdebstrap, the process ends up being run by different users with
+ # different privileges (real or fake). But to be able to collect
+ # Devel::Cover data, they must all share a single directory. The only way
+ # I found to make this work is to mount the database directory with a
+ # filesystem that doesn't support ownership information at all and with a
+ # umask that gives read/write access to everybody.
+ # https://github.com/pjcj/Devel--Cover/issues/223
+ cat << 'END' > "$tmpdir/worker.sh"
+#!/bin/sh
+echo 'root:root' | chpasswd
+mount -t 9p -o trans=virtio,access=any,msize=128k mmdebstrap /mnt
+# need to restart mini-httpd because we mounted different content into www-root
+systemctl restart mini-httpd
+
+ip link set enp0s1 down || :
+
+handler () {
+ while IFS= read -r line || [ -n "$line" ]; do
+ printf "%s %s: %s\n" "$(date -u -d "0 $(date +%s.%3N) seconds - $2 seconds" +"%T.%3N")" "$1" "$line"
+ done
+}
+
+(
+ cd /mnt;
+ if [ -e cover_db.img ]; then
+ mkdir -p cover_db
+ mount -o loop,umask=000 cover_db.img cover_db
+ fi
+
+ now=$(date +%s.%3N)
+ ret=0
+ { { { { {
+ sh -x ./test.sh 2>&1 1>&4 3>&- 4>&-; echo $? >&2;
+ } | handler E "$now" >&3;
+ } 4>&1 | handler O "$now" >&3;
+ } 2>&1;
+ } | { read xs; exit $xs; };
+ } 3>&1 || ret=$?
+ echo $ret > /mnt/exitstatus.txt
+ if [ -e cover_db.img ]; then
+ df -h cover_db
+ umount cover_db
+ fi
+) > /mnt/output.txt 2>&1
+umount /mnt
+systemctl poweroff
+END
+ chmod +x "$tmpdir/worker.sh"
+ if [ -z "${DISK_SIZE+x}" ]; then
+ DISK_SIZE=10G
+ fi
+ # set PATH to pick up the correct mmdebstrap variant
+ env PATH="$(dirname "$(realpath --canonicalize-existing "$CMD")"):$PATH" \
+ debvm-create --skip=usrmerge,systemdnetwork \
+ --size="$DISK_SIZE" --release="$DEFAULT_DIST" \
+ --output="$newcachedir/debian-$DEFAULT_DIST.ext4" -- \
+ --architectures="$arches" --include="$pkgs" \
+ --setup-hook='echo "Acquire::http::Proxy \"http://127.0.0.1:8080/\";" > "$1/etc/apt/apt.conf.d/00proxy"' \
+ --hook-dir=/usr/share/mmdebstrap/hooks/maybe-merged-usr \
+ --customize-hook='rm "$1/etc/apt/apt.conf.d/00proxy"' \
+ --customize-hook='mkdir -p "$1/etc/systemd/system/multi-user.target.wants"' \
+ --customize-hook='ln -s ../mmdebstrap.service "$1/etc/systemd/system/multi-user.target.wants/mmdebstrap.service"' \
+ --customize-hook='touch "$1/mmdebstrap-testenv"' \
+ --customize-hook='copy-in "'"$tmpdir"'/mmdebstrap.service" /etc/systemd/system/' \
+ --customize-hook='copy-in "'"$tmpdir"'/worker.sh" /' \
+ --customize-hook='echo 127.0.0.1 localhost > "$1/etc/hosts"' \
+ --customize-hook='printf "START=1\nDAEMON_OPTS=\"-h 127.0.0.1 -p 80 -u nobody -dd /mnt/cache -i /var/run/mini-httpd.pid -T UTF-8\"\n" > "$1/etc/default/mini-httpd"' \
+ "$mirror"
+
+ kill $PROXYPID
+ cleanuptmpdir
+ trap "cleanup_newcachedir" EXIT INT TERM
+fi
+
+# delete possibly leftover symlink
+if [ -e ./shared/cache.tmp ]; then
+ rm ./shared/cache.tmp
+fi
+# now atomically switch the symlink to point to the other directory
+ln -s $newcache ./shared/cache.tmp
+mv --no-target-directory ./shared/cache.tmp ./shared/cache
+
+deletecache "$oldcachedir"
+
+trap - EXIT INT TERM
+
+echo "$0 finished successfully" >&2
diff --git a/mmdebstrap b/mmdebstrap
new file mode 100755
index 0000000..b7f11e6
--- /dev/null
+++ b/mmdebstrap
@@ -0,0 +1,7897 @@
+#!/usr/bin/perl
+#
+# © 2018 - 2023 Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# The software is provided "as is", without warranty of any kind, express or
+# implied, including but not limited to the warranties of merchantability,
+# fitness for a particular purpose and noninfringement. In no event shall the
+# authors or copyright holders be liable for any claim, damages or other
+# liability, whether in an action of contract, tort or otherwise, arising
+# from, out of or in connection with the software or the use or other dealings
+# in the software.
+
+use strict;
+use warnings;
+
+our $VERSION = '1.4.3';
+
+use English;
+use Getopt::Long;
+use Pod::Usage;
+use File::Copy;
+use File::Path qw(make_path);
+use File::Temp qw(tempfile tempdir);
+use File::Basename;
+use File::Find;
+use Cwd qw(abs_path getcwd);
+require "syscall.ph"; ## no critic (Modules::RequireBarewordIncludes)
+require "sys/ioctl.ph"; ## no critic (Modules::RequireBarewordIncludes)
+use Fcntl qw(S_IFCHR S_IFBLK FD_CLOEXEC F_GETFD F_SETFD);
+use List::Util qw(any none);
+use POSIX
+ qw(SIGINT SIGHUP SIGPIPE SIGTERM SIG_BLOCK SIG_UNBLOCK strftime isatty);
+use Carp;
+use Term::ANSIColor;
+use Socket;
+use Time::HiRes;
+use Math::BigInt;
+use Text::ParseWords;
+use version;
+
+## no critic (InputOutput::RequireBriefOpen)
+
+# from sched.h
+# use typeglob constants because "use constant" has several drawbacks as
+# explained in the documentation for the Readonly CPAN module
+*CLONE_NEWNS = \0x20000; # mount namespace
+*CLONE_NEWUTS = \0x4000000; # utsname
+*CLONE_NEWIPC = \0x8000000; # ipc
+*CLONE_NEWUSER = \0x10000000; # user
+*CLONE_NEWPID = \0x20000000; # pid
+*CLONE_NEWNET = \0x40000000; # net
+*_LINUX_CAPABILITY_VERSION_3 = \0x20080522;
+*CAP_SYS_ADMIN = \21;
+*PR_CAPBSET_READ = \23;
+# from sys/mount.h
+*MS_BIND = \0x1000;
+*MS_REC = \0x4000;
+*MNT_DETACH = \2;
+our (
+ $CLONE_NEWNS, $CLONE_NEWUTS,
+ $CLONE_NEWIPC, $CLONE_NEWUSER,
+ $CLONE_NEWPID, $CLONE_NEWNET,
+ $_LINUX_CAPABILITY_VERSION_3, $CAP_SYS_ADMIN,
+ $PR_CAPBSET_READ, $MS_BIND,
+ $MS_REC, $MNT_DETACH
+);
+
+#<<<
+# type codes:
+# 0 -> normal file
+# 1 -> hardlink
+# 2 -> symlink
+# 3 -> character special
+# 4 -> block special
+# 5 -> directory
+my @devfiles = (
+ # filename mode type link target major minor
+ ["", oct(755), 5, '', undef, undef],
+ ["console", oct(666), 3, '', 5, 1],
+ ["fd", oct(777), 2, '/proc/self/fd', undef, undef],
+ ["full", oct(666), 3, '', 1, 7],
+ ["null", oct(666), 3, '', 1, 3],
+ ["ptmx", oct(666), 3, '', 5, 2],
+ ["pts/", oct(755), 5, '', undef, undef],
+ ["random", oct(666), 3, '', 1, 8],
+ ["shm/", oct(755), 5, '', undef, undef],
+ ["stderr", oct(777), 2, '/proc/self/fd/2', undef, undef],
+ ["stdin", oct(777), 2, '/proc/self/fd/0', undef, undef],
+ ["stdout", oct(777), 2, '/proc/self/fd/1', undef, undef],
+ ["tty", oct(666), 3, '', 5, 0],
+ ["urandom", oct(666), 3, '', 1, 9],
+ ["zero", oct(666), 3, '', 1, 5],
+);
+#>>>
+
+# verbosity levels:
+# 0 -> print nothing
+# 1 -> normal output and progress bars
+# 2 -> verbose output
+# 3 -> debug output
+my $verbosity_level = 1;
+
+my $is_covering = 0;
+{
+ # make $@ local, so we don't print "Undefined subroutine called"
+ # in other parts where we evaluate $@
+ local $@ = '';
+ $is_covering = !!(eval { Devel::Cover::get_coverage() });
+}
+
+# the reason why Perl::Critic warns about this is that it suspects that the
+# programmer wants to implement a test whether the terminal is interactive or
+# not, in which case, complex interactions with the magic *ARGV indeed make it
+# advisable to use IO::Interactive. In our case, we do not want to create an
+# interactivity check but just want to check whether STDERR is opened to a tty,
+# so our use of -t is fine and not "fragile and complicated" as is written in
+# the description of InputOutput::ProhibitInteractiveTest. Also see
+# https://github.com/Perl-Critic/Perl-Critic/issues/918
+sub stderr_is_tty() {
+ ## no critic (InputOutput::ProhibitInteractiveTest)
+ if (-t STDERR) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+sub debug {
+ if ($verbosity_level < 3) {
+ return;
+ }
+ my $msg = shift;
+ my ($package, $filename, $line) = caller;
+ $msg = "D: $PID $line $msg";
+ if (stderr_is_tty()) {
+ $msg = colored($msg, 'clear');
+ }
+ print STDERR "$msg\n";
+ return;
+}
+
+sub info {
+ if ($verbosity_level == 0) {
+ return;
+ }
+ my $msg = shift;
+ if ($verbosity_level >= 3) {
+ my ($package, $filename, $line) = caller;
+ $msg = "$PID $line $msg";
+ }
+ $msg = "I: $msg";
+ if (stderr_is_tty()) {
+ $msg = colored($msg, 'green');
+ }
+ print STDERR "$msg\n";
+ return;
+}
+
+sub warning {
+ if ($verbosity_level == 0) {
+ return;
+ }
+ my $msg = shift;
+ $msg = "W: $msg";
+ if (stderr_is_tty()) {
+ $msg = colored($msg, 'bold yellow');
+ }
+ print STDERR "$msg\n";
+ return;
+}
+
+sub error {
+ # if error() is called with the string from a previous error() that was
+ # caught inside an eval(), then the string will have a newline which we
+ # are stripping here
+ chomp(my $msg = shift);
+ $msg = "E: $msg";
+ if (stderr_is_tty()) {
+ $msg = colored($msg, 'bold red');
+ }
+ if ($verbosity_level == 3) {
+ croak $msg; # produces a backtrace
+ } else {
+ die "$msg\n";
+ }
+}
+
+# The encoding of dev_t is MMMM Mmmm mmmM MMmm, where M is a hex digit of
+# the major number and m is a hex digit of the minor number.
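+# For example (illustration only), /dev/null has major 1 and minor 3, so its
+# dev_t is 0x0103: major(0x0103) masks out 0x00000000000fff00 and shifts right
+# by 8 bits, giving 1, while minor(0x0103) masks out 0x00000000000000ff,
+# giving 3.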
+sub major {
+ my $rdev = shift;
+ my $right
+ = Math::BigInt->from_hex("0x00000000000fff00")->band($rdev)->brsft(8);
+ my $left
+ = Math::BigInt->from_hex("0xfffff00000000000")->band($rdev)->brsft(32);
+ return $right->bior($left);
+}
+
+sub minor {
+ my $rdev = shift;
+ my $right = Math::BigInt->from_hex("0x00000000000000ff")->band($rdev);
+ my $left
+ = Math::BigInt->from_hex("0x00000ffffff00000")->band($rdev)->brsft(12);
+ return $right->bior($left);
+}
+
+sub can_execute {
+ my $tool = shift;
+ my $pid = open my $fh, '-|' // return 0;
+ if ($pid == 0) {
+ open(STDERR, '>&', STDOUT) or die;
+ exec {$tool} $tool, '--version' or die;
+ }
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($? != 0) {
+ return 0;
+ }
+ if (length $content == 0) {
+ return 0;
+ }
+ return 1;
+}
+
+# check whether a directory is mounted by comparing the device number of the
+# directory itself with its parent
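+# (illustration only: when /proc is mounted, stat("/proc/.") and
+# stat("/proc/..") report different device numbers)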
+sub is_mountpoint {
+ my $dir = shift;
+ if (!-e $dir) {
+ return 0;
+ }
+ my @a = stat "$dir/.";
+ my @b = stat "$dir/..";
+ # if the device number is different, then the directory must be mounted
+ if ($a[0] != $b[0]) {
+ return 1;
+ }
+ # if the inode number is the same, then the directory must be mounted
+ if ($a[1] == $b[1]) {
+ return 1;
+ }
+ return 0;
+}
+
+# tar cannot figure out the decompression program when receiving data on
+# standard input, thus we do it ourselves. This is copied from tar's
+# src/suffix.c
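+# For example (illustration only), "chroot.tar.xz" maps to ['xz'],
+# "chroot.tgz" maps to ['gzip'], and a plain "chroot.tar" or "-" (stdin)
+# needs no compressor, so nothing is returned.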
+sub get_tar_compressor {
+ my $filename = shift;
+ if ($filename eq '-') {
+ return;
+ } elsif ($filename =~ /\.tar$/) {
+ return;
+ } elsif ($filename =~ /\.(gz|tgz|taz)$/) {
+ return ['gzip'];
+ } elsif ($filename =~ /\.(Z|taZ)$/) {
+ return ['compress'];
+ } elsif ($filename =~ /\.(bz2|tbz|tbz2|tz2)$/) {
+ return ['bzip2'];
+ } elsif ($filename =~ /\.lz$/) {
+ return ['lzip'];
+ } elsif ($filename =~ /\.(lzma|tlz)$/) {
+ return ['lzma'];
+ } elsif ($filename =~ /\.lzo$/) {
+ return ['lzop'];
+ } elsif ($filename =~ /\.lz4$/) {
+ return ['lz4'];
+ } elsif ($filename =~ /\.(xz|txz)$/) {
+ return ['xz'];
+ } elsif ($filename =~ /\.zst$/) {
+ return ['zstd'];
+ }
+ return;
+}
+
+# avoid dependency on String::ShellQuote by implementing the mechanism
+# from python's shlex.quote function
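+# For example (illustration only), shellescape("O'Brien") yields 'O'"'"'Brien'
+# while a safe string like "foo-1.2" is returned unchanged.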
+sub shellescape {
+ my $string = shift;
+ if (length $string == 0) {
+ return "''";
+ }
+ # search for occurrences of characters that are not safe
+ # the 'a' regex modifier makes sure that \w only matches ASCII
+ if ($string !~ m/[^\w@\%+=:,.\/-]/a) {
+ return $string;
+ }
+ # wrap the string in single quotes and handle existing single quotes by
+ # putting them outside of the single-quoted string
+ $string =~ s/'/'"'"'/g;
+ return "'$string'";
+}
+
+sub test_unshare_userns {
+ my $verbose = shift;
+
+ local *maybe_error = sub {
+ my $msg = shift;
+ if ($verbose) {
+ error $msg;
+ } else {
+ debug $msg;
+ }
+ };
+
+ if ($EFFECTIVE_USER_ID == 0) {
+ maybe_error("cannot unshare user namespace when executing as root");
+ return 0;
+ }
+ # arguments to syscalls have to be stored in their own variable or
+ # otherwise we will get "Modification of a read-only value attempted"
+ my $unshare_flags = $CLONE_NEWUSER;
+ # we spawn a new process because if unshare succeeds, we would
+ # otherwise have unshared the mmdebstrap process itself, which we don't want
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ my $ret = syscall(&SYS_unshare, $unshare_flags);
+ if ($ret == 0) {
+ exit 0;
+ } else {
+ maybe_error("unshare syscall failed: $!");
+ exit 1;
+ }
+ }
+ waitpid($pid, 0);
+ if (($? >> 8) != 0) {
+ return 0;
+ }
+ # if newuidmap and newgidmap exist, the exit status will be 1 when
+ # executed without parameters
+ system "newuidmap 2>/dev/null";
+ if (($? >> 8) != 1) {
+ if (($? >> 8) == 127) {
+ maybe_error("cannot find newuidmap");
+ } else {
+ maybe_error("newuidmap returned unknown exit status: $?");
+ }
+ return 0;
+ }
+ system "newgidmap 2>/dev/null";
+ if (($? >> 8) != 1) {
+ if (($? >> 8) == 127) {
+ maybe_error("cannot find newgidmap");
+ } else {
+ maybe_error("newgidmap returned unknown exit status: $?");
+ }
+ return 0;
+ }
+ my @idmap = read_subuid_subgid($verbose);
+ if (scalar @idmap == 0) {
+ maybe_error("failed to parse /etc/subuid and /etc/subgid");
+ return 0;
+ }
+ # too much can go wrong when doing the dance required to unshare the user
+ # namespace, so instead of adding more complexity to support maybe_error()
+ # in a function that is already too complex, we use eval()
+ eval {
+ $pid = get_unshare_cmd(
+ sub {
+ if ($EFFECTIVE_USER_ID == 0) {
+ exit 0;
+ } else {
+ exit 1;
+ }
+ },
+ \@idmap
+ );
+ waitpid $pid, 0;
+ if ($? != 0) {
+ maybe_error("failed to unshare the user namespace");
+ return 0;
+ }
+ };
+ if ($@) {
+ maybe_error($@);
+ return 0;
+ }
+ return 1;
+}
+
+sub read_subuid_subgid {
+ my $verbose = shift;
+ my @result = ();
+ my $username = getpwuid $REAL_USER_ID;
+ my ($subid, $num_subid, $fh, $n);
+
+ local *maybe_warn = sub {
+ my $msg = shift;
+ if ($verbose) {
+ warning $msg;
+ } else {
+ debug $msg;
+ }
+ };
+ if (!-e "/etc/subuid") {
+ maybe_warn("/etc/subuid doesn't exist");
+ return;
+ }
+ if (!-r "/etc/subuid") {
+ maybe_warn("/etc/subuid is not readable");
+ return;
+ }
+
+ open $fh, "<", "/etc/subuid"
+ or maybe_warn("cannot open /etc/subuid for reading: $!");
+ if (!$fh) {
+ return;
+ }
+ while (my $line = <$fh>) {
+ ($n, $subid, $num_subid) = split(/:/, $line, 3);
+ last if ($n eq $username);
+ }
+ close $fh;
+ if (!length $subid) {
+ maybe_warn("/etc/subuid is empty");
+ return;
+ }
+ if ($n ne $username) {
+ maybe_warn("no entry in /etc/subuid for $username");
+ return;
+ }
+ push @result, ["u", 0, $subid, $num_subid];
+
+ if (scalar(@result) < 1) {
+ maybe_warn("/etc/subuid does not contain an entry for $username");
+ return;
+ }
+ if (scalar(@result) > 1) {
+ maybe_warn("/etc/subuid contains multiple entries for $username");
+ return;
+ }
+
+ if (!-e "/etc/subgid") {
+ maybe_warn("/etc/subgid doesn't exist");
+ return;
+ }
+ if (!-r "/etc/subgid") {
+ maybe_warn("/etc/subgid is not readable");
+ return;
+ }
+
+ open $fh, "<", "/etc/subgid"
+ or maybe_warn("cannot open /etc/subgid for reading: $!");
+ if (!$fh) {
+ return;
+ }
+ while (my $line = <$fh>) {
+ ($n, $subid, $num_subid) = split(/:/, $line, 3);
+ last if ($n eq $username);
+ }
+ close $fh;
+ if (!length $subid) {
+ maybe_warn("/etc/subgid is empty");
+ return;
+ }
+ if ($n ne $username) {
+ maybe_warn("no entry in /etc/subgid for $username");
+ return;
+ }
+ push @result, ["g", 0, $subid, $num_subid];
+
+ if (scalar(@result) < 2) {
+ maybe_warn("/etc/subgid does not contain an entry for $username");
+ return;
+ }
+ if (scalar(@result) > 2) {
+ maybe_warn("/etc/subgid contains multiple entries for $username");
+ return;
+ }
+
+ return @result;
+}
+
+# This function spawns two child processes forming the following process tree
+#
+# A
+# |
+# fork()
+# | \
+# B C
+# | |
+# | fork()
+# | | \
+# | D E
+# | | |
+# |unshare()
+# | close()
+# | | |
+# | | read()
+# | | newuidmap(D)
+# | | newgidmap(D)
+# | | /
+# | waitpid()
+# | |
+# | fork()
+# | | \
+# | F G
+# | | |
+# | | exec()
+# | | /
+# | waitpid()
+# | /
+# waitpid()
+#
+# To better refer to each individual part, we give each process a new
+# identifier after calling fork(). Process A is the main process. After
+# executing fork() we call the parent and child B and C, respectively. This
+# first fork() is done because we do not want to modify A. B then remains
+# waiting for its child C to finish. C calls fork() again, splitting into
+# the parent D and its child E. In the parent D we call unshare() and close a
+# pipe shared by D and E to signal to E that D is done with calling unshare().
+# E notices this by using read() and follows up with executing the tools
+# new[ug]idmap on D. E finishes and D continues with doing another fork().
+# This is because when unsharing the PID namespace, we need a PID 1 to be kept
+# alive or otherwise any child processes cannot fork() anymore themselves. So
+# we keep F as PID 1 and finally call exec() in G.
+sub get_unshare_cmd {
+ my $cmd = shift;
+ my $idmap = shift;
+
+ # unsharing the mount namespace (NEWNS) requires CAP_SYS_ADMIN
+ my $unshare_flags
+ = $CLONE_NEWNS | $CLONE_NEWPID | $CLONE_NEWUTS | $CLONE_NEWIPC;
+
+ # we only need to add CLONE_NEWUSER if we are not yet root
+ if ($EFFECTIVE_USER_ID != 0) {
+ $unshare_flags |= $CLONE_NEWUSER;
+ }
+
+ if (0) {
+ $unshare_flags |= $CLONE_NEWNET;
+ }
+
+ # fork a new process and let the child get unshare()ed
+ # we don't want to unshare the parent process
+ my $gcpid = fork() // error "fork() failed: $!";
+ if ($gcpid == 0) {
+ # Create a pipe for the parent process to signal the child process that
+ # it is done with calling unshare() so that the child can go ahead
+ # setting up uid_map and gid_map.
+ pipe my $rfh, my $wfh;
+ # We have to do this dance with forking a process and then modifying
+ # the parent from the child because:
+ # - new[ug]idmap can only be called on a process id after that process
+ # has unshared the user namespace
+ # - a process loses its capabilities if it performs an execve() with
+ # nonzero user ids; see the capabilities(7) man page for details.
+ # - a process that unshared the user namespace by default does not
+ # have the privileges to call new[ug]idmap on itself
+ #
+ # this also works the other way around (the child setting up a user
+ # namespace and being modified from the parent) but that way, the
+ # parent would have to stay around until the child exited (so a pid
+ # would be wasted). Additionally, that variant would require an
+ # additional pipe to let the parent signal the child that it is done
+ # with calling new[ug]idmap. The way it is done here, this signaling
+ # can instead be done by wait()-ing for the exit of the child.
+
+ my $ppid = $$;
+ my $cpid = fork() // error "fork() failed: $!";
+ if ($cpid == 0) {
+ # child
+
+ # Close the writing descriptor at our end of the pipe so that we
+ # see EOF when parent closes its descriptor.
+ close $wfh;
+
+ # Wait for the parent process to finish its unshare() call by
+ # waiting for an EOF.
+ 0 == sysread $rfh, my $c, 1 or error "read() did not receive EOF";
+
+ # the process is already root, so no need for newuidmap/newgidmap
+ if ($EFFECTIVE_USER_ID == 0) {
+ exit 0;
+ }
+
+ # The program's new[ug]idmap have to be used because they are
+ # setuid root. These privileges are needed to map the ids from
+ # /etc/sub[ug]id to the user namespace set up by the parent.
+ # Without these privileges, only the id of the user itself can be
+ # mapped into the new namespace.
+ #
+ # Since new[ug]idmap is setuid root we also don't need to write
+ # "deny" to /proc/$$/setgroups beforehand (this is otherwise
+ # required for unprivileged processes trying to write to
+ # /proc/$$/gid_map since kernel version 3.19 for security reasons)
+ # and therefore the parent process keeps its ability to change its
+ # own group here.
+ #
+ # Since /proc/$ppid/[ug]id_map can only be written to once,
+ # respectively, instead of making multiple calls to new[ug]idmap,
+ # we assemble a command line that makes one call each.
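+ # (illustration only, with made-up numbers: a single run like
+ # "newuidmap $ppid 0 100000 65536" maps 65536 subordinate uids starting
+ # at host uid 100000 onto ids 0..65535 inside the namespace)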
+ my $uidmapcmd = "";
+ my $gidmapcmd = "";
+ foreach (@{$idmap}) {
+ my ($t, $hostid, $nsid, $range) = @{$_};
+ if ($t ne "u" and $t ne "g" and $t ne "b") {
+ error "invalid idmap type: $t";
+ }
+ if ($t eq "u" or $t eq "b") {
+ $uidmapcmd .= " $hostid $nsid $range";
+ }
+ if ($t eq "g" or $t eq "b") {
+ $gidmapcmd .= " $hostid $nsid $range";
+ }
+ }
+ my $idmapcmd = '';
+ if ($uidmapcmd ne "") {
+ 0 == system "newuidmap $ppid $uidmapcmd"
+ or error "newuidmap $ppid $uidmapcmd failed: $!";
+ }
+ if ($gidmapcmd ne "") {
+ 0 == system "newgidmap $ppid $gidmapcmd"
+ or error "newgidmap $ppid $gidmapcmd failed: $!";
+ }
+ exit 0;
+ }
+
+ # parent
+
+ # After fork()-ing, the parent immediately calls unshare...
+ 0 == syscall &SYS_unshare, $unshare_flags
+ or error "unshare() failed: $!";
+
+ # .. and then signals the child process that we are done with the
+ # unshare() call by sending an EOF.
+ close $wfh;
+
+ # Wait for the child process to finish its setup by waiting for its
+ # exit.
+ $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
+ my $exit = $? >> 8;
+ if ($exit != 0) {
+ error "child had a non-zero exit status: $exit";
+ }
+
+ # Currently we are nobody (uid and gid are 65534). So we become root
+ # user and group instead.
+ #
+ # We are using direct syscalls instead of setting $(, $), $< and $>
+ # because then perl would do additional stuff which we don't need or
+ # want here, like checking /proc/sys/kernel/ngroups_max (which might
+ # not exist). It would also call setgroups() in a way that makes
+ # the root user be part of the group unknown.
+ if ($EFFECTIVE_USER_ID != 0) {
+ 0 == syscall &SYS_setgid, 0 or error "setgid failed: $!";
+ 0 == syscall &SYS_setuid, 0 or error "setuid failed: $!";
+ 0 == syscall &SYS_setgroups, 0, 0 or error "setgroups failed: $!";
+ }
+
+ if (1) {
+ # When the pid namespace is also unshared, then processes expect a
+ # master pid to always be alive within the namespace. To achieve
+ # this, we fork() here instead of exec() to always have one dummy
+ # process running as pid 1 inside the namespace. This is also what
+ # the unshare tool does when used with the --fork option.
+ #
+ # Otherwise, without a pid 1, new processes cannot be forked
+ # anymore after pid 1 finished.
+ my $cpid = fork() // error "fork() failed: $!";
+ if ($cpid != 0) {
+ # The parent process will stay alive as pid 1 in this
+ # namespace until the child finishes executing. This is
+ # important because pid 1 must never die or otherwise nothing
+ # new can be forked.
+ $cpid == waitpid $cpid, 0 or error "waitpid() failed: $!";
+ exit($? >> 8);
+ }
+ }
+
+ &{$cmd}();
+
+ exit 0;
+ }
+
+ # parent
+ return $gcpid;
+}
+
+sub havemknod {
+ my $root = shift;
+ my $havemknod = 0;
+ if (-e "$root/test-dev-null") {
+ error "/test-dev-null already exists";
+ }
+ TEST: {
+ # we fork so that we can read STDERR
+ my $pid = open my $fh, '-|' // error "failed to fork(): $!";
+ if ($pid == 0) {
+ open(STDERR, '>&', STDOUT) or error "cannot open STDERR: $!";
+ # we use mknod(1) instead of the system call because creating the
+ # right dev_t argument requires makedev(3)
+ exec 'mknod', "$root/test-dev-null", 'c', '1', '3';
+ }
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ {
+ last TEST unless $? == 0 and $content eq '';
+ last TEST unless -c "$root/test-dev-null";
+ last TEST unless open my $fh, '>', "$root/test-dev-null";
+ last TEST unless print $fh 'test';
+ }
+ $havemknod = 1;
+ }
+ if (-e "$root/test-dev-null") {
+ unlink "$root/test-dev-null"
+ or error "cannot unlink /test-dev-null: $!";
+ }
+ return $havemknod;
+}
+
+# inspired by /usr/share/perl/5.34/pod/perlfaq8.pod
+sub terminal_width {
+ if (!stderr_is_tty()) {
+ return -1;
+ }
+ if (!defined &TIOCGWINSZ) {
+ return -1;
+ }
+ if (!-e "/dev/tty") {
+ return -1;
+ }
+ my $tty_fh;
+ if (!open($tty_fh, "+<", "/dev/tty")) {
+ return -1;
+ }
+ my $winsize = '';
+ if (!ioctl($tty_fh, &TIOCGWINSZ, $winsize)) {
+ return -1;
+ }
+ my (undef, $col, undef, undef) = unpack('S4', $winsize);
+ return $col;
+}
+
+# Prints the current status, the percentage and a progress bar on STDERR if
+# it is an interactive tty and if verbosity is set to 1.
+#
+# * first 12 chars: status
+# * following 7 chars: percentage
+# * progress bar until 79 chars are filled
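+#
+# A line on a wide terminal might thus look roughly like this (illustration
+# only, padding not to scale):
+#
+#   installing:   42.23 [======================>                          ]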
+sub print_progress {
+ if ($verbosity_level != 1) {
+ return;
+ }
+ if (!stderr_is_tty()) {
+ return;
+ }
+ my $perc = shift;
+ my $status = shift;
+ my $len_status = 12;
+ my $len_perc = 7;
+ my $len_prog_min = 10;
+ my $len_prog_max = 60;
+ my $twidth = terminal_width();
+
+ if ($twidth <= $len_status) {
+ return;
+ }
+ # \e[2K clears everything on the current line (i.e. the progress bar)
+ print STDERR "\e[2K";
+ if ($perc eq "done") {
+ print STDERR "done\n";
+ return;
+ }
+ if (defined $status) {
+ printf STDERR "%*s", -$len_status, "$status:";
+ } else {
+ print STDERR (" " x $len_status);
+ }
+ if ($twidth <= $len_status + $len_perc) {
+ print STDERR "\r";
+ return;
+ }
+ if ($perc >= 100) {
+ $perc = 100;
+ }
+ printf STDERR "%*.2f", $len_perc, $perc;
+ if ($twidth <= $len_status + $len_perc + $len_prog_min) {
+ print STDERR "\r";
+ return;
+ }
+ my $len_prog = $twidth - $len_perc - $len_status;
+ if ($len_prog > $len_prog_max) {
+ $len_prog = $len_prog_max;
+ }
+ my $num_x = int($perc * ($len_prog - 3) / 100);
+ my $bar = '=' x $num_x;
+ if ($num_x != ($len_prog - 3)) {
+ $bar .= '>';
+ $bar .= ' ' x ($len_prog - $num_x - 4);
+ }
+ print STDERR " [$bar]\r";
+ return;
+}
+
+sub run_progress {
+ my ($get_exec, $line_handler, $line_has_error, $chdir) = @_;
+ pipe my $rfh, my $wfh;
+ my $got_signal = 0;
+ my $ignore = sub {
+ info "run_progress() received signal $_[0]: waiting for child...";
+ };
+
+ debug("run_progress: exec " . (join ' ', ($get_exec->('${FD}'))));
+
+ # delay signals so that we can fork and change behaviour of the signal
+ # handler in parent and child without getting interrupted
+ my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
+ POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
+
+ my $pid1 = open(my $pipe, '-|') // error "failed to fork(): $!";
+
+ if ($pid1 == 0) {
+ # child: default signal handlers
+ local $SIG{'INT'} = 'DEFAULT';
+ local $SIG{'HUP'} = 'DEFAULT';
+ local $SIG{'PIPE'} = 'DEFAULT';
+ local $SIG{'TERM'} = 'DEFAULT';
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ close $rfh;
+ # Unset the close-on-exec flag, so that the file descriptor does not
+ # get closed when we exec
+ my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
+ fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC)
+ or error "fcntl F_SETFD: $!";
+ my $fd = fileno $wfh;
+ # redirect stderr to stdout so that we can capture it
+ open(STDERR, '>&', STDOUT) or error "cannot open STDOUT: $!";
+ my @execargs = $get_exec->($fd);
+ # before apt 1.5, "apt-get update" attempted to chdir() into the
+ # working directory. This will fail if the current working directory
+ # is not accessible by the user (for example in unshare mode). See
+ # Debian bug #860738
+ if (defined $chdir) {
+ chdir $chdir or error "failed chdir() to $chdir: $!";
+ }
+ eval { Devel::Cover::set_coverage("none") } if $is_covering;
+ exec { $execargs[0] } @execargs
+ or error 'cannot exec() ' . (join ' ', @execargs);
+ }
+ close $wfh;
+
+ # spawn two processes:
+ # parent will parse stdout to look for errors
+ # child will parse $rfh for the progress meter
+ my $pid2 = fork() // error "failed to fork(): $!";
+ if ($pid2 == 0) {
+ # child: default signal handlers
+ local $SIG{'INT'} = 'IGNORE';
+ local $SIG{'HUP'} = 'IGNORE';
+ local $SIG{'PIPE'} = 'IGNORE';
+ local $SIG{'TERM'} = 'IGNORE';
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ if ($verbosity_level != 1 || !stderr_is_tty()) {
+ # no need to print any progress
+ # we still need to consume everything from $rfh or otherwise apt
+ # will block forever if there is too much output
+ local $/;
+ <$rfh>;
+ close $rfh;
+ exit 0;
+ }
+ my $progress = 0.0;
+ my $status = undef;
+ print_progress($progress);
+ while (my $line = <$rfh>) {
+ my ($newprogress, $newstatus) = $line_handler->($line);
+ next unless $newprogress;
+ # start a new line if the new progress value is less than the
+ # previous one
+ if ($newprogress < $progress) {
+ print_progress("done");
+ }
+ if (defined $newstatus) {
+ $status = $newstatus;
+ }
+ print_progress($newprogress, $status);
+ $progress = $newprogress;
+ }
+ print_progress("done");
+
+ exit 0;
+ }
+
+ # parent: ignore signals
+ # by using "local", the original is automatically restored once the
+ # function returns
+ local $SIG{'INT'} = $ignore;
+ local $SIG{'HUP'} = $ignore;
+ local $SIG{'PIPE'} = $ignore;
+ local $SIG{'TERM'} = $ignore;
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ my $output = '';
+ my $has_error = 0;
+ while (my $line = <$pipe>) {
+ $has_error = $line_has_error->($line);
+ if ($verbosity_level >= 2) {
+ print STDERR $line;
+ } else {
+ # capture apt output so that it can be printed if the command failed
+ $output .= $line;
+ }
+ }
+
+ close($pipe);
+ my $fail = 0;
+ if ($? != 0 or $has_error) {
+ $fail = 1;
+ }
+
+ waitpid $pid2, 0;
+ $? == 0 or error "progress parsing failed";
+
+ if ($got_signal) {
+ error "run_progress() received signal: $got_signal";
+ }
+
+ # only print failure after progress output finished or otherwise it
+ # might interfere with the remaining output
+ if ($fail) {
+ if ($verbosity_level >= 1) {
+ print STDERR $output;
+ }
+ error((join ' ', $get_exec->('<$fd>')) . ' failed');
+ }
+ return;
+}
+
+sub run_dpkg_progress {
+ my $options = shift;
+ my @debs = @{ $options->{PKGS} // [] };
+ my $get_exec
+ = sub { return @{ $options->{ARGV} }, "--status-fd=$_[0]", @debs; };
+ my $line_has_error = sub { return 0; };
+ my $num = 0;
+ # each package has one install and one configure step, thus the total
+ # number is twice the number of packages
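+ # (illustration only: with 3 packages, $total is 6 and the progress
+ # advances in steps of roughly 16.7%)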
+ my $total = (scalar @debs) * 2;
+ my $line_handler = sub {
+ my $status = undef;
+ if ($_[0] =~ /^processing: (install|configure): /) {
+ if ($1 eq 'install') {
+ $status = 'installing';
+ } elsif ($1 eq 'configure') {
+ $status = 'configuring';
+ } else {
+ error "unknown status: $1";
+ }
+ $num += 1;
+ }
+ if ($total == 0) {
+ return 0, $status;
+ } else {
+ return $num / $total * 100, $status;
+ }
+ };
+ run_progress $get_exec, $line_handler, $line_has_error;
+ return;
+}
+
+sub run_apt_progress {
+ my $options = shift;
+ my @debs = @{ $options->{PKGS} // [] };
+
+ if ($verbosity_level >= 3) {
+ my @apt_debug_opts = qw(
+ -oDebug::pkgProblemResolver=true
+ -oDebug::pkgDepCache::Marker=1
+ -oDebug::pkgDepCache::AutoInstall=1
+ );
+ push @{ $options->{ARGV} }, @apt_debug_opts;
+ }
+
+ my $get_exec = sub {
+ my @prefix = ();
+ my @opts = ();
+ return (
+ @prefix,
+ @{ $options->{ARGV} },
+ @opts,
+ "-oAPT::Status-Fd=$_[0]",
+ # prevent apt from messing up the terminal and allow dpkg to
+ # receive SIGINT and quit immediately without waiting for
+ # maintainer script to finish
+ '-oDpkg::Use-Pty=false',
+ @debs
+ );
+ };
+ my $line_has_error = sub { return 0; };
+ if ($options->{FIND_APT_WARNINGS}) {
+ $line_has_error = sub {
+ # apt-get doesn't report a non-zero exit if the update failed.
+ # Thus, we have to parse its output. See #778357, #776152, #696335
+ # and #745735 for the parsing bugs as well as #594813, #696335,
+ # #776152, #778357 and #953726 for non-zero exit on transient
+ # network errors.
+ #
+ # For example, we want to fail with the following warning:
+ # W: Some index files failed to download. They have been ignored,
+ # or old ones used instead.
+ # But since this message is meant for human consumption it is not
+ # guaranteed to be stable across different apt versions and may
+ # change arbitrarily in the future. Thus, we error out on any W:
+ # lines as well. The downside is, that apt also unconditionally
+ # and by design prints a warning for unsigned repositories, even
+ # if they were allowed with Acquire::AllowInsecureRepositories "1"
+ # or with trusted=yes.
+ #
+ # A workaround was introduced by apt 2.1.16 with the --error-on=any
+ # option to apt-get update.
+ if ($_[0] =~ /^(W: |Err:)/) {
+ return 1;
+ }
+ return 0;
+ };
+ }
+ my $line_handler = sub {
+ if ($_[0] =~ /(pmstatus|dlstatus):[^:]+:(\d+\.\d+):.*/) {
+ my $status = undef;
+ if ($1 eq 'pmstatus') {
+ $status = "installing";
+ } elsif ($1 eq 'dlstatus') {
+ $status = "downloading";
+ } else {
+ error "unknown status: $1";
+ }
+ return $2, $status;
+ }
+ };
+ run_progress $get_exec, $line_handler, $line_has_error, $options->{CHDIR};
+ return;
+}
+
+sub run_apt_download_progress {
+ my $options = shift;
+ if ($options->{dryrun}) {
+ info "simulate downloading packages with apt...";
+ } else {
+ info "downloading packages with apt...";
+ }
+
+ pipe my $rfh, my $wfh;
+ my $pid = open my $fh, '-|' // error "fork() failed: $!";
+ if ($pid == 0) {
+ close $wfh;
+ # read until parent process closes $wfh
+ my $content = do { local $/; <$rfh> };
+ close $rfh;
+ # the parent is done -- pass what we read back to it
+ print $content;
+ exit 0;
+ }
+ close $rfh;
+ # Unset the close-on-exec flag, so that the file descriptor does not
+ # get closed when we exec
+ my $flags = fcntl($wfh, F_GETFD, 0) or error "fcntl F_GETFD: $!";
+ fcntl($wfh, F_SETFD, $flags & ~FD_CLOEXEC) or error "fcntl F_SETFD: $!";
+ my $fd = fileno $wfh;
+ # run_apt_progress() can raise an exception which would leave this function
+ # without cleaning up the child process we started, making mmdebstrap hang
+ # in case run_apt_progress() fails -- so wrap this in eval() instead
+ eval {
+ # 2022-05-02, #debian-apt on OFTC, times in UTC+2
+ # 16:57 < josch> DonKult: how is -oDebug::pkgDpkgPm=1
+ # -oDir::Log=/dev/null a "fancy no-op"?
+ # 11:52 < DonKult> josch: "fancy no-op" in sofar as it does nothing to
+ # the system even through its not in a special mode
+ # ala simulation or download-only. It does all the
+ # things it normally does, except that it just prints
+ # the dpkg calls instead of execv() them which in
+ # practice amounts means it does nothing (the Dir::Log
+ # just prevents libapt from creating the /var/log/apt
+ # directories. As the code creates them even if no
+ # logs will be placed there…). As said, midterm an apt
+ # --print-install-packages or something would be nice
+ # to avoid running everything.
+ run_apt_progress({
+ ARGV => [
+ 'apt-get',
+ '--yes',
+ '-oDebug::pkgDpkgPm=1',
+ '-oDir::Log=/dev/null',
+ $options->{dryrun}
+ ? '-oAPT::Get::Simulate=true'
+ : (
+ "-oAPT::Keep-Fds::=$fd",
+ "-oDPkg::Tools::options::'cat >&$fd'::InfoFD=$fd",
+ "-oDpkg::Pre-Install-Pkgs::=cat >&$fd",
+ # no need to lock the database if we are just downloading
+ "-oDebug::NoLocking=1",
+ # no need for pty magic if we write no log
+ "-oDpkg::Use-Pty=0",
+ # unset this or otherwise "cat >&$fd" will fail
+ "-oDPkg::Chroot-Directory=",
+ ),
+ @{ $options->{APT_ARGV} },
+ ],
+ });
+ };
+ my $err = '';
+ if ($@) {
+ $err = "apt download failed: $@";
+ }
+ # signal the child process that we are done
+ close $wfh;
+ # and then read from it what it got
+ my @listofdebs = <$fh>;
+ close $fh;
+ if ($? != 0) {
+ $err = "status child failed";
+ }
+ if ($err) {
+ error $err;
+ }
+ # remove trailing newlines
+ chomp @listofdebs;
+ return @listofdebs;
+}
+
+sub setup_mounts {
+ my $options = shift;
+
+ my @cleanup_tasks = ();
+
+ eval {
+ if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
+ # if more than essential should be installed, make the system look
+ # more like a real one by creating or bind-mounting the device
+ # nodes
+ foreach my $file (@devfiles) {
+ my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
+ = @{$file};
+ next if $fname eq '';
+ if ($type == 0) { # normal file
+ error "type 0 not implemented";
+ } elsif ($type == 1) { # hardlink
+ error "type 1 not implemented";
+ } elsif ($type == 2) { # symlink
+ if (!$options->{havemknod}) {
+ # If we had mknod, then the symlink was already created
+ # in the run_setup function.
+ if (!-d "$options->{root}/dev") {
+ warning(
+ "skipping creation of ./dev/$fname because the"
+ . " /dev directory is missing in the target"
+ );
+ next;
+ }
+ if (-e "$options->{root}/dev/$fname") {
+ warning(
+ "skipping creation of ./dev/$fname because it"
+ . " already exists in the target");
+ next;
+ }
+ push @cleanup_tasks, sub {
+ unlink "$options->{root}/dev/$fname"
+ or warning("cannot unlink ./dev/$fname: $!");
+ };
+ symlink $linkname, "$options->{root}/dev/$fname"
+ or warning
+ "cannot create symlink ./dev/$fname -> $linkname";
+ }
+ } elsif ($type == 3 or $type == 4) {
+ # character/block special
+ if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
+ @{ $options->{skip} }) {
+ info "skipping chroot/mount/dev as requested";
+ } elsif (!$options->{canmount}) {
+ warning "skipping bind-mounting ./dev/$fname";
+ } elsif (!$options->{havemknod}) {
+ if (!-d "$options->{root}/dev") {
+ warning(
+ "skipping creation of ./dev/$fname because the"
+ . " /dev directory is missing in the target"
+ );
+ next;
+ }
+ if ($fname eq "ptmx") {
+ # We must not bind-mount ptmx from the outside or
+ # otherwise posix_openpt() will fail. Instead
+ # /dev/ptmx must refer to /dev/pts/ptmx either by
+ # symlink or by bind-mounting. We choose a symlink.
+ symlink '/dev/pts/ptmx',
+ "$options->{root}/dev/ptmx"
+ or error "cannot create /dev/pts/ptmx symlink";
+ push @cleanup_tasks, sub {
+ unlink "$options->{root}/dev/ptmx"
+ or warning "unlink /dev/ptmx";
+ };
+ next;
+ }
+ if (!-e "/dev/$fname") {
+ warning("skipping creation of ./dev/$fname because"
+ . " /dev/$fname does not exist"
+ . " on the outside");
+ next;
+ }
+ if (!-c "/dev/$fname") {
+ warning("skipping creation of ./dev/$fname because"
+ . " /dev/$fname on the outside is not a"
+ . " character special file");
+ next;
+ }
+ open my $fh, '>', "$options->{root}/dev/$fname"
+ or error
+ "cannot open $options->{root}/dev/$fname: $!";
+ close $fh;
+ my @umountopts = ();
+ if ($options->{mode} eq 'unshare') {
+ push @umountopts, '--no-mtab';
+ }
+ push @cleanup_tasks, sub {
+ 0 == system('umount', @umountopts,
+ "$options->{root}/dev/$fname")
+ or warning("umount ./dev/$fname failed: $?");
+ unlink "$options->{root}/dev/$fname"
+ or warning("cannot unlink ./dev/$fname: $!");
+ };
+ 0 == system('mount', '-o', 'bind', "/dev/$fname",
+ "$options->{root}/dev/$fname")
+ or error "mount ./dev/$fname failed: $?";
+ }
+ } elsif ($type == 5) {
+ # directory
+ if (any { $_ =~ '^chroot/mount(?:/dev)?$' }
+ @{ $options->{skip} }) {
+ info "skipping chroot/mount/dev as requested";
+ } elsif (!$options->{canmount}) {
+ warning "skipping bind-mounting ./dev/$fname";
+ } else {
+ if (!-d "$options->{root}/dev") {
+ warning(
+ "skipping creation of ./dev/$fname because the"
+ . " /dev directory is missing in the target"
+ );
+ next;
+ }
+ if (!-e "/dev/$fname" && $fname ne "pts/") {
+ warning("skipping creation of ./dev/$fname because"
+ . " /dev/$fname does not exist"
+ . " on the outside");
+ next;
+ }
+ if (!-d "/dev/$fname" && $fname ne "pts/") {
+ warning("skipping creation of ./dev/$fname because"
+ . " /dev/$fname on the outside is not a"
+ . " directory");
+ next;
+ }
+ if (!$options->{havemknod}) {
+ # If we had mknod, then the directory to bind-mount
+ # into was already created in the run_setup
+ # function.
+ push @cleanup_tasks, sub {
+ rmdir "$options->{root}/dev/$fname"
+ or warning("cannot rmdir ./dev/$fname: $!");
+ };
+ if (-e "$options->{root}/dev/$fname") {
+ if (!-d "$options->{root}/dev/$fname") {
+ error
+ "./dev/$fname already exists but is not"
+ . " a directory";
+ }
+ } else {
+ my $num_created
+ = make_path "$options->{root}/dev/$fname",
+ { error => \my $err };
+ if ($err && @$err) {
+ error(
+ join "; ",
+ (
+ map {
+ "cannot create "
+ . (join ": ", %{$_})
+ } @$err
+ ));
+ } elsif ($num_created == 0) {
+ error( "cannot create $options->{root}"
+ . "/dev/$fname");
+ }
+ }
+ chmod $mode, "$options->{root}/dev/$fname"
+ or error "cannot chmod ./dev/$fname: $!";
+ }
+ my @umountopts = ();
+ if ($options->{mode} eq 'unshare') {
+ push @umountopts, '--no-mtab';
+ }
+ push @cleanup_tasks, sub {
+ 0 == system('umount', @umountopts,
+ "$options->{root}/dev/$fname")
+ or warning("umount ./dev/$fname failed: $?");
+ };
+ if ($fname eq "pts/") {
+ # We cannot just bind-mount /dev/pts from the host
+ # as doing so will make posix_openpt() fail.
+ # Instead, we need to mount a new devpts.
+ # We need ptmxmode=666 because /dev/ptmx is a
+ # symlink to /dev/pts/ptmx and without it
+ # posix_openpt() will fail if we are not the root
+ # user. See also:
+ # kernel.o/doc/Documentation/filesystems/devpts.txt
+ # salsa.d.o/debian/schroot/-/merge_requests/2
+ # https://bugs.debian.org/856877
+ # https://bugs.debian.org/817236
+ 0 == system(
+ 'mount',
+ '-t',
+ 'devpts',
+ 'none',
+ "$options->{root}/dev/pts",
+ '-o',
+ 'noexec,nosuid,uid=5,mode=620,ptmxmode=666'
+ ) or error "mount /dev/pts failed";
+ } else {
+ 0 == system('mount', '-o', 'bind', "/dev/$fname",
+ "$options->{root}/dev/$fname")
+ or error "mount ./dev/$fname failed: $?";
+ }
+ }
+ } else {
+ error "unsupported type: $type";
+ }
+ }
+ } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
+ # we cannot mount in fakechroot mode
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+ # We can only mount /proc and /sys after extracting the essential
+ # set because if we mounted them earlier, base-files would not be able
+ # to extract those directories
+ if ( (any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && (any { $_ =~ '^chroot/mount(?:/sys)?$' } @{ $options->{skip} }))
+ {
+ info "skipping chroot/mount/sys as requested";
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !$options->{canmount}) {
+ warning "skipping mount sysfs";
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-d "$options->{root}/sys") {
+ warning("skipping mounting of sysfs because the"
+ . " /sys directory is missing in the target");
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-e "/sys") {
+ warning("skipping mounting /sys because"
+ . " /sys does not exist on the outside");
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-d "/sys") {
+ warning("skipping mounting /sys because"
+ . " /sys on the outside is not a directory");
+ } elsif ($options->{mode} eq 'root') {
+ # we don't know whether we run in root mode inside an unshared
+ # user namespace or as real root so we first try the real mount and
+ # then fall back to mounting in a way that works in unshared mode
+ if (
+ 0 == system(
+ 'mount', '-t',
+ 'sysfs', '-o',
+ 'ro,nosuid,nodev,noexec', 'sys',
+ "$options->{root}/sys"
+ )
+ ) {
+ push @cleanup_tasks, sub {
+ 0 == system('umount', "$options->{root}/sys")
+ or warning("umount /sys failed: $?");
+ };
+ } elsif (
+ 0 == system('mount', '-o', 'rbind', '/sys',
+ "$options->{root}/sys")) {
+ push @cleanup_tasks, sub {
+ # since we cannot write to /etc/mtab we need --no-mtab
+ # unmounting /sys only seems to be successful with --lazy
+ 0 == system(
+ 'umount', '--no-mtab',
+ '--lazy', "$options->{root}/sys"
+ ) or warning("umount /sys failed: $?");
+ };
+ } else {
+ error "mount /sys failed: $?";
+ }
+ } elsif ($options->{mode} eq 'unshare') {
+ # naturally we have to clean up after ourselves in sudo mode where
+ # we do a real mount. But we also need to unmount in unshare mode
+ # because otherwise, even with the --one-file-system tar option,
+ # the permissions of the mount source will be stored and not the
+ # mount target (the directory)
+ push @cleanup_tasks, sub {
+ # since we cannot write to /etc/mtab we need --no-mtab
+ # unmounting /sys only seems to be successful with --lazy
+ 0 == system('umount', '--no-mtab', '--lazy',
+ "$options->{root}/sys")
+ or warning("umount /sys failed: $?");
+ };
+ # without the network namespace unshared, we cannot mount a new
+ # sysfs. Since we need network, we just bind-mount.
+ #
+ # we have to rbind because just using bind results in "wrong fs
+ # type, bad option, bad superblock" error
+ 0 == system('mount', '-o', 'rbind', '/sys', "$options->{root}/sys")
+ or error "mount /sys failed: $?";
+ } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
+ # we cannot mount in fakechroot mode
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+ if (
+ (any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && (any { $_ =~ '^chroot/mount(?:/proc)?$' } @{ $options->{skip} })
+ ) {
+ info "skipping chroot/mount/proc as requested";
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !$options->{canmount}) {
+ warning "skipping mount proc";
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-d "$options->{root}/proc") {
+ warning("skipping mounting of proc because the"
+ . " /proc directory is missing in the target");
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-e "/proc") {
+ warning("skipping mounting /proc because"
+ . " /proc does not exist on the outside");
+ } elsif ((any { $_ eq $options->{mode} } ('root', 'unshare'))
+ && !-d "/proc") {
+ warning("skipping mounting /proc because"
+ . " /proc on the outside is not a directory");
+ } elsif (any { $_ eq $options->{mode} } ('root', 'unshare')) {
+ # we don't know whether we run in root mode inside an unshared
+ # user namespace or as real root so we first try the real mount and
+ # then fall back to mounting in a way that works in unshared mode
+ if (
+ $options->{mode} eq 'root'
+ && 0 == system(
+ 'mount', '-t', 'proc', '-o', 'ro', 'proc',
+ "$options->{root}/proc"
+ )
+ ) {
+ push @cleanup_tasks, sub {
+ # some maintainer scripts mount additional stuff into /proc
+ # which we need to unmount beforehand
+ if (
+ is_mountpoint(
+ $options->{root} . "/proc/sys/fs/binfmt_misc"
+ )
+ ) {
+ 0 == system('umount',
+ "$options->{root}/proc/sys/fs/binfmt_misc")
+ or warning(
+ "umount /proc/sys/fs/binfmt_misc failed: $?");
+ }
+ 0 == system('umount', "$options->{root}/proc")
+ or warning("umount /proc failed: $?");
+ };
+ } elsif (
+ 0 == system('mount', '-t', 'proc', 'proc',
+ "$options->{root}/proc")) {
+ push @cleanup_tasks, sub {
+ # since we cannot write to /etc/mtab we need --no-mtab
+ 0 == system('umount', '--no-mtab', "$options->{root}/proc")
+ or warning("umount /proc failed: $?");
+ };
+ } elsif (
+ # if mounting proc failed, try bind-mounting it read-only as a
+ # last resort
+ 0 == system(
+ 'mount', '-o',
+ 'rbind', '/proc',
+ "$options->{root}/proc"
+ )
+ ) {
+ warning("since mounting /proc normally failed, /proc is now "
+ . "bind-mounted instead");
+ # to make sure that changes (like unmounting) to the
+ # bind-mounted /proc do not affect the outside /proc, change
+ # all the bind-mounts under /proc to be a slave mount.
+ if (
+ 0 != system('mount', '--make-rslave',
+ "$options->{root}/proc")) {
+ warning("mount --make-rslave /proc failed");
+ }
+ push @cleanup_tasks, sub {
+ # since we cannot write to /etc/mtab we need --no-mtab
+ 0 == system(
+ 'umount', '--no-mtab',
+ '--lazy', "$options->{root}/proc"
+ ) or warning("umount /proc failed: $?");
+ };
+ } else {
+ error "mount /proc failed: $?";
+ }
+ } elsif (any { $_ eq $options->{mode} } ('fakechroot', 'chrootless')) {
+ # we cannot mount in fakechroot mode
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ # prevent daemons from starting
+ # the directory might not exist in custom variant, for example
+ #
+ # ideally, we should use update-alternatives but we cannot rely on it
+ # existing inside the chroot
+ #
+ # See #911290 for more problems of this interface
+ if (any { $_ eq 'chroot/policy-rc.d' } @{ $options->{skip} }) {
+ info "skipping chroot/policy-rc.d as requested";
+ } else {
+ push @cleanup_tasks, sub {
+ if (-f "$options->{root}/usr/sbin/policy-rc.d") {
+ unlink "$options->{root}/usr/sbin/policy-rc.d"
+ or error "cannot unlink policy-rc.d: $!";
+ }
+ };
+ if (-d "$options->{root}/usr/sbin/") {
+ open my $fh, '>', "$options->{root}/usr/sbin/policy-rc.d"
+ or error "cannot open policy-rc.d: $!";
+ print $fh "#!/bin/sh\n";
+ print $fh "exit 101\n";
+ close $fh;
+ chmod 0755, "$options->{root}/usr/sbin/policy-rc.d"
+ or error "cannot chmod policy-rc.d: $!";
+ }
+ }
+
+ # the file might not exist if it was removed in a hook
+ if (any { $_ eq 'chroot/start-stop-daemon' } @{ $options->{skip} }) {
+ info "skipping chroot/start-stop-daemon as requested";
+ } else {
+ # $options->{root} must not be part of $ssdloc but must instead be
+ # evaluated at the time the cleanup is run; otherwise, when
+ # performing a pivot_root, the ssd location would still be prefixed
+ # with the chroot path even though we changed root
+ my $ssdloc;
+ if (-f "$options->{root}/sbin/start-stop-daemon") {
+ $ssdloc = "/sbin/start-stop-daemon";
+ } elsif (-f "$options->{root}/usr/sbin/start-stop-daemon") {
+ $ssdloc = "/usr/sbin/start-stop-daemon";
+ }
+ push @cleanup_tasks, sub {
+ return unless length $ssdloc;
+ if (-e "$options->{root}/$ssdloc.REAL") {
+ move(
+ "$options->{root}/$ssdloc.REAL",
+ "$options->{root}/$ssdloc"
+ ) or error "cannot move start-stop-daemon: $!";
+ }
+ };
+ if (length $ssdloc) {
+ if (-e "$options->{root}/$ssdloc.REAL") {
+ error "$options->{root}/$ssdloc.REAL already exists";
+ }
+ move(
+ "$options->{root}/$ssdloc",
+ "$options->{root}/$ssdloc.REAL"
+ ) or error "cannot move start-stop-daemon: $!";
+ open my $fh, '>', "$options->{root}/$ssdloc"
+ or error "cannot open start-stop-daemon: $!";
+ print $fh "#!/bin/sh\n";
+ print $fh
+ "echo \"Warning: Fake start-stop-daemon called, doing"
+ . " nothing\">&2\n";
+ close $fh;
+ chmod 0755, "$options->{root}/$ssdloc"
+ or error "cannot chmod start-stop-daemon: $!";
+ }
+ }
+ };
+
+ if ($@) {
+ error "setup_mounts failed: $@";
+ }
+ return @cleanup_tasks;
+}
+
+sub run_hooks {
+ my $name = shift;
+ my $options = shift;
+ my $essential_pkgs = shift;
+
+ if (scalar @{ $options->{"${name}_hook"} } == 0) {
+ return;
+ }
+
+ if ($options->{dryrun}) {
+ info "not running ${name}-hooks because of --dry-run";
+ return;
+ }
+
+ my @env_opts = ();
+ # At this point TMPDIR is set to "$options->{root}/tmp". This is to have a
+ # writable TMPDIR even in unshare mode. But if TMPDIR is still set when
+ # running hooks, then every hook script calling chroot will have to wrap
+ # that into an "env --unset=TMPDIR". To avoid this, we unset TMPDIR here.
+ # If the hook script needs a writable TMPDIR, then it can always use /tmp
+ # inside the chroot. This is also why we do not set a new MMDEBSTRAP_TMPDIR
+ # environment variable.
+ if (length $ENV{TMPDIR}) {
+ push @env_opts, '--unset=TMPDIR';
+ }
+ # The APT_CONFIG variable, if set, will confuse any manual calls to
+ # apt-get. If you want to use the same config used by mmdebstrap, the
+ # original value is stored in MMDEBSTRAP_APT_CONFIG.
+ if (length $ENV{APT_CONFIG}) {
+ push @env_opts, '--unset=APT_CONFIG';
+ push @env_opts, "MMDEBSTRAP_APT_CONFIG=$ENV{APT_CONFIG}";
+ }
+ # A hook script that wants to call mmdebstrap with --hook-helper needs to
+ # know how mmdebstrap was executed
+ push @env_opts, "MMDEBSTRAP_ARGV0=$PROGRAM_NAME";
+ # Storing the mode is important for hook scripts to potentially change
+ # their behavior depending on the mode. It's also important for when the
+ # hook wants to use the mmdebstrap --hook-helper.
+ push @env_opts, "MMDEBSTRAP_MODE=$options->{mode}";
+ if (defined $options->{suite}) {
+ push @env_opts, "MMDEBSTRAP_SUITE=$options->{suite}";
+ }
+ push @env_opts, "MMDEBSTRAP_FORMAT=$options->{format}";
+ # Storing the hook name is important for hook scripts to potentially change
+ # their behavior depending on the hook. It's also important for when the
+ # hook wants to use the mmdebstrap --hook-helper.
+ push @env_opts, "MMDEBSTRAP_HOOK=$name";
+ # This is the file descriptor of the socket that the mmdebstrap
+ # --hook-helper can write to and read from to communicate with the outside.
+ push @env_opts, ("MMDEBSTRAP_HOOKSOCK=" . fileno($options->{hooksock}));
+ # Store the verbosity of mmdebstrap so that hooks can be just as verbose
+ # as the mmdebstrap invocation that called them.
+ push @env_opts, ("MMDEBSTRAP_VERBOSITY=" . $verbosity_level);
+ # Store the packages given via --include in an environment variable so that
+ # hooks can, for example, make .deb files available inside the chroot.
+ {
+ my @escaped_includes = @{ $options->{include} };
+ foreach my $incl (@escaped_includes) {
+ # We have to encode commas so that values containing commas can
+ # be stored in the list. Since we encode using percent-encoding
+ # (urlencoding) we also have to encode the percent sign.
+ $incl =~ s/%/%25/g;
+ $incl =~ s/,/%2C/g;
+ }
+ push @env_opts,
+ ("MMDEBSTRAP_INCLUDE=" . (join ",", @escaped_includes));
+ }
+ # Give the extract hook access to the essential packages that are about to
+ # be installed
+ if ($name eq "extract" and scalar @{$essential_pkgs} > 0) {
+ push @env_opts,
+ ("MMDEBSTRAP_ESSENTIAL=" . (join " ", @{$essential_pkgs}));
+ }
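+ # the container environment variable is the common convention by which
+ # software (for example systemd) detects that it runs inside a container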
+ if ($options->{mode} eq 'unshare') {
+ push @env_opts, "container=mmdebstrap-unshare";
+ }
+
+ # Unset the close-on-exec flag, so that the file descriptor does not
+ # get closed when we exec
+ my $flags = fcntl($options->{hooksock}, F_GETFD, 0)
+ or error "fcntl F_GETFD: $!";
+ fcntl($options->{hooksock}, F_SETFD, $flags & ~FD_CLOEXEC)
+ or error "fcntl F_SETFD: $!";
+
+ {
+ foreach my $script (@{ $options->{"${name}_hook"} }) {
+ my $type = $script->[0];
+ $script = $script->[1];
+
+ if ($type eq "pivoted") {
+ info "running --chrooted-$name-hook in shell: sh -c "
+ . "'$script'";
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ # child
+ my @cmdprefix = ();
+ if ($options->{mode} eq 'fakechroot') {
+ # we are calling the chroot executable instead of
+ # chrooting the process so that fakechroot can handle
+ # it
+ @cmdprefix = ('chroot', $options->{root});
+ } elsif ($options->{mode} eq 'root') {
+ # unsharing the mount namespace is not enough for
+ # pivot_root to work as root (why?). Unsharing the user
+ # namespace as well (but without remapping) makes
+ # pivot_root work (why??) but still makes later lazy
+ # umounts fail (why???). Since pivot_root is mainly
+ # useful for being able to run unshare mode inside
+ # unshare mode, we fall back to just calling chroot()
+ # until somebody has motivation and time to figure out
+ # what is going on.
+ chroot $options->{root}
+ or error "failed to chroot(): $!";
+ $options->{root} = "/";
+ chdir "/" or error "failed chdir() to /: $!";
+ } elsif ($options->{mode} eq 'unshare') {
+ 0 == syscall &SYS_unshare, $CLONE_NEWNS
+ or error "unshare() failed: $!";
+ pivot_root($options->{root});
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+ 0 == system(@cmdprefix, 'env', @env_opts, 'sh', '-c',
+ $script)
+ or error "command failed: $script";
+ exit 0;
+ }
+ waitpid($pid, 0);
+ $? == 0 or error "chrooted hook failed with exit code $?";
+ next;
+ }
+
+ # inode and device number of chroot before
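+ # these values are compared against the device and inode of "/" after
+ # the hook ran so that a pivot_root performed by the hook is detected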
+ my ($dev_before, $ino_before, undef) = stat($options->{root});
+
+ if (
+ $script =~ /^(
+ copy-in|copy-out
+ |tar-in|tar-out
+ |upload|download
+ |sync-in|sync-out
+ )\ /x
+ ) {
+ info "running special hook: $script";
+ if ((any { $_ eq $options->{variant} } ('extract', 'custom'))
+ and $options->{mode} eq 'fakechroot'
+ and $name ne 'setup') {
+ info "the copy-in, copy-out, tar-in and tar-out commands"
+ . " in fakechroot mode might fail in"
+ . " extract and custom variants because there might be"
+ . " no tar inside the chroot";
+ }
+
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ # whatever the script writes on stdout is sent to the
+ # socket
+ # whatever is written to the socket, send to stdin
+ open(STDOUT, '>&', $options->{hooksock})
+ or error "cannot open STDOUT: $!";
+ open(STDIN, '<&', $options->{hooksock})
+ or error "cannot open STDIN: $!";
+
+ # Text::ParseWords::shellwords does for perl what shlex
+ # does for python
+ my @args = shellwords $script;
+ hookhelper($options->{root}, $options->{mode}, $name,
+ (join ',', @{ $options->{skip} }),
+ $verbosity_level, @args);
+ exit 0;
+ }
+ waitpid($pid, 0);
+ $? == 0 or error "special hook failed with exit code $?";
+ } elsif (-x $script || $script !~ m/[^\w@\%+=:,.\/-]/a) {
+ info "running --$name-hook directly: $script $options->{root}";
+ # execute it directly if it's an executable file
+ # or if there are no shell metacharacters
+ # (the /a regex modifier makes \w match only ASCII)
+ 0 == system('env', @env_opts, $script, $options->{root})
+ or error "command failed: $script";
+ } else {
+ info "running --$name-hook in shell: sh -c '$script' exec"
+ . " $options->{root}";
+ # otherwise, wrap everything in sh -c
+ 0 == system('env', @env_opts,
+ 'sh', '-c', $script, 'exec', $options->{root})
+ or error "command failed: $script";
+ }
+
+ # If the chroot directory vanished, check if pivot_root was
+ # performed.
+ #
+ # Running pivot_root is only really useful in the customize-hooks
+ # because mmdebstrap uses apt from the outside to install packages
+ # and that will fail after pivot_root because the process doesn't
+ # have access to the system on the outside anymore.
+ if (!-e $options->{root}) {
+ my ($dev_root, $ino_root, undef) = stat("/");
+ if ($dev_before == $dev_root and $ino_before == $ino_root) {
+ info "detected pivot_root, changing chroot directory to /";
+ # the old chroot directory is now /
+ # the hook probably executed pivot_root
+ $options->{root} = "/";
+ chdir "/" or error "failed chdir() to /: $!";
+ } else {
+ error "chroot directory $options->{root} vanished";
+ }
+ }
+ }
+ };
+
+ # Restore flags
+ fcntl($options->{hooksock}, F_SETFD, $flags) or error "fcntl F_SETFD: $!";
+ return;
+}
+
+sub setup {
+ my $options = shift;
+
+ foreach my $key (sort keys %{$options}) {
+ my $value = $options->{$key};
+ if (!defined $value) {
+ next;
+ }
+ if (ref $value eq '') {
+ debug "$key: $options->{$key}";
+ } elsif (ref $value eq 'ARRAY') {
+ debug "$key: [" . (join ', ', @{$value}) . "]";
+ } elsif (ref $value eq 'GLOB') {
+ debug "$key: GLOB";
+ } else {
+ error "unknown type for key $key: " . (ref $value);
+ }
+ }
+
+ if (-e $options->{apttrusted} && !-r $options->{apttrusted}) {
+ warning "cannot read $options->{apttrusted}";
+ }
+ if (-e $options->{apttrustedparts} && !-r $options->{apttrustedparts}) {
+ warning "cannot read $options->{apttrustedparts}";
+ }
+
+ if (any { $_ eq 'setup' } @{ $options->{skip} }) {
+ info "skipping setup as requested";
+ } else {
+ run_setup($options);
+ }
+
+ run_hooks('setup', $options);
+
+ # apt runs dpkg from inside the chroot and directly passes the filename to
+ # dpkg. Hence, the included files on the outside must be present under the
+ # same path on the inside. If they are not, dpkg cannot find them.
+ if (scalar(grep { /^\// } @{ $options->{include} }) > 0) {
+ my $ret = 0;
+ foreach my $f (grep { /^\// } @{ $options->{include} }) {
+ next if -e "$options->{root}/$f";
+ warning
+ "path given via --include is not present inside the chroot: $f";
+ $ret = 1;
+ }
+ if ($ret != 0) {
+ warning("apt runs chrooted dpkg which needs access to the "
+ . "package paths given via --include inside the chroot.");
+ warning "maybe try running mmdebstrap with "
+ . "--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount";
+ }
+ }
+
+ if (any { $_ eq 'update' } @{ $options->{skip} }) {
+ info "skipping update as requested";
+ } else {
+ run_update($options);
+ }
+
+ (my $essential_pkgs, my $cached_debs) = run_download($options);
+
+ # in theory, we don't have to extract the packages in chrootless mode
+ # but we do it anyway because otherwise directory creation timestamps
+ # will differ compared to non-chrootless and we want to create bit-by-bit
+ # identical tar output
+ #
+ # FIXME: dpkg could be changed to produce the same results
+ run_extract($options, $essential_pkgs);
+
+ # setup mounts
+ my @cleanup_tasks = ();
+ my $cleanup = sub {
+ my $signal = $_[0];
+ while (my $task = pop @cleanup_tasks) {
+ $task->();
+ }
+ if ($signal) {
+ warning "pid $PID cought signal: $signal";
+ exit 1;
+ }
+ };
+
+ # we only need to setup the mounts if there is anything to do
+ if ( $options->{variant} ne 'custom'
+ or scalar @{ $options->{include} } > 0
+ or scalar @{ $options->{"extract_hook"} } > 0
+ or scalar @{ $options->{"essential_hook"} } > 0
+ or scalar @{ $options->{"customize_hook"} } > 0) {
+ local $SIG{INT} = $cleanup;
+ local $SIG{HUP} = $cleanup;
+ local $SIG{PIPE} = $cleanup;
+ local $SIG{TERM} = $cleanup;
+
+ @cleanup_tasks = setup_mounts($options);
+ }
+
+ eval {
+ my $chrootcmd = [];
+ if ($options->{variant} ne 'extract') {
+ if ($options->{mode} ne 'chrootless') {
+ $chrootcmd = run_prepare($options);
+ }
+ }
+
+ run_hooks('extract', $options, $essential_pkgs);
+
+ if ($options->{variant} ne 'extract') {
+ run_essential($options, $essential_pkgs, $chrootcmd, $cached_debs);
+
+ run_hooks('essential', $options);
+
+ run_install($options);
+
+ run_hooks('customize', $options);
+ }
+ };
+
+ my $msg = $@;
+
+ $cleanup->(0);
+ if ($msg) {
+ error "setup failed: $msg";
+ }
+
+ if (any { $_ eq 'cleanup' } @{ $options->{skip} }) {
+ info "skipping cleanup as requested";
+ } else {
+ run_cleanup($options);
+ }
+
+ return;
+}
+
+sub run_setup() {
+ my $options = shift;
+
+ {
+ my @directories = (
+ '/etc/apt/apt.conf.d', '/etc/apt/sources.list.d',
+ '/etc/apt/preferences.d', '/var/cache/apt',
+ '/var/lib/apt/lists/partial', '/tmp'
+ );
+ # we need /var/lib/dpkg in case we need to write to /var/lib/dpkg/arch
+ push @directories, '/var/lib/dpkg';
+ # since we do not know the dpkg version inside the chroot at this
+ # point, we can only omit it in chrootless mode
+ if ($options->{mode} ne 'chrootless'
+ or length $options->{dpkgopts} > 0) {
+ push @directories, '/etc/dpkg/dpkg.cfg.d/';
+ }
+ # if dpkg and apt operate from the outside we need some more
+ # directories because dpkg and apt might not even be installed inside
+ # the chroot. Thus, the following block is not strictly necessary in
+ # chrootless mode. We unconditionally add it anyway, so that the
+ # output with and without chrootless mode is equal.
+ {
+ push @directories, '/var/log/apt';
+ # since we do not know the dpkg version inside the chroot at this
+ # point, we can only omit it in chrootless mode
+ if ($options->{mode} ne 'chrootless') {
+ push @directories, '/var/lib/dpkg/triggers',
+ '/var/lib/dpkg/info', '/var/lib/dpkg/alternatives',
+ '/var/lib/dpkg/updates';
+ }
+ }
+ foreach my $dir (@directories) {
+ if (-e "$options->{root}/$dir") {
+ if (!-d "$options->{root}/$dir") {
+ error "$dir already exists but is not a directory";
+ }
+ } else {
+ my $num_created = make_path "$options->{root}/$dir",
+ { error => \my $err };
+ if ($err && @$err) {
+ error(
+ join "; ",
+ (map { "cannot create " . (join ": ", %{$_}) } @$err));
+ } elsif ($num_created == 0) {
+ error "cannot create $options->{root}/$dir";
+ }
+ }
+ }
+ # make sure /tmp is not 0755 like the rest
+ chmod 01777, "$options->{root}/tmp" or error "cannot chmod /tmp: $!";
+ }
+
+ # The TMPDIR set by the user or even /tmp might be inaccessible to the
+ # unshared user. Thus, we place all temporary files in /tmp inside the new
+ # rootfs.
+ #
+ # This will affect calls to tempfile() as well as runs of "apt-get update"
+ # which will create temporary clearsigned.message.XXXXXX files to verify
+ # signatures.
+ #
+ # Setting TMPDIR to inside the chroot is also necessary for when packages
+ # are installed with apt from outside the chroot with
+ # DPkg::Chroot-Directory
+ {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{"TMPDIR"} = "$options->{root}/tmp";
+ }
+
+ my ($conf, $tmpfile)
+ = tempfile("mmdebstrap.apt.conf.XXXXXXXXXXXX", TMPDIR => 1)
+ or error "cannot open apt.conf: $!";
+ print $conf "Apt::Architecture \"$options->{nativearch}\";\n";
+ # the host system might have configured additional architectures
+ # force only the native architecture
+ if (scalar @{ $options->{foreignarchs} } > 0) {
+ print $conf "Apt::Architectures { \"$options->{nativearch}\"; ";
+ foreach my $arch (@{ $options->{foreignarchs} }) {
+ print $conf "\"$arch\"; ";
+ }
+ print $conf "};\n";
+ } else {
+ print $conf "Apt::Architectures \"$options->{nativearch}\";\n";
+ }
+ print $conf "Dir \"$options->{root}\";\n";
+ print $conf "DPkg::Chroot-Directory \"$options->{root}\";\n";
+ # not needed anymore for apt 1.3 and newer
+ print $conf
+ "Dir::State::Status \"$options->{root}/var/lib/dpkg/status\";\n";
+ # for authentication, use the keyrings from the host
+ print $conf "Dir::Etc::Trusted \"$options->{apttrusted}\";\n";
+ print $conf "Dir::Etc::TrustedParts \"$options->{apttrustedparts}\";\n";
+ if ($options->{variant} ne 'apt') {
+ # apt considers itself essential. Thus, when generating an EDSP
+ # document for an external solver, it will add the Essential:yes field
+ # to the apt package stanza. This is unnecessary for any other variant
+ # than 'apt' because in all other variants we compile the set of
+ # packages we consider essential ourselves and for the 'essential'
+ # variant it would even be wrong to add apt. This workaround is only
+ # needed when apt is used with an external solver but doesn't hurt
+ # otherwise and we don't have a good way to figure out whether apt is
+ # using an external solver or not short of parsing the --aptopt
+ # options.
+ print $conf "pkgCacheGen::ForceEssential \",\";\n";
+ }
+ close $conf;
+
+ # We put certain configuration items in their own configuration file
+ # because they have to be valid for apt invocation from outside as well as
+ # from inside the chroot.
+ # The config filename is chosen such that any settings in it will be
+ # overridden by what the user specified with --aptopt.
+ if (!-e "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap") {
+ open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
+ or error "cannot open /etc/apt/apt.conf.d/00mmdebstrap: $!";
+ print $fh "Apt::Install-Recommends false;\n";
+ print $fh "Acquire::Languages \"none\";\n";
+ close $fh;
+ }
+
+ # apt-get update requires this
+ if (!-e "$options->{root}/var/lib/dpkg/status") {
+ open my $fh, '>', "$options->{root}/var/lib/dpkg/status"
+ or error "failed to open(): $!";
+ close $fh;
+ }
+
+ # In theory, /var/lib/dpkg/arch is only useful if there are foreign
+ # architectures configured or if the architecture of a chrootless chroot
+ # is different from the native architecture outside the chroot.
+ # We nevertheless always add /var/lib/dpkg/arch to make a chroot built the
+ # normal way bit-by-bit identical to a foreign arch chroot built in
+ # chrootless mode.
+ chomp(my $hostarch = `dpkg --print-architecture`);
+ if ((!-e "$options->{root}/var/lib/dpkg/arch")) {
+ open my $fh, '>', "$options->{root}/var/lib/dpkg/arch"
+ or error "cannot open /var/lib/dpkg/arch: $!";
+ print $fh "$options->{nativearch}\n";
+ foreach my $arch (@{ $options->{foreignarchs} }) {
+ print $fh "$arch\n";
+ }
+ close $fh;
+ }
+
+ if (length $options->{aptopts} > 0
+ and (!-e "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap")) {
+ open my $fh, '>', "$options->{root}/etc/apt/apt.conf.d/99mmdebstrap"
+ or error "cannot open /etc/apt/apt.conf.d/99mmdebstrap: $!";
+ print $fh $options->{aptopts};
+ close $fh;
+ if ($verbosity_level >= 3) {
+ debug "content of /etc/apt/apt.conf.d/99mmdebstrap:";
+ copy("$options->{root}/etc/apt/apt.conf.d/99mmdebstrap", \*STDERR);
+ }
+ }
+
+ if (length $options->{dpkgopts} > 0
+ and (!-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")) {
+ # FIXME: in chrootless mode, dpkg will only read the configuration
+ # from the host -- see #808203
+ if ($options->{mode} eq 'chrootless') {
+ warning('dpkg is unable to read an alternative configuration in'
+ . ' chrootless mode -- see Debian bug #808203');
+ }
+ open my $fh, '>', "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap"
+ or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
+ print $fh $options->{dpkgopts};
+ close $fh;
+ if ($verbosity_level >= 3) {
+ debug "content of /etc/dpkg/dpkg.cfg.d/99mmdebstrap:";
+ copy("$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap",
+ \*STDERR);
+ }
+ }
+
+ if (!-e "$options->{root}/etc/fstab") {
+ open my $fh, '>', "$options->{root}/etc/fstab"
+ or error "cannot open fstab: $!";
+ print $fh "# UNCONFIGURED FSTAB FOR BASE SYSTEM\n";
+ close $fh;
+ chmod 0644, "$options->{root}/etc/fstab"
+ or error "cannot chmod fstab: $!";
+ }
+
+ # write /etc/apt/sources.list and files in /etc/apt/sources.list.d/
+ if (scalar @{ $options->{sourceslists} } > 0) {
+ my $firstentry = $options->{sourceslists}->[0];
+ # if the first sources.list entry is of one-line type and without
+ # explicit filename, then write out an actual /etc/apt/sources.list
+ # otherwise everything goes into /etc/apt/sources.list.d
+ my $fname;
+ if ($firstentry->{type} eq 'one-line'
+ && !defined $firstentry->{fname}) {
+ $fname = "$options->{root}/etc/apt/sources.list";
+ } else {
+ $fname = "$options->{root}/etc/apt/sources.list.d/0000";
+ if (defined $firstentry->{fname}) {
+ $fname .= $firstentry->{fname};
+ if ( $firstentry->{fname} !~ /\.list/
+ && $firstentry->{fname} !~ /\.sources/) {
+ if ($firstentry->{type} eq 'one-line') {
+ $fname .= '.list';
+ } elsif ($firstentry->{type} eq 'deb822') {
+ $fname .= '.sources';
+ } else {
+ error "invalid type: $firstentry->{type}";
+ }
+ }
+ } else {
+ # if no filename is given, then this must be a deb822 file
+ # because if it was a one-line type file, then it would've been
+ # written to /etc/apt/sources.list
+ $fname .= 'main.sources';
+ }
+ }
+ if (!-e $fname) {
+ open my $fh, '>', "$fname" or error "cannot open $fname: $!";
+ print $fh $firstentry->{content};
+ close $fh;
+ }
+ # everything else goes into /etc/apt/sources.list.d/
+ for (my $i = 1 ; $i < scalar @{ $options->{sourceslists} } ; $i++) {
+ my $entry = $options->{sourceslists}->[$i];
+ my $fname = "$options->{root}/etc/apt/sources.list.d/"
+ . sprintf("%04d", $i);
+ if (defined $entry->{fname}) {
+ $fname .= $entry->{fname};
+ if ( $entry->{fname} !~ /\.list/
+ && $entry->{fname} !~ /\.sources/) {
+ if ($entry->{type} eq 'one-line') {
+ $fname .= '.list';
+ } elsif ($entry->{type} eq 'deb822') {
+ $fname .= '.sources';
+ } else {
+ error "invalid type: $entry->{type}";
+ }
+ }
+ } else {
+ if ($entry->{type} eq 'one-line') {
+ $fname .= 'main.list';
+ } elsif ($entry->{type} eq 'deb822') {
+ $fname .= 'main.sources';
+ } else {
+ error "invalid type: $entry->{type}";
+ }
+ }
+ if (!-e $fname) {
+ open my $fh, '>', "$fname" or error "cannot open $fname: $!";
+ print $fh $entry->{content};
+ close $fh;
+ }
+ }
+ }
+
+ # allow network access from within
+ foreach my $file ("/etc/resolv.conf", "/etc/hostname") {
+ if (-e $file && !-e "$options->{root}/$file") {
+ # this will create a new file with 644 permissions and copy
+ # contents only even if $file was a symlink
+ copy($file, "$options->{root}/$file")
+ or error "cannot copy $file: $!";
+ # if the source was a regular file, preserve the permissions
+ if (-f $file) {
+ my $mode = (stat($file))[2];
+ $mode &= oct(7777); # mask off bits that aren't the mode
+ chmod $mode, "$options->{root}/$file"
+ or error "cannot chmod $file: $!";
+ }
+ } elsif (-e $file && -e "$options->{root}/$file") {
+ info "rootfs alreday contains $file";
+ } else {
+ warning("Host system does not have a $file to copy into the"
+ . " rootfs.");
+ }
+ }
+
+ if ($options->{havemknod}) {
+ foreach my $file (@devfiles) {
+ my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
+ = @{$file};
+ if ($type == 0) { # normal file
+ error "type 0 not implemented";
+ } elsif ($type == 1) { # hardlink
+ error "type 1 not implemented";
+ } elsif ($type == 2) { # symlink
+ if ( $options->{mode} eq 'fakechroot'
+ and $linkname =~ /^\/proc/) {
+ # there is no /proc in fakechroot mode
+ next;
+ }
+ symlink $linkname, "$options->{root}/dev/$fname"
+ or error "cannot create symlink ./dev/$fname";
+ next; # chmod cannot work on symlinks
+ } elsif ($type == 3) { # character special
+ 0 == system('mknod', "$options->{root}/dev/$fname", 'c',
+ $devmajor, $devminor)
+ or error "mknod failed: $?";
+ } elsif ($type == 4) { # block special
+ 0 == system('mknod', "$options->{root}/dev/$fname", 'b',
+ $devmajor, $devminor)
+ or error "mknod failed: $?";
+ } elsif ($type == 5) { # directory
+ if (-e "$options->{root}/dev/$fname") {
+ if (!-d "$options->{root}/dev/$fname") {
+ error
+ "./dev/$fname already exists but is not a directory";
+ }
+ } else {
+ my $num_created = make_path "$options->{root}/dev/$fname",
+ { error => \my $err };
+ if ($err && @$err) {
+ error(
+ join "; ",
+ (
+ map { "cannot create " . (join ": ", %{$_}) }
+ @$err
+ ));
+ } elsif ($num_created == 0) {
+ error "cannot create $options->{root}/dev/$fname";
+ }
+ }
+ } else {
+ error "unsupported type: $type";
+ }
+ chmod $mode, "$options->{root}/dev/$fname"
+ or error "cannot chmod ./dev/$fname: $!";
+ }
+ }
+
+ # we tell apt about the configuration via a config file passed via the
+ # APT_CONFIG environment variable instead of using the --option command
+ # line arguments because configuration settings like Dir::Etc have already
+ # been evaluated at the time that apt takes its command line arguments
+ # into account.
+ {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{"APT_CONFIG"} = "$tmpfile";
+ }
+ # we have to make the config file world readable so that a possible
+ # /usr/lib/apt/solvers/apt process which is run by the _apt user is also
+ # able to read it
+ chmod 0644, "$tmpfile" or error "cannot chmod $tmpfile: $!";
+ if ($verbosity_level >= 3) {
+ 0 == system('apt-get', '--version')
+ or error "apt-get --version failed: $?";
+ 0 == system('apt-config', 'dump') or error "apt-config failed: $?";
+ debug "content of $tmpfile:";
+ copy($tmpfile, \*STDERR);
+ }
+
+ if ($options->{mode} ne 'fakechroot') {
+ # Apt dropping privileges to a user other than root is not useful in
+ # fakechroot mode because all users are faked and thus there is no real
+ # privilege difference anyway. We could set APT::Sandbox::User "root"
+ # in fakechroot mode but we don't because if we would, then
+ # /var/cache/apt/archives/partial/ and /var/lib/apt/lists/partial/
+ # would not be owned by the _apt user if mmdebstrap was run in
+ # fakechroot mode.
+ #
+ # when apt-get update is run by the root user, then apt will attempt to
+ # drop privileges to the _apt user. This will fail if the _apt user
+ # does not have permissions to read the root directory. In that case,
+ # we have to disable apt sandboxing. This can for example happen in
+ # root mode when the path of the chroot is not in a world-readable
+ # location.
+ my $partial = '/var/lib/apt/lists/partial';
+ my @testcmd = (
+ '/usr/lib/apt/apt-helper', 'drop-privs', '--', 'test',
+ '-r', "$options->{root}$partial"
+ );
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ open(STDOUT, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ open(STDERR, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ exec { $testcmd[0] } @testcmd
+ or error("cannot exec " . (join " ", @testcmd) . ": $!");
+ }
+ waitpid $pid, 0;
+ if ($? != 0) {
+ warning "Download is performed unsandboxed as root as file"
+ . " $options->{root}$partial couldn't be accessed by user _apt";
+ open my $fh, '>>', $tmpfile
+ or error "cannot open $tmpfile for appending: $!";
+ print $fh "APT::Sandbox::User \"root\";\n";
+ close $fh;
+ }
+ }
+
+ return;
+}
+
+sub run_update() {
+ my $options = shift;
+
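+ # --error-on=any turns failures of individual index downloads into a
+ # non-zero exit status of apt-get update instead of mere warnings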
+ my $aptopts = {
+ ARGV => ['apt-get', 'update', '--error-on=any'],
+ CHDIR => $options->{root},
+ };
+
+ # Maybe "apt-get update" was already run in the setup hook? If yes, skip
+ # running it here. We are overly strict on purpose because it is better to
+ # run it twice by accident than not at all.
+ if ( !-d "$options->{root}/var/lib/apt/lists/auxfiles"
+ || !-d "$options->{root}/var/lib/apt/lists/partial"
+ || !-e "$options->{root}/var/lib/apt/lists/lock"
+ || !-e "$options->{root}/var/cache/apt/pkgcache.bin"
+ || !-e "$options->{root}/var/cache/apt/srcpkgcache.bin") {
+ info "running apt-get update...";
+ run_apt_progress($aptopts);
+ } else {
+ info "skipping apt-get update because it was already run";
+ }
+
+ # check if anything was downloaded at all
+ {
+ open(my $fh, '-|', 'apt-get', 'indextargets')
+ // error "failed to fork(): $!";
+ chomp(
+ my $indextargets = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($indextargets eq '') {
+ warning("apt-get indextargets output is empty");
+ if (scalar @{ $options->{sourceslists} } == 0) {
+ warning "no known apt sources.list entry";
+ }
+ for my $list (@{ $options->{sourceslists} }) {
+ if (defined $list->{fname}) {
+ info("Filename: $list->{fname}");
+ }
+ info("Type: $list->{type}");
+ info("Content:");
+ for my $line (split "\n", $list->{content}) {
+ info(" $line");
+ }
+ }
+ open(my $fh, '-|', 'apt-cache', 'policy')
+ // error "failed to fork(): $!";
+ while (my $line = <$fh>) {
+ chomp $line;
+ info $line;
+ }
+ close $fh;
+ my $msg
+ = "apt-get update did not find any indices "
+ . "for architecture '$options->{nativearch}' in ";
+ if (length $options->{suite}) {
+ $msg .= "suite '$options->{suite}'";
+ } else {
+ $msg .= "the configured apt sources";
+ }
+ error $msg;
+ }
+ }
+
+ return;
+}
+
+sub run_download() {
+ my $options = shift;
+
+ # In the future we want to replace downloading packages with "apt-get
+ # install" and installing them with dpkg by just installing the essential
+ # packages with apt from the outside with DPkg::Chroot-Directory.
+ # We are not doing that because then the preinst script of base-passwd will
+ # not be called early enough and packages will fail to install because they
+ # are missing /etc/passwd.
+ my @cached_debs = ();
+ my @dl_debs = ();
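+ # remember which .deb files were already present in the apt cache before
+ # downloading so that run_essential() can later skip unlinking them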
+ if (
+ !$options->{dryrun}
+ && ((none { $_ eq $options->{variant} } ('extract', 'custom'))
+ || scalar @{ $options->{include} } != 0)
+ && -d "$options->{root}/var/cache/apt/archives/"
+ ) {
+ my $apt_archives = "/var/cache/apt/archives/";
+ opendir my $dh, "$options->{root}/$apt_archives"
+ or error "cannot read $apt_archives";
+ while (my $deb = readdir $dh) {
+ if ($deb !~ /\.deb$/) {
+ next;
+ }
+ if (!-f "$options->{root}/$apt_archives/$deb") {
+ next;
+ }
+ push @cached_debs, $deb;
+ }
+ closedir $dh;
+ }
+
+ # To figure out the right package set for the apt variant we can use:
+ # $ apt-get dist-upgrade -o dir::state::status=/dev/null
+ # This is because that variant only contains the essential packages plus
+ # apt, and libapt treats apt as essential. If we want to install less
+ # (essential variant) then we have to compute the package set ourselves.
+ # Same if we want to install priority-based variants.
+ if (any { $_ eq $options->{variant} } ('extract', 'custom')) {
+ if (scalar @{ $options->{include} } == 0) {
+ info "nothing to download -- skipping...";
+ return ([], \@cached_debs);
+ }
+ my @apt_argv = ('install', @{ $options->{include} });
+
+ @dl_debs = run_apt_download_progress({
+ APT_ARGV => [@apt_argv],
+ dryrun => $options->{dryrun},
+ },
+ );
+ } elsif ($options->{variant} eq 'apt') {
+ # if we just want to install Essential:yes packages, apt and their
+ # dependencies then we can make use of libapt treating apt as
+ # implicitly essential. An upgrade with the (currently) empty status
+ # file will trigger an installation of the essential packages plus apt.
+ #
+ # 2018-09-02, #debian-dpkg on OFTC, times in UTC+2
+ # 23:39 < josch> I'll just put it in my script and if it starts
+ # breaking some time I just say it's apt's fault. :P
+ # 23:42 < DonKult> that is how it usually works, so yes, do that :P (<-
+ # and please add that line next to it so you can
+ # remind me in 5+ years that I said that after I wrote
+ # in the bugreport: "Are you crazy?!? Nobody in his
+ # right mind would even suggest depending on it!")
+ @dl_debs = run_apt_download_progress({
+ APT_ARGV => ['dist-upgrade'],
+ dryrun => $options->{dryrun},
+ },
+ );
+ } elsif (any { $_ eq $options->{variant} }
+ ('essential', 'standard', 'important', 'required', 'buildd')) {
+ # 2021-06-07, #debian-apt on OFTC, times in UTC+2
+ # 17:27 < DonKult> (?essential includes 'apt' through)
+ # 17:30 < josch> DonKult: no, because pkgCacheGen::ForceEssential ",";
+ # 17:32 < DonKult> touché
+ @dl_debs = run_apt_download_progress({
+ APT_ARGV => [
+ 'install',
+ '?narrow('
+ . (
+ length($options->{suite})
+ ? '?or(?archive(^'
+ . $options->{suite}
+ . '$),?codename(^'
+ . $options->{suite} . '$)),'
+ : ''
+ )
+ . '?architecture('
+ . $options->{nativearch}
+ . '),?essential)'
+ ],
+ dryrun => $options->{dryrun},
+ },
+ );
+ } else {
+ error "unknown variant: $options->{variant}";
+ }
+
+ my @essential_pkgs;
+ # strip the chroot directory from the filenames
+ foreach my $deb (@dl_debs) {
+ # if filename does not start with chroot directory then the user
+ # might've used a file:// mirror and we check whether the path is
+ # accessible inside the chroot
+ if (rindex $deb, $options->{root}, 0) {
+ if (!-e "$options->{root}/$deb") {
+ error "package file $deb not accessible from chroot directory"
+ . " -- use copy:// instead of file:// or a bind-mount. You"
+ . " can also try using --hook-dir=/usr/share/mmdebstrap/"
+ . "hooks/file-mirror-automount to automatically create"
+ . " bind-mounts or copy the files as necessary.";
+ }
+ push @essential_pkgs, $deb;
+ next;
+ }
+ # filename starts with chroot directory, strip it off
+ # this is the normal case
+ if (!-e $deb) {
+ error "cannot find package file $deb";
+ }
+ push @essential_pkgs, substr($deb, length($options->{root}));
+ }
+
+ return (\@essential_pkgs, \@cached_debs);
+}
+
+sub run_extract() {
+ my $options = shift;
+ my $essential_pkgs = shift;
+
+ if ($options->{dryrun}) {
+ info "skip extracting packages because of --dry-run";
+ return;
+ }
+
+ if (scalar @{$essential_pkgs} == 0) {
+ info "nothing to extract -- skipping...";
+ return;
+ }
+
+ info "extracting archives...";
+ print_progress 0.0;
+ my $counter = 0;
+ my $total = scalar @{$essential_pkgs};
+ foreach my $deb (@{$essential_pkgs}) {
+ $counter += 1;
+
+ my $tarfilter;
+ my @tarfilterargs;
+ # if the path-exclude option was added to the dpkg config,
+ # insert the tarfilter between dpkg-deb and tar
+ if (-e "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap") {
+ open(my $fh, '<',
+ "$options->{root}/etc/dpkg/dpkg.cfg.d/99mmdebstrap")
+ or error "cannot open /etc/dpkg/dpkg.cfg.d/99mmdebstrap: $!";
+ my @matches = grep { /^path-(?:exclude|include)=/ } <$fh>;
+ close $fh;
+ chop @matches; # remove trailing newline
+ @tarfilterargs = map { "--" . $_ } @matches;
+ }
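+ # prefer the tarfilter script from the current working directory (when
+ # running from the source tree) and fall back to the installed mmtarfilter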
+ if (scalar @tarfilterargs > 0) {
+ if (-x "./tarfilter") {
+ $tarfilter = "./tarfilter";
+ } else {
+ $tarfilter = "mmtarfilter";
+ }
+ }
+
+ my $dpkg_writer;
+ my $tar_reader;
+ my $filter_reader;
+ my $filter_writer;
+ if (scalar @tarfilterargs > 0) {
+ pipe $filter_reader, $dpkg_writer or error "pipe failed: $!";
+ pipe $tar_reader, $filter_writer or error "pipe failed: $!";
+ } else {
+ pipe $tar_reader, $dpkg_writer or error "pipe failed: $!";
+ }
+ # not using dpkg-deb --extract as that would replace the
+ # merged-usr symlinks with plain directories
+ # even after switching from pre-merging to post-merging, dpkg-deb
+ # will ignore filter rules from dpkg.cfg.d
+ # https://bugs.debian.org/989602
+ # not using dpkg --unpack because that would try running preinst
+ # maintainer scripts
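+ # fork up to three processes connected by the pipes set up above:
+ # dpkg-deb streams the fsys tarball, the optional tarfilter applies the
+ # path-exclude/path-include rules, and tar extracts into the chroot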
+ my $pid1 = fork() // error "fork() failed: $!";
+ if ($pid1 == 0) {
+ open(STDOUT, '>&', $dpkg_writer) or error "cannot open STDOUT: $!";
+ close($tar_reader) or error "cannot close tar_reader: $!";
+ if (scalar @tarfilterargs > 0) {
+ close($filter_reader)
+ or error "cannot close filter_reader: $!";
+ close($filter_writer)
+ or error "cannot close filter_writer: $!";
+ }
+ debug("running dpkg-deb --fsys-tarfile $options->{root}/$deb");
+ eval { Devel::Cover::set_coverage("none") } if $is_covering;
+ exec 'dpkg-deb', '--fsys-tarfile', "$options->{root}/$deb";
+ }
+ my $pid2;
+ if (scalar @tarfilterargs > 0) {
+ $pid2 = fork() // error "fork() failed: $!";
+ if ($pid2 == 0) {
+ open(STDIN, '<&', $filter_reader)
+ or error "cannot open STDIN: $!";
+ open(STDOUT, '>&', $filter_writer)
+ or error "cannot open STDOUT: $!";
+ close($dpkg_writer) or error "cannot close dpkg_writer: $!";
+ close($tar_reader) or error "cannot close tar_reader: $!";
+ debug("running $tarfilter " . (join " ", @tarfilterargs));
+ eval { Devel::Cover::set_coverage("none") } if $is_covering;
+ exec $tarfilter, @tarfilterargs;
+ }
+ }
+ my $pid3 = fork() // error "fork() failed: $!";
+ if ($pid3 == 0) {
+ open(STDIN, '<&', $tar_reader) or error "cannot open STDIN: $!";
+ close($dpkg_writer) or error "cannot close dpkg_writer: $!";
+ if (scalar @tarfilterargs > 0) {
+ close($filter_reader)
+ or error "cannot close filter_reader: $!";
+ close($filter_writer)
+ or error "cannot close filter_writer: $!";
+ }
+ debug( "running tar -C $options->{root}"
+ . " --keep-directory-symlink --extract --file -");
+ eval { Devel::Cover::set_coverage("none") } if $is_covering;
+ exec 'tar', '-C', $options->{root},
+ '--keep-directory-symlink', '--extract', '--file', '-';
+ }
+ close($dpkg_writer) or error "cannot close dpkg_writer: $!";
+ close($tar_reader) or error "cannot close tar_reader: $!";
+ if (scalar @tarfilterargs > 0) {
+ close($filter_reader) or error "cannot close filter_reader: $!";
+ close($filter_writer) or error "cannot close filter_writer: $!";
+ }
+ waitpid($pid1, 0);
+ $? == 0 or error "dpkg-deb --fsys-tarfile failed: $?";
+ if (scalar @tarfilterargs > 0) {
+ waitpid($pid2, 0);
+ $? == 0 or error "tarfilter failed: $?";
+ }
+ waitpid($pid3, 0);
+ $? == 0 or error "tar --extract failed: $?";
+ print_progress($counter / $total * 100, "extracting");
+ }
+ print_progress "done";
+
+ return;
+}
+
+sub run_prepare {
+ my $options = shift;
+
+ if ($options->{mode} eq 'fakechroot') {
+ # this borrows from and extends
+ # /etc/fakechroot/debootstrap.env and
+ # /etc/fakechroot/chroot.env
+ {
+ my %subst = (
+ chroot => "/usr/sbin/chroot.fakechroot",
+ mkfifo => "/bin/true",
+ ldconfig => (getcwd() . '/ldconfig.fakechroot'),
+ ldd => "/usr/bin/ldd.fakechroot",
+ ischroot => "/bin/true"
+ );
+ if (!-x $subst{ldconfig}) {
+ $subst{ldconfig}
+ = '/usr/libexec/mmdebstrap/ldconfig.fakechroot';
+ }
+ my %mergedusrmap = (
+ "/bin" => "/usr/bin",
+ "/sbin" => "/usr/sbin",
+ "/usr/bin/" => "/bin",
+ "/usr/sbin" => "/sbin"
+ );
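+ # register each substitution under both its merged-/usr and its unmerged
+ # path so the mapping works regardless of the host's filesystem layout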
+ my %fakechrootsubst;
+ foreach my $d (split ':', $ENV{PATH}) {
+ foreach my $k (sort %subst) {
+ my $mapped_path = $mergedusrmap{$d} // $d;
+ next if !-e "$d/$k" && !-e "$mapped_path/$k";
+ $fakechrootsubst{"$d/$k=$subst{$k}"} = 1;
+ $fakechrootsubst{"$mapped_path/$k=$subst{$k}"} = 1;
+ }
+ }
+ if (defined $ENV{FAKECHROOT_CMD_SUBST}
+ && $ENV{FAKECHROOT_CMD_SUBST} ne "") {
+ foreach my $e (split /:/, $ENV{FAKECHROOT_CMD_SUBST}) {
+ $fakechrootsubst{$e} = 1;
+ }
+ }
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{FAKECHROOT_CMD_SUBST} = join ':',
+ (sort keys %fakechrootsubst);
+ }
+ if (defined $ENV{FAKECHROOT_EXCLUDE_PATH}
+ && $ENV{FAKECHROOT_EXCLUDE_PATH} ne "") {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{FAKECHROOT_EXCLUDE_PATH}
+ = "$ENV{FAKECHROOT_EXCLUDE_PATH}:/dev:/proc:/sys";
+ } else {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{FAKECHROOT_EXCLUDE_PATH} = '/dev:/proc:/sys';
+ }
+ # workaround for long unix socket path if FAKECHROOT_BASE
+ # exceeds the limit of 108 bytes
+ {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{FAKECHROOT_AF_UNIX_PATH} = "/tmp";
+ }
+ {
+ my @ldlibpath = ();
+ if (defined $ENV{LD_LIBRARY_PATH}
+ && $ENV{LD_LIBRARY_PATH} ne "") {
+ push @ldlibpath, (split /:/, $ENV{LD_LIBRARY_PATH});
+ }
+ # FIXME: workaround allowing installation of systemd should
+ # live in fakechroot, see #917920
+ push @ldlibpath, "$options->{root}/lib/systemd";
+ my $parse_ld_so_conf;
+ $parse_ld_so_conf = sub {
+ foreach my $conf (@_) {
+ next if !-r $conf;
+ open my $fh, '<', "$conf" or error "can't read $conf: $!";
+ while (my $line = <$fh>) {
+ chomp $line;
+ if ($line eq "") {
+ next;
+ }
+ if ($line =~ /^#/) {
+ next;
+ }
+ if ($line =~ /include (.*)/) {
+ $parse_ld_so_conf->(glob("$options->{root}/$1"));
+ next;
+ }
+ if (!-d "$options->{root}/$line") {
+ next;
+ }
+ push @ldlibpath, "$options->{root}/$line";
+ }
+ close $fh;
+ }
+ };
+ if (-e "$options->{root}/etc/ld.so.conf") {
+ $parse_ld_so_conf->("$options->{root}/etc/ld.so.conf");
+ }
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{LD_LIBRARY_PATH} = join ':', @ldlibpath;
+ }
+ }
+
+ # make sure that APT_CONFIG and TMPDIR are not set when executing
+ # anything inside the chroot
+ my @chrootcmd = ('env', '--unset=APT_CONFIG', '--unset=TMPDIR');
+ if (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot')) {
+ push @chrootcmd, ('chroot', $options->{root});
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ # foreign architecture setup for fakechroot mode
+ if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
+ # Make sure that the fakeroot and fakechroot shared libraries exist for
+ # the right architecture
+ open(my $fh, '-|', 'dpkg-architecture', '-a',
+ $options->{nativearch}, '-qDEB_HOST_MULTIARCH')
+ // error "failed to fork(): $!";
+ chomp(
+ my $deb_host_multiarch = do { local $/; <$fh> }
+ );
+ close $fh;
+ if (($? != 0) or (!$deb_host_multiarch)) {
+ error "dpkg-architecture failed: $?";
+ }
+ my $fakechrootdir = "/usr/lib/$deb_host_multiarch/fakechroot";
+ if (!-e "$fakechrootdir/libfakechroot.so") {
+ error "$fakechrootdir/libfakechroot.so doesn't exist."
+ . " Install libfakechroot:$options->{nativearch}"
+ . " outside the chroot";
+ }
+ my $fakerootdir = "/usr/lib/$deb_host_multiarch/libfakeroot";
+ if (!-e "$fakerootdir/libfakeroot-sysv.so") {
+ error "$fakerootdir/libfakeroot-sysv.so doesn't exist."
+ . " Install libfakeroot:$options->{nativearch}"
+ . " outside the chroot";
+ }
+
+ # The rest of this block sets environment variables, so we have to add
+ # the "no critic" statement to stop perlcritic from complaining about
+ # setting global variables
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ # fakechroot only fills LD_LIBRARY_PATH with the directories of the
+ # host's architecture. We append the directories of the chroot
+ # architecture.
+ $ENV{LD_LIBRARY_PATH}
+ = "$ENV{LD_LIBRARY_PATH}:$fakechrootdir:$fakerootdir";
+ # The binfmt support on the outside is used, so qemu needs to know
+ # where it has to look for shared libraries
+ if (defined $ENV{QEMU_LD_PREFIX}
+ && $ENV{QEMU_LD_PREFIX} ne "") {
+ $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
+ } else {
+ $ENV{QEMU_LD_PREFIX} = $options->{root};
+ }
+ }
+
+ # some versions of coreutils use the renameat2 system call in mv.
+ # This breaks certain versions of fakechroot. Here we do
+ # a sanity check and warn the user in case things might break.
+ if ($options->{mode} eq 'fakechroot'
+ and -e "$options->{root}/bin/mv") {
+ mkdir "$options->{root}/000-move-me"
+ or error "cannot create directory: $!";
+ my $ret = system @chrootcmd, '/bin/mv', '/000-move-me',
+ '/001-delete-me';
+ if ($ret != 0) {
+ info "the /bin/mv binary inside the chroot doesn't"
+ . " work under fakechroot";
+ info "with certain versions of coreutils and glibc,"
+ . " this is due to missing support for renameat2 in"
+ . " fakechroot";
+ info "see https://github.com/dex4er/fakechroot/issues/60";
+ info "expect package post installation scripts not to work";
+ rmdir "$options->{root}/000-move-me"
+ or error "cannot rmdir: $!";
+ } else {
+ rmdir "$options->{root}/001-delete-me"
+ or error "cannot rmdir: $!";
+ }
+ }
+
+ return \@chrootcmd;
+}
+
+sub run_essential() {
+ my $options = shift;
+ my $essential_pkgs = shift;
+ my $chrootcmd = shift;
+ my $cached_debs = shift;
+
+ if (scalar @{$essential_pkgs} == 0) {
+ info "no essential packages -- skipping...";
+ return;
+ }
+
+ if ($options->{mode} eq 'chrootless') {
+ if ($options->{dryrun}) {
+ info "simulate installing essential packages...";
+ } else {
+ info "installing essential packages...";
+ }
+ # FIXME: the dpkg config from the host is parsed before the command
+ # line arguments are parsed and might break this mode
+ # Example: if the host has --path-exclude set, then this will also
+ # affect the chroot. See #808203
+ my @chrootless_opts = (
+ '-oDPkg::Chroot-Directory=',
+ '-oDPkg::Options::=--force-not-root',
+ '-oDPkg::Options::=--force-script-chrootless',
+ '-oDPkg::Options::=--root=' . $options->{root},
+ '-oDPkg::Options::=--log=' . "$options->{root}/var/log/dpkg.log",
+ $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
+ );
+ if (defined $options->{qemu}) {
+ # The binfmt support on the outside is used, so qemu needs to know
+ # where it has to look for shared libraries
+ if (defined $ENV{QEMU_LD_PREFIX}
+ && $ENV{QEMU_LD_PREFIX} ne "") {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{QEMU_LD_PREFIX} = "$ENV{QEMU_LD_PREFIX}:$options->{root}";
+ } else {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{QEMU_LD_PREFIX} = $options->{root};
+ }
+ }
+ # we don't use apt because that will not run the base-passwd preinst
+ # early enough
+ #run_apt_progress({
+ # ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
+ # PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}],
+ #});
+ run_dpkg_progress({
+ ARGV => [
+ 'dpkg',
+ '--force-not-root',
+ '--force-script-chrootless',
+ "--root=$options->{root}",
+ "--log=$options->{root}/var/log/dpkg.log",
+ '--install',
+ '--force-depends'
+ ],
+ PKGS => [map { "$options->{root}/$_" } @{$essential_pkgs}] });
+ } elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
+ {
+ # install the extracted packages properly
+ # we need --force-depends because dpkg does not take Pre-Depends
+ # into account and thus doesn't install them in the right order
+ # And the --predep-package option is broken: #539133
+ #
+ # We could use apt from outside the chroot using DPkg::Chroot-Directory
+ # but then the preinst script of base-passwd will not be called early
+ # enough and packages will fail to install because they are missing
+ # /etc/passwd. Also, with plain dpkg the essential variant can finish
+ # within 9 seconds. If we use apt instead, it becomes 12 seconds. We
+ # prefer speed here.
+ if ($options->{dryrun}) {
+ info "simulate installing essential packages...";
+ } else {
+ info "installing essential packages...";
+ run_dpkg_progress({
+ ARGV =>
+ [@{$chrootcmd}, 'dpkg', '--install', '--force-depends'],
+ PKGS => $essential_pkgs,
+ });
+ }
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ if (any { $_ eq 'essential/unlink' } @{ $options->{skip} }) {
+ info "skipping essential/unlink as requested";
+ } else {
+ foreach my $deb (@{$essential_pkgs}) {
+ # do not unlink those packages that were in /var/cache/apt/archives
+ # before the download phase
+ next
+ if any { "/var/cache/apt/archives/$_" eq $deb } @{$cached_debs};
+ # do not unlink those packages that were not in
+ # /var/cache/apt/archives (for example because they were provided by
+ # a file:// mirror)
+ next if $deb !~ /\/var\/cache\/apt\/archives\//;
+ unlink "$options->{root}/$deb"
+ or error "cannot unlink $deb: $!";
+ }
+ }
+
+ return;
+}
+
+sub run_install() {
+ my $options = shift;
+
+ my @pkgs_to_install = (@{ $options->{include} });
+ if ($options->{variant} eq 'buildd') {
+ push @pkgs_to_install, 'build-essential', 'apt';
+ }
+ if (any { $_ eq $options->{variant} }
+ ('required', 'important', 'standard')) {
+ # Many of the priority:required packages are also essential:yes. We
+ # make sure not to select those here to avoid useless "xxx is already
+ # the newest version" messages.
+ my $priority;
+ if (any { $_ eq $options->{variant} } ('required')) {
+ $priority = '?and(?priority(required),?not(?essential))';
+ } elsif ($options->{variant} eq 'important') {
+ $priority = '?and(?or(?priority(required),?priority(important)),'
+ . '?not(?essential))';
+ } elsif ($options->{variant} eq 'standard') {
+ $priority = '?and(?or(~prequired,~pimportant,~pstandard),'
+ . '?not(?essential))';
+ }
+ push @pkgs_to_install,
+ (
+ "?narrow("
+ . (
+ length($options->{suite})
+ ? '?or(?archive(^'
+ . $options->{suite}
+ . '$),?codename(^'
+ . $options->{suite} . '$)),'
+ : ''
+ )
+ . "?architecture($options->{nativearch}),"
+ . "$priority)"
+ );
+ }
+
+ if ($options->{mode} eq 'chrootless') {
+ if (scalar @pkgs_to_install > 0) {
+ my @chrootless_opts = (
+ '-oDPkg::Chroot-Directory=',
+ '-oDPkg::Options::=--force-not-root',
+ '-oDPkg::Options::=--force-script-chrootless',
+ '-oDPkg::Options::=--root=' . $options->{root},
+ '-oDPkg::Options::=--log='
+ . "$options->{root}/var/log/dpkg.log",
+ $options->{dryrun} ? '-oAPT::Get::Simulate=true' : (),
+ );
+ run_apt_progress({
+ ARGV => ['apt-get', '--yes', @chrootless_opts, 'install'],
+ PKGS => [@pkgs_to_install],
+ });
+ }
+ } elsif (any { $_ eq $options->{mode} } ('root', 'unshare', 'fakechroot'))
+ {
+ if ($options->{variant} ne 'custom'
+ and scalar @pkgs_to_install > 0) {
+ # Advantage of running apt on the outside instead of inside the
+ # chroot:
+ #
+ # - we can build chroots without apt (for example from buildinfo
+ # files)
+ #
+ # - we do not need to install additional packages like
+ # apt-transport-* or ca-certificates inside the chroot
+ #
+ # - we do not need additional key material inside the chroot
+ #
+ # - we can make use of file:// and copy://
+ #
+ # - we can use EDSP solvers without installing apt-utils or other
+ # solvers inside the chroot
+ #
+ # The DPkg::Install::Recursive::force=true workaround can be
+ # dropped after this issue is fixed:
+ # https://salsa.debian.org/apt-team/apt/-/merge_requests/189
+ #
+ # We could also move the dpkg call to the outside and run dpkg with
+ # --root but this would only make sense in situations where there
+ # is no dpkg inside the chroot.
+ if (!$options->{dryrun}) {
+ info "installing remaining packages inside the chroot...";
+ run_apt_progress({
+ ARGV => [
+ 'apt-get',
+ '-o',
+ 'Dir::Bin::dpkg=env',
+ '-o',
+ 'DPkg::Options::=--unset=TMPDIR',
+ '-o',
+ 'DPkg::Options::=dpkg',
+ $options->{mode} eq 'fakechroot'
+ ? ('-o', 'DPkg::Install::Recursive::force=true')
+ : (),
+ '--yes',
+ 'install'
+ ],
+ PKGS => [@pkgs_to_install],
+ });
+ } else {
+ info "simulate installing remaining packages inside the"
+ . " chroot...";
+ run_apt_progress({
+ ARGV => [
+ 'apt-get', '--yes',
+ '-oAPT::Get::Simulate=true', 'install'
+ ],
+ PKGS => [@pkgs_to_install],
+ });
+ }
+ }
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ return;
+}
+
+sub run_cleanup() {
+ my $options = shift;
+
+ if (any { $_ eq 'cleanup/apt' } @{ $options->{skip} }) {
+ info "skipping cleanup/apt as requested";
+ } else {
+ if ( none { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }
+ and none { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
+ info "cleaning package lists and apt cache...";
+ }
+ if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
+ info "skipping cleanup/apt/lists as requested";
+ } else {
+ if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
+ info "cleaning package lists...";
+ }
+ run_apt_progress({
+ ARGV => [
+ 'apt-get', '--option',
+ 'Dir::Etc::SourceList=/dev/null', '--option',
+ 'Dir::Etc::SourceParts=/dev/null', 'update'
+ ],
+ CHDIR => $options->{root},
+ });
+ }
+ if (any { $_ eq 'cleanup/apt/cache' } @{ $options->{skip} }) {
+ info "skipping cleanup/apt/cache as requested";
+ } else {
+ if (any { $_ eq 'cleanup/apt/lists' } @{ $options->{skip} }) {
+ info "cleaning apt cache...";
+ }
+ run_apt_progress(
+ { ARGV => ['apt-get', 'clean'], CHDIR => $options->{root} });
+ }
+
+ # apt since 1.6 creates the auxfiles directory. If apt inside the
+ # chroot is older than that, then it will not know how to clean it.
+ if (-e "$options->{root}/var/lib/apt/lists/auxfiles") {
+ 0 == system(
+ 'rm',
+ '--interactive=never',
+ '--recursive',
+ '--preserve-root',
+ '--one-file-system',
+ "$options->{root}/var/lib/apt/lists/auxfiles"
+ ) or error "rm failed: $?";
+ }
+ }
+
+ if (any { $_ eq 'cleanup/mmdebstrap' } @{ $options->{skip} }) {
+ info "skipping cleanup/mmdebstrap as requested";
+ } else {
+ # clean up temporary configuration file
+ unlink "$options->{root}/etc/apt/apt.conf.d/00mmdebstrap"
+ or warning "failed to unlink /etc/apt/apt.conf.d/00mmdebstrap: $!";
+
+ if (defined $ENV{APT_CONFIG} && -e $ENV{APT_CONFIG}) {
+ unlink $ENV{APT_CONFIG}
+ or error "failed to unlink $ENV{APT_CONFIG}: $!";
+ }
+ }
+
+ if (any { $_ eq 'cleanup/reproducible' } @{ $options->{skip} }) {
+ info "skipping cleanup/reproducible as requested";
+ } else {
+ # clean up certain files to make output reproducible
+ foreach my $fname (
+ '/var/log/dpkg.log', '/var/log/apt/history.log',
+ '/var/log/apt/term.log', '/var/log/alternatives.log',
+ '/var/cache/ldconfig/aux-cache', '/var/log/apt/eipp.log.xz',
+ '/var/lib/dbus/machine-id'
+ ) {
+ my $path = "$options->{root}$fname";
+ if (!-e $path) {
+ next;
+ }
+ unlink $path or error "cannot unlink $path: $!";
+ }
+
+ if (-e "$options->{root}/etc/machine-id") {
+ # from machine-id(5):
+ # For operating system images which are created once and used on
+ # multiple machines, for example for containers or in the cloud,
+ # /etc/machine-id should be an empty file in the generic file
+ # system image. An ID will be generated during boot and saved to
+ # this file if possible. Having an empty file in place is useful
+ # because it allows a temporary file to be bind-mounted over the
+ # real file, in case the image is used read-only.
+ unlink "$options->{root}/etc/machine-id"
+ or error "cannot unlink /etc/machine-id: $!";
+ open my $fh, '>', "$options->{root}/etc/machine-id"
+ or error "failed to open(): $!";
+ close $fh;
+ }
+ }
+
+ if (any { $_ eq 'cleanup/run' } @{ $options->{skip} }) {
+ info "skipping cleanup/run as requested";
+ } else {
+ # remove any possible leftovers in /run
+ if (-d "$options->{root}/run") {
+ opendir(my $dh, "$options->{root}/run")
+ or error "Can't opendir($options->{root}/run): $!";
+ while (my $entry = readdir $dh) {
+ # skip the "." and ".." entries
+ next if $entry eq ".";
+ next if $entry eq "..";
+ # skip deleting /run/lock as /var/lock is a symlink to it
+ # according to Debian policy §9.1.4
+ next if $entry eq "lock";
+ debug "deleting files in /run: $entry";
+ 0 == system(
+ 'rm', '--interactive=never',
+ '--recursive', '--preserve-root',
+ '--one-file-system', "$options->{root}/run/$entry"
+ ) or error "rm failed: $?";
+ }
+ closedir($dh);
+ }
+ }
+ if (any { $_ eq 'cleanup/tmp' } @{ $options->{skip} }) {
+ info "skipping cleanup/tmp as requested";
+ } else {
+ # remove any possible leftovers in /tmp
+ if (-d "$options->{root}/tmp") {
+ opendir(my $dh, "$options->{root}/tmp")
+ or error "Can't opendir($options->{root}/tmp): $!";
+ while (my $entry = readdir $dh) {
+ # skip the "." and ".." entries
+ next if $entry eq ".";
+ next if $entry eq "..";
+ debug "deleting files in /tmp: $entry";
+ 0 == system(
+ 'rm', '--interactive=never',
+ '--recursive', '--preserve-root',
+ '--one-file-system', "$options->{root}/tmp/$entry"
+ ) or error "rm failed: $?";
+ }
+ closedir($dh);
+ }
+ }
+
+ if (any { $_ eq 'cleanup/dev' } @{ $options->{skip} }) {
+ info "skipping cleanup/dev as requested";
+ } else {
+
+ # By default, tar is run with --exclude=./dev because we create the
+ # ./dev entries ourselves using @devfiles. But if --skip=output/dev is
+ # used, --exclude=./dev is not passed so that the chroot includes ./dev
+ # as created by base-files. But if mknod was available (for example
+ # when running as root) then ./dev will also include the @devfiles
+ # entries created by run_setup() and thus the resulting tarball will
+ # include things inside ./dev despite the user having supplied
+ # --skip=output/dev. So if --skip=output/dev was passed and if a
+ # tarball is to be created, we need to make sure to clean up the
+ # ./dev entries that were created in run_setup(). This is not done
+ # when creating a directory because in that case we want to do the
+ # same as debootstrap and create a directory including device nodes.
+ if ($options->{format} ne 'directory' && any { $_ eq 'output/dev' }
+ @{ $options->{skip} }) {
+ foreach my $file (@devfiles) {
+ my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
+ = @{$file};
+ if (!-e "$options->{root}/dev/$fname") {
+ next;
+ }
+ # do not remove ./dev itself
+ if ($fname eq "") {
+ next;
+ }
+ if ($type == 0) { # normal file
+ error "type 0 not implemented";
+ } elsif ($type == 1) { # hardlink
+ error "type 1 not implemented";
+ } elsif (any { $_ eq $type } (2, 3, 4))
+ { # symlink, char, block
+ unlink "$options->{root}/dev/$fname"
+ or error "failed to unlink ./dev/$fname: $!";
+ } elsif ($type == 5) { # directory
+ rmdir "$options->{root}/dev/$fname"
+ or error "failed to unlink ./dev/$fname: $!";
+ } else {
+ error "unsupported type: $type";
+ }
+ }
+ }
+ }
+ return;
+}
+
+# messages from the process inside the unshared namespace to the outside
+# openr -- open file for reading
+# openw -- open file for writing
+# mktar -- create a tarball of a path outside the chroot
+# mktac -- create a tarball of the contents of a path outside the chroot
+# untar -- extract tar into directory
+# write -- write data to last opened file or tar process
+# close -- finish file writing or tar extraction
+# adios -- last message and tear-down
+# messages from the process outside the unshared namespace to the inside
+# okthx -- success
+# (write, close and okthx also flow in the opposite direction when data is
+# transferred from the outside to the inside; either side may send "error")
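+# Each message is framed as a 2-byte big-endian payload length followed by a
+# 5-character command name and then the payload, for example:
+# pack("n", length $payload) . "write" . $payload
+# pack("n", 0) . "okthx" # a command without payload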
+sub checkokthx {
+ my $fh = shift;
+ my $ret = read($fh, my $buf, 2 + 5) // error "cannot read from socket: $!";
+ if ($ret == 0) { error "received eof on socket"; }
+ my ($len, $msg) = unpack("nA5", $buf);
+ if ($msg ne "okthx") { error "expected okthx but got: $msg"; }
+ if ($len != 0) { error "expected no payload but got $len bytes"; }
+ return;
+}
+
+# resolve a path inside a chroot
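+# For example (illustrative), chrooted_realpath("/tmp/chroot", "/usr/../etc")
+# returns "/tmp/chroot/etc"; symlinks are resolved relative to the chroot and
+# ".." never escapes above it.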
+sub chrooted_realpath {
+ my $root = shift;
+ my $src = shift;
+ my $result = $root;
+ my $prefix;
+
+ # relative paths are relative to the root of the chroot
+ # remove prefixed slashes
+ $src =~ s{^/+}{};
+ my $loop = 0;
+ while (length $src) {
+ if ($loop > 25) {
+ error "too many levels of symbolic links";
+ }
+ # Get the first directory component.
+ ($prefix, $src) = split m{/+}, $src, 2;
+ # Resolve the first directory component.
+ if ($prefix eq ".") {
+ # Ignore, stay at the same directory.
+ } elsif ($prefix eq "..") {
+ # Go up one directory.
+ $result =~ s{(.*)/[^/]*}{$1};
+ # but not further than the root
+ if ($result !~ m/^\Q$root\E/) {
+ $result = $root;
+ }
+ } elsif (-l "$result/$prefix") {
+ my $dst = readlink "$result/$prefix";
+ if ($dst =~ s{^/+}{}) {
+ # Absolute pathname, reset result back to $root.
+ $result = $root;
+ }
+ $src = length $src ? "$dst/$src" : $dst;
+ $loop++;
+ } else {
+ # Otherwise append the prefix.
+ $result = "$result/$prefix";
+ }
+ }
+ return $result;
+}
+
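+# Make $root the new root filesystem of the current mount namespace: bind
+# mount it onto /mnt, pivot_root into it and then lazily detach both the old
+# root (temporarily reachable under /tmp of the new root) and /sys.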
+sub pivot_root {
+ my $root = shift;
+ my $target = "/mnt";
+ my $put_old = "tmp";
+ 0 == syscall &SYS_mount, $root, $target, 0, $MS_REC | $MS_BIND, 0
+ or error "mount failed: $!";
+ chdir "/mnt" or error "failed chdir() to /mnt: $!";
+ 0 == syscall &SYS_pivot_root, my $new_root = ".", $put_old
+ or error "pivot_root failed: $!";
+ chroot "." or error "failed to chroot() to .: $!";
+ 0 == syscall &SYS_umount2, $put_old, $MNT_DETACH
+ or error "umount2 failed: $!";
+ 0 == syscall &SYS_umount2, my $sys = "sys", $MNT_DETACH
+ or error "umount2 failed: $!";
+ return;
+}
+
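+# hookhelper implements "mmdebstrap --hook-helper" (see main() below) and
+# talks to its hooklistener counterpart via STDIN/STDOUT using the protocol
+# described above. Its arguments are, in order: the chroot directory, the
+# mode, the hook name, the --skip options, the verbosity level, the command
+# (copy-in, copy-out, tar-in, tar-out, upload, download, sync-in or sync-out)
+# and one or more paths for that command.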
+sub hookhelper {
+ my ($root, $mode, $hook, $skipopt, $verbosity, $command, @args) = @_;
+ $verbosity_level = $verbosity;
+ my @skipopts = ();
+ if (length $skipopt) {
+ for my $skip (split /[,\s]+/, $skipopt) {
+ # strip leading and trailing whitespace
+ $skip =~ s/^\s+|\s+$//g;
+ # skip if the remainder is an empty string
+ if ($skip eq '') {
+ next;
+ }
+ push @skipopts, $skip;
+ }
+ }
+ # we put everything in an eval block because that way we can easily handle
+ # errors without goto labels or much code duplication: the error handler
+ # has to send an "error" message to the other side
+ eval {
+
+ my @cmdprefix = ();
+ my @tarcmd = (
+ 'tar', '--numeric-owner', '--xattrs', '--format=pax',
+ '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
+ . 'delete=atime,delete=ctime'
+ );
+ if ($hook eq 'setup') {
+ } elsif (any { $_ eq $hook } ('extract', 'essential', 'customize')) {
+ if ($mode eq 'fakechroot') {
+ # Fakechroot requires tar to run inside the chroot or
+ # otherwise absolute symlinks will include the path to the
+ # root directory
+ push @cmdprefix, 'chroot', $root;
+ } elsif (any { $_ eq $mode } ('root', 'chrootless', 'unshare')) {
+ # not chrooting in this case
+ } else {
+ error "unknown mode: $mode";
+ }
+ } else {
+ error "unknown hook: $hook";
+ }
+
+ if (any { $_ eq $command } ('copy-in', 'tar-in', 'upload', 'sync-in'))
+ {
+ if (scalar @args < 2) {
+ error "$command needs at least one path on the"
+ . " outside and the output path inside the chroot";
+ }
+ my $outpath = pop @args;
+ foreach my $file (@args) {
+ # the right argument for tar's --directory argument depends on
+ # whether tar is called from inside the chroot or from the
+ # outside
+ my $directory;
+ if ($hook eq 'setup') {
+ # tar runs outside, so acquire the correct path
+ $directory = chrooted_realpath $root, $outpath;
+ } elsif (any { $_ eq $hook }
+ ('extract', 'essential', 'customize')) {
+ if ($mode eq 'fakechroot') {
+ # tar will run inside the chroot
+ $directory = $outpath;
+ } elsif (any { $_ eq $mode }
+ ('root', 'chrootless', 'unshare')) {
+ $directory = chrooted_realpath $root, $outpath;
+ } else {
+ error "unknown mode: $mode";
+ }
+ } else {
+ error "unknown hook: $hook";
+ }
+
+ # if chrooted_realpath was used (i.e. not in fakechroot mode, where
+ # tar runs inside the chroot and absolute symlinks would be broken)
+ # we can check and potentially fail early if the target does not exist
+ if ($mode ne 'fakechroot') {
+ my $dirtocheck = $directory;
+ if ($command eq 'upload') {
+ # check the parent directory instead
+ $dirtocheck =~ s/(.*)\/[^\/]*/$1/;
+ }
+ if (!-e $dirtocheck) {
+ error "path does not exist: $dirtocheck";
+ }
+ if (!-d $dirtocheck) {
+ error "path is not a directory: $dirtocheck";
+ }
+ }
+
+ my $fh;
+ if ($command eq 'upload') {
+ # open the requested file for writing
+ open $fh, '|-', @cmdprefix, 'sh', '-c', 'cat > "$1"',
+ 'exec', $directory // error "failed to fork(): $!";
+ } elsif (any { $_ eq $command }
+ ('copy-in', 'tar-in', 'sync-in')) {
+ # open a tar process that extracts the tarfile that we
+ # supply it with on stdin to the output directory inside
+ # the chroot
+ my @cmd = (
+ @cmdprefix, @tarcmd, '--xattrs-include=*',
+ '--directory', $directory, '--extract', '--file', '-'
+ );
+ # go via mmtarfilter if copy-in/mknod, tar-in/mknod or
+ # sync-in/mknod were part of the skip options
+ if (any { $_ eq "$command/mknod" } @skipopts) {
+ info "skipping $command/mknod as requested";
+ my $tarfilter = "mmtarfilter";
+ if (-x "./tarfilter") {
+ $tarfilter = "./tarfilter";
+ }
+ pipe my $filter_reader, $fh or error "pipe failed: $!";
+ pipe my $tar_reader, my $filter_writer
+ or error "pipe failed: $!";
+ my $pid1 = fork() // error "fork() failed: $!";
+ if ($pid1 == 0) {
+ open(STDIN, '<&', $filter_reader)
+ or error "cannot open STDIN: $!";
+ open(STDOUT, '>&', $filter_writer)
+ or error "cannot open STDOUT: $!";
+ close($tar_reader)
+ or error "cannot close tar_reader: $!";
+ debug(
+ "helper: running $tarfilter --type-exclude=3 "
+ . "--type-exclude=4");
+ eval { Devel::Cover::set_coverage("none") }
+ if $is_covering;
+ exec $tarfilter, '--type-exclude=3',
+ '--type-exclude=4';
+ }
+ my $pid2 = fork() // error "fork() failed: $!";
+ if ($pid2 == 0) {
+ open(STDIN, '<&', $tar_reader)
+ or error "cannot open STDIN: $!";
+ close($filter_writer)
+ or error "cannot close filter_writer: $!";
+ debug("helper: running " . (join " ", @cmd));
+ eval { Devel::Cover::set_coverage("none") }
+ if $is_covering;
+ exec { $cmd[0] } @cmd;
+ }
+ } else {
+ debug("helper: running " . (join " ", @cmd));
+ open($fh, '|-', @cmd) // error "failed to fork(): $!";
+ }
+ } else {
+ error "unknown command: $command";
+ }
+
+ if ($command eq 'copy-in') {
+ # instruct the parent process to create a tarball of the
+ # requested path outside the chroot
+ debug "helper: sending mktar";
+ print STDOUT (pack("n", length $file) . "mktar" . $file);
+ } elsif ($command eq 'sync-in') {
+ # instruct the parent process to create a tarball of the
+ # content of the requested path outside the chroot
+ debug "helper: sending mktac";
+ print STDOUT (pack("n", length $file) . "mktac" . $file);
+ } elsif (any { $_ eq $command } ('upload', 'tar-in')) {
+ # instruct parent process to open a tarball of the
+ # requested path outside the chroot for reading
+ debug "helper: sending openr";
+ print STDOUT (pack("n", length $file) . "openr" . $file);
+ } else {
+ error "unknown command: $command";
+ }
+ STDOUT->flush();
+ debug "helper: waiting for okthx";
+ checkokthx \*STDIN;
+
+ # handle "write" messages from the parent process and feed
+ # their payload into the tar process until a "close" message
+ # is encountered
+ while (1) {
+ # receive the next message
+ my $ret = read(STDIN, my $buf, 2 + 5)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ my ($len, $msg) = unpack("nA5", $buf);
+ debug "helper: received message: $msg";
+ if ($msg eq "close") {
+ # finish the loop
+ if ($len != 0) {
+ error "expected no payload but got $len bytes";
+ }
+ debug "helper: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ last;
+ } elsif ($msg ne "write") {
+ error "expected write but got: $msg";
+ }
+ # read the payload
+ my $content;
+ {
+ my $ret = read(STDIN, $content, $len)
+ // error "error cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # write the payload to the tar process
+ print $fh $content
+ or error "cannot write to tar process: $!";
+ debug "helper: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ }
+ close $fh;
+ if ($command ne 'upload' and $? != 0) {
+ error "tar failed";
+ }
+ }
+ } elsif (any { $_ eq $command }
+ ('copy-out', 'tar-out', 'download', 'sync-out')) {
+ if (scalar @args < 2) {
+ error "$command needs at least one path inside the chroot and"
+ . " the output path on the outside";
+ }
+ my $outpath = pop @args;
+ foreach my $file (@args) {
+ # the right argument for tar's --directory argument depends on
+ # whether tar is called from inside the chroot or from the
+ # outside
+ my $directory;
+ if ($hook eq 'setup') {
+ # tar runs outside, so acquire the correct path
+ $directory = chrooted_realpath $root, $file;
+ } elsif (any { $_ eq $hook }
+ ('extract', 'essential', 'customize')) {
+ if ($mode eq 'fakechroot') {
+ # tar will run inside the chroot
+ $directory = $file;
+ } elsif (any { $_ eq $mode }
+ ('root', 'chrootless', 'unshare')) {
+ $directory = chrooted_realpath $root, $file;
+ } else {
+ error "unknown mode: $mode";
+ }
+ } else {
+ error "unknown hook: $hook";
+ }
+
+ # if chrooted_realpath was used (i.e. not in fakechroot mode, where
+ # tar runs inside the chroot and absolute symlinks would be broken)
+ # we can check and potentially fail early if the source does not exist
+ if ($mode ne 'fakechroot') {
+ if (!-e $directory) {
+ error "path does not exist: $directory";
+ }
+ if ($command eq 'download') {
+ if (!-f $directory) {
+ error "path is not a file: $directory";
+ }
+ }
+ }
+
+ my $fh;
+ if ($command eq 'download') {
+ # open the requested file for reading
+ open $fh, '-|', @cmdprefix, 'sh', '-c', 'cat "$1"',
+ 'exec', $directory // error "failed to fork(): $!";
+ } elsif ($command eq 'sync-out') {
+ # Open a tar process that creates a tarfile of everything
+ # inside the requested directory inside the chroot and
+ # writes it to stdout.
+ my @cmd = (
+ @cmdprefix, @tarcmd, '--directory',
+ $directory, '--create', '--file', '-', '.'
+ );
+ debug("helper: running " . (join " ", @cmd));
+ open($fh, '-|', @cmd) // error "failed to fork(): $!";
+ } elsif (any { $_ eq $command } ('copy-out', 'tar-out')) {
+ # Open a tar process that creates a tarfile of the
+ # requested directory inside the chroot and writes it to
+ # stdout. To emulate the behaviour of cp, change to the
+ # dirname of the requested path first.
+ my @cmd = (
+ @cmdprefix, @tarcmd, '--directory',
+ dirname($directory), '--create', '--file', '-',
+ basename($directory));
+ debug("helper: running " . (join " ", @cmd));
+ open($fh, '-|', @cmd) // error "failed to fork(): $!";
+ } else {
+ error "unknown command: $command";
+ }
+
+ if (any { $_ eq $command } ('copy-out', 'sync-out')) {
+ # instruct the parent process to extract a tarball to a
+ # certain path outside the chroot
+ debug "helper: sending untar";
+ print STDOUT (
+ pack("n", length $outpath) . "untar" . $outpath);
+ } elsif (any { $_ eq $command } ('download', 'tar-out')) {
+ # instruct parent process to open a tarball of the
+ # requested path outside the chroot for writing
+ debug "helper: sending openw";
+ print STDOUT (
+ pack("n", length $outpath) . "openw" . $outpath);
+ } else {
+ error "unknown command: $command";
+ }
+ STDOUT->flush();
+ debug "helper: waiting for okthx";
+ checkokthx \*STDIN;
+
+ # read from the tar process and send as payload to the parent
+ # process
+ while (1) {
+ # read from tar
+ my $ret = read($fh, my $cont, 4096)
+ // error "cannot read from pipe: $!";
+ if ($ret == 0) { last; }
+ debug "helper: sending write";
+ # send to parent
+ print STDOUT pack("n", $ret) . "write" . $cont;
+ STDOUT->flush();
+ debug "helper: waiting for okthx";
+ checkokthx \*STDIN;
+ if ($ret < 4096) { last; }
+ }
+
+ # signal to the parent process that we are done
+ debug "helper: sending close";
+ print STDOUT pack("n", 0) . "close";
+ STDOUT->flush();
+ debug "helper: waiting for okthx";
+ checkokthx \*STDIN;
+
+ close $fh;
+ if ($? != 0) {
+ error "$command failed";
+ }
+ }
+ } else {
+ error "unknown command: $command";
+ }
+ };
+ if ($@) {
+ # inform the other side that something went wrong
+ print STDOUT (pack("n", 0) . "error");
+ STDOUT->flush();
+ error "hookhelper failed: $@";
+ }
+ return;
+}
+
+sub hooklistener {
+ $verbosity_level = shift;
+ # we put everything in an eval block because that way we can easily handle
+ # errors without goto labels or much code duplication: the error handler
+ # has to send an "error" message to the other side
+ eval {
+ while (1) {
+ # get the next message
+ my $msg = "error";
+ my $len = -1;
+ {
+ debug "listener: reading next command";
+ my $ret = read(STDIN, my $buf, 2 + 5)
+ // error "cannot read from socket: $!";
+ debug "listener: finished reading command";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ ($len, $msg) = unpack("nA5", $buf);
+ }
+ if ($msg eq "adios") {
+ debug "listener: received message: adios";
+ # setup finished, so we break out of the loop
+ if ($len != 0) {
+ error "expected no payload but got $len bytes";
+ }
+ last;
+ } elsif ($msg eq "openr") {
+ # handle the openr message
+ debug "listener: received message: openr";
+ my $infile;
+ {
+ my $ret = read(STDIN, $infile, $len)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # make sure that the requested path exists outside the chroot
+ if (!-e $infile) {
+ error "$infile does not exist";
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+
+ open my $fh, '<', $infile
+ or error "failed to open $infile for reading: $!";
+
+ # read from the file and send as payload to the child process
+ while (1) {
+ # read from file
+ my $ret = read($fh, my $cont, 4096)
+ // error "cannot read from pipe: $!";
+ if ($ret == 0) { last; }
+ debug "listener: sending write";
+ # send to child
+ print STDOUT pack("n", $ret) . "write" . $cont;
+ STDOUT->flush();
+ debug "listener: waiting for okthx";
+ checkokthx \*STDIN;
+ if ($ret < 4096) { last; }
+ }
+
+ # signal to the child process that we are done
+ debug "listener: sending close";
+ print STDOUT pack("n", 0) . "close";
+ STDOUT->flush();
+ debug "listener: waiting for okthx";
+ checkokthx \*STDIN;
+
+ close $fh;
+ } elsif ($msg eq "openw") {
+ debug "listener: received message: openw";
+ # payload is the output directory
+ my $outfile;
+ {
+ my $ret = read(STDIN, $outfile, $len)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # make sure that the directory exists
+ my $outdir = dirname($outfile);
+ if (-e $outdir) {
+ if (!-d $outdir) {
+ error "$outdir already exists but is not a directory";
+ }
+ } else {
+ my $num_created = make_path $outdir, { error => \my $err };
+ if ($err && @$err) {
+ error(
+ join "; ",
+ (
+ map { "cannot create " . (join ": ", %{$_}) }
+ @$err
+ ));
+ } elsif ($num_created == 0) {
+ error "cannot create $outdir";
+ }
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+
+ # now we expect one or more "write" messages containing the
+ # tarball to write
+ open my $fh, '>', $outfile
+ or error "failed to open $outfile for writing: $!";
+
+ # handle "write" messages from the child process and feed
+ # their payload into the file handle until a "close" message
+ # is encountered
+ while (1) {
+ # receive the next message
+ my $ret = read(STDIN, my $buf, 2 + 5)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ my ($len, $msg) = unpack("nA5", $buf);
+ debug "listener: received message: $msg";
+ if ($msg eq "close") {
+ # finish the loop
+ if ($len != 0) {
+ error "expected no payload but got $len bytes";
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ last;
+ } elsif ($msg ne "write") {
+ # we should not receive this message at this point
+ error "expected write but got: $msg";
+ }
+ # read the payload
+ my $content;
+ {
+ my $ret = read(STDIN, $content, $len)
+ // error "error cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # write the payload to the file handle
+ print $fh $content
+ or error "cannot write to file handle: $!";
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ }
+ close $fh;
+ } elsif (any { $_ eq $msg } ('mktar', 'mktac')) {
+ # handle the mktar message
+ debug "listener: received message: $msg";
+ my $indir;
+ {
+ my $ret = read(STDIN, $indir, $len)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # make sure that the requested path exists outside the chroot
+ if (!-e $indir) {
+ error "$indir does not exist";
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+
+ # Open a tar process creating a tarfile of the instructed
+ # path. To emulate the behaviour of cp, change to the
+ # dirname of the requested path first.
+ my @cmd = (
+ 'tar',
+ '--numeric-owner',
+ '--xattrs',
+ '--format=pax',
+ '--pax-option=exthdr.name=%d/PaxHeaders/%f,'
+ . 'delete=atime,delete=ctime',
+ '--directory',
+ $msg eq 'mktar' ? dirname($indir) : $indir,
+ '--create',
+ '--file',
+ '-',
+ $msg eq 'mktar' ? basename($indir) : '.'
+ );
+ debug("listener: running " . (join " ", @cmd));
+ open(my $fh, '-|', @cmd) // error "failed to fork(): $!";
+
+ # read from the tar process and send as payload to the child
+ # process
+ while (1) {
+ # read from tar
+ my $ret = read($fh, my $cont, 4096)
+ // error "cannot read from pipe: $!";
+ if ($ret == 0) { last; }
+ debug "listener: sending write ($ret bytes)";
+ # send to child
+ print STDOUT pack("n", $ret) . "write" . $cont;
+ STDOUT->flush();
+ debug "listener: waiting for okthx";
+ checkokthx \*STDIN;
+ if ($ret < 4096) { last; }
+ }
+
+ # signal to the child process that we are done
+ debug "listener: sending close";
+ print STDOUT pack("n", 0) . "close";
+ STDOUT->flush();
+ debug "listener: waiting for okthx";
+ checkokthx \*STDIN;
+
+ close $fh;
+ if ($? != 0) {
+ error "tar failed";
+ }
+ } elsif ($msg eq "untar") {
+ debug "listener: received message: untar";
+ # payload is the output directory
+ my $outdir;
+ {
+ my $ret = read(STDIN, $outdir, $len)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # make sure that the directory exists
+ if (-e $outdir) {
+ if (!-d $outdir) {
+ error "$outdir already exists but is not a directory";
+ }
+ } else {
+ my $num_created = make_path $outdir, { error => \my $err };
+ if ($err && @$err) {
+ error(
+ join "; ",
+ (
+ map { "cannot create " . (join ": ", %{$_}) }
+ @$err
+ ));
+ } elsif ($num_created == 0) {
+ error "cannot create $outdir";
+ }
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+
+ # now we expect one or more "write" messages containing the
+ # tarball to unpack
+ open my $fh, '|-', 'tar', '--numeric-owner', '--xattrs',
+ '--xattrs-include=*', '--directory', $outdir,
+ '--extract', '--file',
+ '-' // error "failed to fork(): $!";
+
+ # handle "write" messages from the child process and feed
+ # their payload into the tar process until a "close" message
+ # is encountered
+ while (1) {
+ # receive the next message
+ my $ret = read(STDIN, my $buf, 2 + 5)
+ // error "cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ my ($len, $msg) = unpack("nA5", $buf);
+ debug "listener: received message: $msg";
+ if ($msg eq "close") {
+ # finish the loop
+ if ($len != 0) {
+ error "expected no payload but got $len bytes";
+ }
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ last;
+ } elsif ($msg ne "write") {
+ # we should not receive this message at this point
+ error "expected write but got: $msg";
+ }
+ # read the payload
+ my $content;
+ {
+ my $ret = read(STDIN, $content, $len)
+ // error "error cannot read from socket: $!";
+ if ($ret == 0) {
+ error "received eof on socket";
+ }
+ }
+ # write the payload to the tar process
+ print $fh $content
+ or error "cannot write to tar process: $!";
+ debug "listener: sending okthx";
+ print STDOUT (pack("n", 0) . "okthx")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ }
+ close $fh;
+ if ($? != 0) {
+ error "tar failed";
+ }
+ } elsif ($msg eq "error") {
+ error "received error on socket";
+ } else {
+ error "unknown message: $msg";
+ }
+ }
+ };
+ if ($@) {
+ warning("hooklistener errored out: $@");
+ # inform the other side that something went wrong
+ print STDOUT (pack("n", 0) . "error")
+ or error "cannot write to socket: $!";
+ STDOUT->flush();
+ }
+ return;
+}
+
+# parse files of the format found in /usr/share/distro-info/ and return two
+# lists: the first contains codenames of end-of-life distros and the second
+# list contains codenames of currently active distros
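+# The expected input is a CSV file with the header line
+# "version,codename,series,created,release,eol"; a hypothetical row such as
+# "99,Example,example,2030-01-01,2031-01-01,2040-01-01" would be sorted into
+# the "current" list before 2040-01-01 and into the "eol" list afterwards.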
+sub parse_distro_info {
+ my $file = shift;
+ my @eol = ();
+ my @current = ();
+ my $today = POSIX::strftime "%Y-%m-%d", localtime;
+ open my $fh, '<', $file or error "cannot open $file: $!";
+ my $i = 0;
+ while (my $line = <$fh>) {
+ chomp($line);
+ $i++;
+ my @cells = split /,/, $line;
+ if (scalar @cells < 4) {
+ error "cannot parse line $i of $file";
+ }
+ if (
+ $i == 1
+ and ( scalar @cells < 6
+ or $cells[0] ne 'version'
+ or $cells[1] ne 'codename'
+ or $cells[2] ne 'series'
+ or $cells[3] ne 'created'
+ or $cells[4] ne 'release'
+ or $cells[5] ne 'eol')
+ ) {
+ error "cannot find correct header in $file";
+ }
+ if ($i == 1) {
+ next;
+ }
+ if (scalar @cells == 6) {
+ if ($cells[5] !~ m/^\d\d\d\d-\d\d-\d\d$/) {
+ error "invalid eof date format in $file:$i: $cells[5]";
+ }
+ # since the date format is iso8601, we can use lexicographic string
+ # comparison to compare dates
+ if ($cells[5] lt $today) {
+ push @eol, $cells[2];
+ } else {
+ push @current, $cells[2];
+ }
+ } else {
+ push @current, $cells[2];
+ }
+ }
+ close $fh;
+ return ([@eol], [@current]);
+}
+
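+# Return a hash mapping vendor names (debian, ubuntu, tanglu, kali) to hashes
+# whose keys are suite names and whose values record whether that suite is
+# end-of-life (1) or still current (0).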
+sub get_suite_by_vendor {
+ my %suite_by_vendor = (
+ 'debian' => {},
+ 'ubuntu' => {},
+ 'tanglu' => {},
+ 'kali' => {},
+ );
+
+ # pre-fill with some known values
+ foreach my $suite (
+ 'potato', 'woody', 'sarge', 'etch',
+ 'lenny', 'squeeze', 'wheezy', 'jessie'
+ ) {
+ $suite_by_vendor{'debian'}->{$suite} = 1;
+ }
+ foreach my $suite (
+ 'unstable', 'stable', 'oldstable', 'stretch',
+ 'buster', 'bullseye', 'bookworm', 'trixie'
+ ) {
+ $suite_by_vendor{'debian'}->{$suite} = 0;
+ }
+ foreach my $suite ('aequorea', 'bartholomea', 'chromodoris', 'dasyatis') {
+ $suite_by_vendor{'tanglu'}->{$suite} = 0;
+ }
+ foreach my $suite ('kali-dev', 'kali-rolling', 'kali-bleeding-edge') {
+ $suite_by_vendor{'kali'}->{$suite} = 0;
+ }
+ foreach
+ my $suite ('trusty', 'xenial', 'zesty', 'artful', 'bionic', 'cosmic') {
+ $suite_by_vendor{'ubuntu'}->{$suite} = 0;
+ }
+ # if the Debian package distro-info-data is installed, then we can use it
+ # to get better data about new distros or EOL distros
+ if (-e '/usr/share/distro-info/debian.csv') {
+ my ($eol, $current)
+ = parse_distro_info('/usr/share/distro-info/debian.csv');
+ foreach my $suite (@{$eol}) {
+ $suite_by_vendor{'debian'}->{$suite} = 1;
+ }
+ foreach my $suite (@{$current}) {
+ $suite_by_vendor{'debian'}->{$suite} = 0;
+ }
+ }
+ if (-e '/usr/share/distro-info/ubuntu.csv') {
+ my ($eol, $current)
+ = parse_distro_info('/usr/share/distro-info/ubuntu.csv');
+ foreach my $suite (@{$eol}, @{$current}) {
+ $suite_by_vendor{'ubuntu'}->{$suite} = 0;
+ }
+ }
+ # if debootstrap is installed we infer distro names from the symlink
+ # targets of the scripts in /usr/share/debootstrap/scripts/
+ my $debootstrap_scripts = '/usr/share/debootstrap/scripts/';
+ if (-d $debootstrap_scripts) {
+ opendir(my $dh, $debootstrap_scripts)
+ or error "Can't opendir($debootstrap_scripts): $!";
+ while (my $suite = readdir $dh) {
+ # this is only a heuristic -- don't overwrite anything but instead
+ # just update anything that was missing
+ if (!-l "$debootstrap_scripts/$suite") {
+ next;
+ }
+ my $target = readlink "$debootstrap_scripts/$suite";
+ if ($target eq "sid"
+ and not exists $suite_by_vendor{'debian'}->{$suite}) {
+ $suite_by_vendor{'debian'}->{$suite} = 0;
+ } elsif ($target eq "gutsy"
+ and not exists $suite_by_vendor{'ubuntu'}->{$suite}) {
+ $suite_by_vendor{'ubuntu'}->{$suite} = 0;
+ } elsif ($target eq "aequorea"
+ and not exists $suite_by_vendor{'tanglu'}->{$suite}) {
+ $suite_by_vendor{'tanglu'}->{$suite} = 0;
+ } elsif ($target eq "kali"
+ and not exists $suite_by_vendor{'kali'}->{$suite}) {
+ $suite_by_vendor{'kali'}->{$suite} = 0;
+ }
+ }
+ closedir($dh);
+ }
+
+ return %suite_by_vendor;
+}
+
+# try to guess the right keyring path for the given suite
+sub get_keyring_by_suite {
+ my $query = shift;
+ my $suite_by_vendor = shift;
+
+ my $debianvendor;
+ my $ubuntuvendor;
+ # make $@ local, so we don't print "Can't locate Dpkg/Vendor/Debian.pm"
+ # in other parts where we evaluate $@
+ local $@ = '';
+ eval {
+ require Dpkg::Vendor::Debian;
+ require Dpkg::Vendor::Ubuntu;
+ $debianvendor = Dpkg::Vendor::Debian->new();
+ $ubuntuvendor = Dpkg::Vendor::Ubuntu->new();
+ };
+
+ my $keyring_by_vendor = sub {
+ my $vendor = shift;
+ my $eol = shift;
+ if ($vendor eq 'debian') {
+ if ($eol) {
+ if (defined $debianvendor) {
+ return $debianvendor->run_hook(
+ 'archive-keyrings-historic');
+ } else {
+ return
+ '/usr/share/keyrings/debian-archive-removed-keys.gpg';
+ }
+ } else {
+ if (defined $debianvendor) {
+ return $debianvendor->run_hook('archive-keyrings');
+ } else {
+ return '/usr/share/keyrings/debian-archive-keyring.gpg';
+ }
+ }
+ } elsif ($vendor eq 'ubuntu') {
+ if (defined $ubuntuvendor) {
+ return $ubuntuvendor->run_hook('archive-keyrings');
+ } else {
+ return '/usr/share/keyrings/ubuntu-archive-keyring.gpg';
+ }
+ } elsif ($vendor eq 'tanglu') {
+ return '/usr/share/keyrings/tanglu-archive-keyring.gpg';
+ } elsif ($vendor eq 'kali') {
+ return '/usr/share/keyrings/kali-archive-keyring.gpg';
+ } else {
+ error "unknown vendor: $vendor";
+ }
+ };
+ my %keyrings = ();
+ foreach my $vendor (keys %{$suite_by_vendor}) {
+ foreach my $suite (keys %{ $suite_by_vendor->{$vendor} }) {
+ my $keyring = $keyring_by_vendor->(
+ $vendor, $suite_by_vendor->{$vendor}->{$suite});
+ debug "suite $suite with keyring $keyring";
+ $keyrings{$suite} = $keyring;
+ }
+ }
+
+ if (exists $keyrings{$query}) {
+ return $keyrings{$query};
+ } else {
+ return;
+ }
+}
+
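+# Build a one-line-style sources.list for the given suite. For a Debian
+# stable release like bookworm with component "main" and no signed-by
+# setting, the result would be, for example:
+# deb http://deb.debian.org/debian bookworm main
+# deb http://deb.debian.org/debian bookworm-updates main
+# deb http://security.debian.org/debian-security bookworm-security main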
+sub get_sourceslist_by_suite {
+ my $suite = shift;
+ my $arch = shift;
+ my $signedby = shift;
+ my $compstr = shift;
+ my $suite_by_vendor = shift;
+
+ my @debstable = keys %{ $suite_by_vendor->{'debian'} };
+ my @ubuntustable = keys %{ $suite_by_vendor->{'ubuntu'} };
+ my @tanglustable = keys %{ $suite_by_vendor->{'tanglu'} };
+ my @kali = keys %{ $suite_by_vendor->{'kali'} };
+
+ my $mirror = 'http://deb.debian.org/debian';
+ my $secmirror = 'http://security.debian.org/debian-security';
+ if (any { $_ eq $suite } @ubuntustable) {
+ if (any { $_ eq $arch } ('amd64', 'i386')) {
+ $mirror = 'http://archive.ubuntu.com/ubuntu';
+ $secmirror = 'http://security.ubuntu.com/ubuntu';
+ } else {
+ $mirror = 'http://ports.ubuntu.com/ubuntu-ports';
+ $secmirror = 'http://ports.ubuntu.com/ubuntu-ports';
+ }
+ if (-e '/usr/share/debootstrap/scripts/gutsy') {
+ # try running the debootstrap script but ignore errors
+ my $script = 'set -eu;
+ default_mirror() { echo $1; };
+ mirror_style() { :; };
+ download_style() { :; };
+ finddebs_style() { :; };
+ variants() { :; };
+ keyring() { :; };
+ doing_variant() { false; };
+ info() { fmt="$2"; shift; shift; printf "I: $fmt\n" "$@" >&2; };
+ . /usr/share/debootstrap/scripts/gutsy;';
+ open my $fh, '-|', 'env', "ARCH=$arch", "SUITE=$suite",
+ 'sh', '-c', $script // last;
+ chomp(
+ my $output = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($? == 0 && $output ne '') {
+ $mirror = $output;
+ }
+ }
+ } elsif (any { $_ eq $suite } @tanglustable) {
+ $mirror = 'http://archive.tanglu.org/tanglu';
+ } elsif (any { $_ eq $suite } @kali) {
+ $mirror = 'https://http.kali.org/kali';
+ }
+ my $sourceslist = '';
+ $sourceslist .= "deb$signedby $mirror $suite $compstr\n";
+ if (any { $_ eq $suite } @ubuntustable) {
+ $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
+ $sourceslist .= "deb$signedby $secmirror $suite-security $compstr\n";
+ } elsif (any { $_ eq $suite } @tanglustable) {
+ $sourceslist .= "deb$signedby $secmirror $suite-updates $compstr\n";
+ } elsif (any { $_ eq $suite } @debstable
+ and none { $_ eq $suite } ('testing', 'unstable', 'sid')) {
+ $sourceslist .= "deb$signedby $mirror $suite-updates $compstr\n";
+ # the security mirror changes, starting with bullseye
+ # https://lists.debian.org/87r26wqr2a.fsf@43-1.org
+ my $bullseye_or_later = 0;
+ if (any { $_ eq $suite } ('stable', 'bullseye', 'bookworm', 'trixie'))
+ {
+ $bullseye_or_later = 1;
+ }
+ my $distro_info = '/usr/share/distro-info/debian.csv';
+ # make $@ local, so we don't print "Can't locate Debian/DistroInfo.pm"
+ # in other parts where we evaluate $@
+ local $@ = '';
+ eval { require Debian::DistroInfo; };
+ if (!$@) {
+ debug "libdistro-info-perl is installed";
+ my $debinfo = DebianDistroInfo->new();
+ if ($debinfo->version($suite, 0) >= 11) {
+ $bullseye_or_later = 1;
+ }
+ } elsif (-f $distro_info) {
+ debug "distro-info-data is installed";
+ open my $fh, '<', $distro_info
+ or error "cannot open $distro_info: $!";
+ my $i = 0;
+ my $matching_version;
+ my @releases;
+ my $today = POSIX::strftime "%Y-%m-%d", localtime;
+ while (my $line = <$fh>) {
+ chomp($line);
+ $i++;
+ my @cells = split /,/, $line;
+ if (scalar @cells < 4) {
+ error "cannot parse line $i of $distro_info";
+ }
+ if (
+ $i == 1
+ and ( scalar @cells < 6
+ or $cells[0] ne 'version'
+ or $cells[1] ne 'codename'
+ or $cells[2] ne 'series'
+ or $cells[3] ne 'created'
+ or $cells[4] ne 'release'
+ or $cells[5] ne 'eol')
+ ) {
+ error "cannot find correct header in $distro_info";
+ }
+ if ($i == 1) {
+ next;
+ }
+ if ( scalar @cells > 4
+ and $cells[4] =~ m/^\d\d\d\d-\d\d-\d\d$/
+ and $cells[4] lt $today) {
+ push @releases, $cells[0];
+ }
+ if (lc $cells[1] eq $suite or lc $cells[2] eq $suite) {
+ $matching_version = $cells[0];
+ last;
+ }
+ }
+ close $fh;
+ if (defined $matching_version and $matching_version >= 11) {
+ $bullseye_or_later = 1;
+ }
+ if ($suite eq "stable" and $releases[-1] >= 11) {
+ $bullseye_or_later = 1;
+ }
+ } else {
+ debug "neither libdistro-info-perl nor distro-info-data installed";
+ }
+ if ($bullseye_or_later) {
+ # starting with bullseye, the security suite is called $suite-security
+ $sourceslist
+ .= "deb$signedby $secmirror $suite-security" . " $compstr\n";
+ } else {
+ $sourceslist
+ .= "deb$signedby $secmirror $suite/updates" . " $compstr\n";
+ }
+ }
+ return $sourceslist;
+}
+
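+# Heuristically distinguish one-line-style sources ("deb http://... suite
+# main") from deb822-style sources (stanzas of "Field: value" lines such as
+# "Types: deb"). Returns 'one-line', 'deb822' or undef if neither matched.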
+sub guess_sources_format {
+ my $content = shift;
+ my $is_deb822 = 0;
+ my $is_oneline = 0;
+ for my $line (split "\n", $content) {
+ if ($line =~ /^deb(-src)? /) {
+ $is_oneline = 1;
+ last;
+ }
+ if ($line =~ /^[^#:\s]+:/) {
+ $is_deb822 = 1;
+ last;
+ }
+ }
+ if ($is_deb822) {
+ return 'deb822';
+ }
+ if ($is_oneline) {
+ return 'one-line';
+ }
+ return;
+}
+
+sub approx_disk_usage {
+ my $directory = shift;
+ info "approximating disk usage...";
+ # the "du" utility reports different results depending on the underlying
+ # filesystem, see https://bugs.debian.org/650077 for a discussion
+ #
+ # we use code similar to the one used by dpkg-gencontrol instead
+ #
+ # Regular files are measured in number of 1024 byte blocks. All other
+ # entries are assumed to take one block of space.
+ #
+ # We ignore /dev because depending on the mode, the directory might be
+ # populated or not and we want consistent disk usage results independent
+ # of the mode.
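+ # For example, with the formula below a 100 byte file adds 1 block, a
+ # 1500 byte file adds 2 blocks and a directory or device node adds 1
+ # block each.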
+ my $installed_size = 0;
+ my %hardlink;
+ my $scan_installed_size = sub {
+ if ($File::Find::name eq "$directory/dev") {
+ # add all entries of @devfiles once
+ $installed_size += scalar @devfiles;
+ return;
+ } elsif ($File::Find::name =~ /^$directory\/dev\//) {
+ # ignore everything below /dev
+ return;
+ }
+
+ lstat or error "cannot stat $File::Find::name";
+
+ if (-f _ or -l _) {
+ my ($dev, $ino, $nlink) = (lstat _)[0, 1, 3];
+ return if exists $hardlink{"$dev:$ino"};
+ # Track hardlinks to avoid repeated additions.
+ $hardlink{"$dev:$ino"} = 1 if $nlink > 1;
+ # add file size in 1024 byte blocks, rounded up
+ $installed_size += int(((-s _) + 1024) / 1024);
+ } else {
+ # all other entries are assumed to only take up one block
+ $installed_size += 1;
+ }
+ };
+ # We use no_chdir because otherwise the unshared user has to have read
+ # permissions for the current working directory when producing an ext2
+ # image. See https://bugs.debian.org/1005857
+ find({ wanted => $scan_installed_size, no_chdir => 1 }, $directory);
+
+ # because the above is only a heuristic we add 10% extra for good measure
+ return int($installed_size * 1.1);
+}
+
+sub main() {
+ my $before = Time::HiRes::time;
+
+ umask 022;
+
+ if (scalar @ARGV >= 7 && $ARGV[0] eq "--hook-helper") {
+ shift @ARGV; # shift off "--hook-helper"
+ hookhelper(@ARGV);
+ exit 0;
+ }
+
+ # this is the counterpart to --hook-helper and will receive and carry
+ # out its instructions
+ if (scalar @ARGV == 2 && $ARGV[0] eq "--hook-listener") {
+ hooklistener($ARGV[1]);
+ exit 0;
+ }
+
+ # this is like:
+ # lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' ...
+ # but without needing lxc
+ if (scalar @ARGV >= 1 && $ARGV[0] eq "--unshare-helper") {
+ if ($EFFECTIVE_USER_ID != 0) {
+ test_unshare_userns(1);
+ }
+ my @idmap = ();
+ if ($EFFECTIVE_USER_ID != 0) {
+ @idmap = read_subuid_subgid 1;
+ }
+ my $pid = get_unshare_cmd(
+ sub {
+ 0 == system @ARGV[1 .. $#ARGV] or error "system failed: $?";
+ },
+ \@idmap
+ );
+ waitpid $pid, 0;
+ $? == 0 or error "unshared command failed";
+ exit 0;
+ }
+
+ my $mtime = time;
+ if (exists $ENV{SOURCE_DATE_EPOCH}) {
+ $mtime = $ENV{SOURCE_DATE_EPOCH} + 0;
+ }
+
+ {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{DEBIAN_FRONTEND} = 'noninteractive';
+ $ENV{DEBCONF_NONINTERACTIVE_SEEN} = 'true';
+ $ENV{LC_ALL} = 'C.UTF-8';
+ $ENV{LANGUAGE} = 'C.UTF-8';
+ $ENV{LANG} = 'C.UTF-8';
+ }
+
+ # copy ARGV because getopt modifies it
+ my @ARGVORIG = @ARGV;
+
+ # obtain the correct defaults for the keyring locations that apt knows
+ # about
+ my $apttrusted
+ = `eval \$(apt-config shell v Dir::Etc::trusted/f); printf \$v`;
+ my $apttrustedparts
+ = `eval \$(apt-config shell v Dir::Etc::trustedparts/d); printf \$v`;
+
+ chomp(my $hostarch = `dpkg --print-architecture`);
+ my $options = {
+ components => ["main"],
+ variant => "important",
+ include => [],
+ architectures => [$hostarch],
+ mode => 'auto',
+ format => 'auto',
+ dpkgopts => '',
+ aptopts => '',
+ apttrusted => $apttrusted,
+ apttrustedparts => $apttrustedparts,
+ noop => [],
+ setup_hook => [],
+ extract_hook => [],
+ essential_hook => [],
+ customize_hook => [],
+ dryrun => 0,
+ skip => [],
+ };
+ my $logfile = undef;
+ Getopt::Long::Configure('default', 'bundling', 'auto_abbrev',
+ 'ignore_case_always');
+ GetOptions(
+ 'h|help' => sub { pod2usage(-exitval => 0, -verbose => 1) },
+ 'man' => sub { pod2usage(-exitval => 0, -verbose => 2) },
+ 'version' => sub { print STDOUT "mmdebstrap $VERSION\n"; exit 0; },
+ 'components=s@' => \$options->{components},
+ 'variant=s' => \$options->{variant},
+ 'include=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ my $sanitize_path = sub {
+ my $pkg = shift;
+ $pkg = abs_path($pkg)
+ // error "cannot resolve absolute path of $pkg: $!";
+ if ($pkg !~ /^\//) {
+ error "absolute path of $pkg doesn't start with a slash";
+ }
+ if (!-f $pkg) {
+ error "$pkg is not an existing file";
+ }
+ if (!-r $pkg) {
+ error "$pkg is not readable";
+ }
+ return $pkg;
+ };
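+ # Examples (illustrative): --include='?priority(required)' is passed
+ # to apt verbatim, --include=./pkg.deb is resolved to an absolute path
+ # and --include=foo,bar is split into the two package names foo and bar.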
+ if ($opt_value =~ /^[?~!(]/) {
+ # Treat option as a single apt pattern and don't split by comma
+ # or whitespace -- append it verbatim.
+ push @{ $options->{include} }, $opt_value;
+ } elsif ($opt_value =~ /^\.?\.?\//) {
+ # Treat option as a single path name and don't split by comma
+ # or whitespace -- append the normalized path.
+ push @{ $options->{include} }, &{$sanitize_path}($opt_value);
+ } else {
+ for my $pkg (split /[,\s]+/, $opt_value) {
+ # strip leading and trailing whitespace
+ $pkg =~ s/^\s+|\s+$//g;
+ # skip if the remainder is an empty string
+ if ($pkg eq '') {
+ next;
+ }
+ # Make paths canonical absolute paths, resolve symlinks
+ # and check if it's an existing file.
+ if ($pkg =~ /^\.?\.?\//) {
+ $pkg = &{$sanitize_path}($pkg);
+ }
+ push @{ $options->{include} }, $pkg;
+ }
+ }
+ # We are not sorting or otherwise normalizing the order of
+ # arguments to apt because package order matters for "apt install"
+ # since https://salsa.debian.org/apt-team/apt/-/merge_requests/256
+ },
+ 'architectures=s@' => \$options->{architectures},
+ 'mode=s' => \$options->{mode},
+ 'dpkgopt=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ if (-r $opt_value) {
+ open my $fh, '<', $opt_value
+ or error "failed to open $opt_value: $!";
+ $options->{dpkgopts} .= do { local $/; <$fh> };
+ if ($options->{dpkgopts} !~ /\n$/) {
+ $options->{dpkgopts} .= "\n";
+ }
+ close $fh;
+ } else {
+ $options->{dpkgopts} .= $opt_value;
+ if ($opt_value !~ /\n$/) {
+ $options->{dpkgopts} .= "\n";
+ }
+ }
+ },
+ 'aptopt=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ if (-r $opt_value) {
+ open my $fh, '<', $opt_value
+ or error "failed to open $opt_value: $!";
+ $options->{aptopts} .= do { local $/; <$fh> };
+ if ($options->{aptopts} !~ /\n$/) {
+ $options->{aptopts} .= "\n";
+ }
+ close $fh;
+ } else {
+ $options->{aptopts} .= $opt_value;
+ if ($opt_value !~ /;$/) {
+ $options->{aptopts} .= ';';
+ }
+ if ($opt_value !~ /\n$/) {
+ $options->{aptopts} .= "\n";
+ }
+ }
+ },
+ 'keyring=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ if ($opt_value =~ /"/) {
+ error "--keyring: apt cannot handle paths with double quotes:"
+ . " $opt_value";
+ }
+ if (!-e $opt_value) {
+ error "keyring \"$opt_value\" does not exist";
+ }
+ my $abs_path = abs_path($opt_value);
+ if (!defined $abs_path) {
+ error "unable to get absolute path of --keyring: $opt_value";
+ }
+ # since abs_path resolved all symlinks for us, we can now test
+ # what the target actually is
+ if (-d $abs_path) {
+ $options->{apttrustedparts} = $abs_path;
+ } else {
+ $options->{apttrusted} = $abs_path;
+ }
+ },
+ 's|silent' => sub { $verbosity_level = 0; },
+ 'q|quiet' => sub { $verbosity_level = 0; },
+ 'v|verbose' => sub { $verbosity_level = 2; },
+ 'd|debug' => sub { $verbosity_level = 3; },
+ 'format=s' => \$options->{format},
+ 'logfile=s' => \$logfile,
+ # no-op options so that mmdebstrap can be used with
+ # sbuild-createchroot --debootstrap=mmdebstrap
+ 'resolve-deps' => sub { push @{ $options->{noop} }, 'resolve-deps'; },
+ 'merged-usr' => sub { push @{ $options->{noop} }, 'merged-usr'; },
+ 'no-merged-usr' =>
+ sub { push @{ $options->{noop} }, 'no-merged-usr'; },
+ 'force-check-gpg' =>
+ sub { push @{ $options->{noop} }, 'force-check-gpg'; },
+ 'setup-hook=s' => sub {
+ push @{ $options->{setup_hook} }, ["normal", $_[1]];
+ },
+ 'extract-hook=s' => sub {
+ push @{ $options->{extract_hook} }, ["normal", $_[1]];
+ },
+ 'chrooted-extract-hook=s' => sub {
+ push @{ $options->{extract_hook} }, ["pivoted", $_[1]];
+ },
+ 'essential-hook=s' => sub {
+ push @{ $options->{essential_hook} }, ["normal", $_[1]];
+ },
+ 'chrooted-essential-hook=s' => sub {
+ push @{ $options->{essential_hook} }, ["pivoted", $_[1]];
+ },
+ 'customize-hook=s' => sub {
+ push @{ $options->{customize_hook} }, ["normal", $_[1]];
+ },
+ 'chrooted-customize-hook=s' => sub {
+ push @{ $options->{customize_hook} }, ["pivoted", $_[1]];
+ },
+ 'hook-directory=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ if (!-e $opt_value) {
+ error "hook directory \"$opt_value\" does not exist";
+ }
+ my $abs_path = abs_path($opt_value);
+ if (!defined $abs_path) {
+ error( "unable to get absolute path of "
+ . "--hook-directory: $opt_value");
+ }
+ # since abs_path resolved all symlinks for us, we can now test
+ # what the target actually is
+ if (!-d $opt_value) {
+ error "hook directory \"$opt_value\" is not a directory";
+ }
+ # gather all files starting with special prefixes into the
+ # respective keys of a hash
+ my %scripts;
+ my $count = 0;
+ opendir(my $dh, $opt_value)
+ or error "Can't opendir($opt_value): $!";
+ while (my $entry = readdir $dh) {
+ # skip the "." and ".." entries
+ next if $entry eq ".";
+ next if $entry eq "..";
+ my $found = 0;
+ foreach
+ my $hook ('setup', 'extract', 'essential', 'customize') {
+ if ($entry =~ m/^\Q$hook\E/) {
+ if (-x "$opt_value/$entry") {
+ push @{ $scripts{$hook} }, "$opt_value/$entry";
+ $count += 1;
+ $found = 1;
+ } else {
+ warning("$opt_value/$entry is named like a "
+ . "hook but not executable");
+ }
+ }
+ }
+ if (!$found && -x "$opt_value/$entry") {
+ warning("$opt_value/$entry: is executable "
+ . "but not prefixed with a hook name");
+ }
+ }
+ closedir($dh);
+ if ($count == 0) {
+ warning "No executable hook scripts found in $opt_value";
+ return;
+ }
+ # add the sorted list associated with each key to the respective
+ # list of hooks
+ foreach my $hook (keys %scripts) {
+ push @{ $options->{"${hook}_hook"} },
+ (map { ["normal", $_] } (sort @{ $scripts{$hook} }));
+ }
+ },
+ # Sometimes --simulate fails even though non-simulate succeeds because
+ # in simulate mode, apt cannot rely on dpkg to figure out tricky
+ # dependency situations and will give up instead when it cannot find
+ # a solution.
+ #
+ # 2020-02-06, #debian-apt on OFTC, times in UTC+1
+ # 12:52 < DonKult> [...] It works in non-simulation because simulate is
+ # more picky. If you wanna know why simulate complains
+ # here prepare for long suffering in dependency hell.
+ 'simulate' => \$options->{dryrun},
+ 'dry-run' => \$options->{dryrun},
+ 'skip=s' => sub {
+ my ($opt_name, $opt_value) = @_;
+ for my $skip (split /[,\s]+/, $opt_value) {
+ # strip leading and trailing whitespace
+ $skip =~ s/^\s+|\s+$//g;
+ # skip if the remainder is an empty string
+ if ($skip eq '') {
+ next;
+ }
+ push @{ $options->{skip} }, $skip;
+ }
+ }) or pod2usage(-exitval => 2, -verbose => 0);
+
+ if (defined($logfile)) {
+ open(STDERR, '>', $logfile) or error "cannot open $logfile: $!";
+ }
+
+ foreach my $arg (@{ $options->{noop} }) {
+ info "the option --$arg is a no-op. It only exists for compatibility"
+ . " with some debootstrap wrappers.";
+ }
+
+ if ($options->{dryrun}) {
+ foreach my $hook ('setup', 'extract', 'essential', 'customize') {
+ if (scalar @{ $options->{"${hook}_hook"} } > 0) {
+ warning "In dry-run mode, --$hook-hook options have no effect";
+ }
+ if ($options->{mode} eq 'chrootless') {
+ foreach my $script (@{ $options->{"${hook}_hook"} }) {
+ if ($script->[0] eq "pivoted") {
+ error "--chrooted-$hook-hook are illegal in "
+ . "chrootless mode";
+ }
+ }
+ }
+ }
+ }
+
+ my @valid_variants = (
+ 'extract', 'custom', 'essential', 'apt',
+ 'required', 'minbase', 'buildd', 'important',
+ 'debootstrap', '-', 'standard'
+ );
+ if (none { $_ eq $options->{variant} } @valid_variants) {
+ error "invalid variant. Choose from " . (join ', ', @valid_variants);
+ }
+ # debootstrap and - are an alias for important
+ if (any { $_ eq $options->{variant} } ('-', 'debootstrap')) {
+ $options->{variant} = 'important';
+ }
+ # minbase is an alias for required
+ if ($options->{variant} eq 'minbase') {
+ $options->{variant} = 'required';
+ }
+
+ # fakeroot is an alias for fakechroot
+ if ($options->{mode} eq 'fakeroot') {
+ $options->{mode} = 'fakechroot';
+ }
+ # sudo is an alias for root
+ if ($options->{mode} eq 'sudo') {
+ $options->{mode} = 'root';
+ }
+ my @valid_modes = ('auto', 'root', 'unshare', 'fakechroot', 'chrootless');
+ if (none { $_ eq $options->{mode} } @valid_modes) {
+ error "invalid mode. Choose from " . (join ', ', @valid_modes);
+ }
+
+ # sqfs is an alias for squashfs
+ if ($options->{format} eq 'sqfs') {
+ $options->{format} = 'squashfs';
+ }
+ # dir is an alias for directory
+ if ($options->{format} eq 'dir') {
+ $options->{format} = 'directory';
+ }
+ my @valid_formats
+ = ('auto', 'directory', 'tar', 'squashfs', 'ext2', 'null');
+ if (none { $_ eq $options->{format} } @valid_formats) {
+ error "invalid format. Choose from " . (join ', ', @valid_formats);
+ }
+
+ # setting PATH for chroot, ldconfig, start-stop-daemon...
+ my $defaultpath = `eval \$(apt-config shell v DPkg::Path); printf \$v`;
+ if (length $ENV{PATH}) {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{PATH} = "$ENV{PATH}:$defaultpath";
+ } else {
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{PATH} = $defaultpath;
+ }
+
+ foreach my $tool (
+ 'dpkg', 'dpkg-deb', 'apt-get', 'apt-cache',
+ 'apt-config', 'tar', 'rm', 'find',
+ 'env'
+ ) {
+ if (!can_execute $tool) {
+ error "cannot find $tool";
+ }
+ }
+
+ {
+ my $dpkgversion = version->new(0);
+ my $pid = open my $fh, '-|' // error "failed to fork(): $!";
+ if ($pid == 0) {
+ # redirect stderr to /dev/null to hide error messages from dpkg
+ # versions before 1.20.0
+ open(STDERR, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ exec 'dpkg', '--robot', '--version';
+ }
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ # the --robot option was introduced in 1.20.0 but until 1.20.2 the
+ # output contained a string after the version, separated by a
+ # whitespace -- since then, it's only the version
+ if ($? == 0 and $content =~ /^([0-9.]+).*$/) {
+ # dpkg is new enough for the --robot option
+ $dpkgversion = version->new($1);
+ }
+ if ($dpkgversion < "1.20.0") {
+ error "need dpkg >= 1.20.0 but have $dpkgversion";
+ }
+ }
+
+ {
+ my $aptversion = version->new(0);
+ my $pid = open my $fh, '-|', 'apt-get',
+ '--version' // error "failed to fork(): $!";
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ( $? == 0
+ and $content =~ /^apt (\d+\.\d+\.\d+)\S* \(\S+\)$/am) {
+ $aptversion = version->new($1);
+ }
+ if ($aptversion < "2.3.14") {
+ error "need apt >= 2.3.14 but have $aptversion";
+ }
+ }
+
+ my $check_fakechroot_running = sub {
+ # test if we are inside fakechroot already
+ # We fork a child process because setting FAKECHROOT_DETECT seems to
+ # be an irreversible operation for fakechroot.
+ my $pid = open my $rfh, '-|' // error "failed to fork(): $!";
+ if ($pid == 0) {
+ # with the FAKECHROOT_DETECT environment variable set, any program
+ # execution will be replaced with the output "fakeroot [version]"
+ local $ENV{FAKECHROOT_DETECT} = 0;
+ exec 'echo', 'If fakechroot is running, this will not be printed';
+ }
+ my $content = do { local $/; <$rfh> };
+ waitpid $pid, 0;
+ my $result = 0;
+ if ($? == 0 and $content =~ /^fakechroot [0-9.]+$/) {
+ $result = 1;
+ }
+ return $result;
+ };
+
+ # figure out the mode to use or test whether the chosen mode is legal
+ if ($options->{mode} eq 'auto') {
+ if (&{$check_fakechroot_running}()) {
+ # if mmdebstrap is executed inside fakechroot, then we assume the
+ # user expects fakechroot mode
+ $options->{mode} = 'fakechroot';
+ } elsif ($EFFECTIVE_USER_ID == 0) {
+ # if mmdebstrap is executed as root, we assume the user wants root
+ # mode
+ $options->{mode} = 'root';
+ } elsif (test_unshare_userns(0)) {
+ # if we are not root, unshare mode is our best option if
+ # test_unshare_userns() succeeds
+ $options->{mode} = 'unshare';
+ } elsif (can_execute 'fakechroot') {
+ # the next fallback is fakechroot
+ # exec ourselves again but within fakechroot
+ my @prefix = ();
+ if ($is_covering) {
+ @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
+ }
+ exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
+ } else {
+ error( "unable to pick chroot mode automatically (use --mode for "
+ . "manual selection)");
+ }
+ info "automatically chosen mode: $options->{mode}";
+ } elsif ($options->{mode} eq 'root') {
+ if ($EFFECTIVE_USER_ID != 0) {
+ error "need to be root";
+ }
+ } elsif ($options->{mode} eq 'fakechroot') {
+ if (&{$check_fakechroot_running}()) {
+ # fakechroot is already running
+ } elsif (!can_execute 'fakechroot') {
+ error "need working fakechroot binary";
+ } else {
+ # exec ourselves again but within fakechroot
+ my @prefix = ();
+ if ($is_covering) {
+ @prefix = ($EXECUTABLE_NAME, '-MDevel::Cover=-silent,-nogcov');
+ }
+ exec 'fakechroot', 'fakeroot', @prefix, $PROGRAM_NAME, @ARGVORIG;
+ }
+ } elsif ($options->{mode} eq 'unshare') {
+ # For unshare mode to work we either need to already be the root user
+ # and then we do not have to unshare the user namespace anymore but we
+ # need to be able to unshare the mount namespace...
+ #
+ # We need to call unshare with "--propagation unchanged" or otherwise
+ # we get 'cannot change root filesystem propagation' when running
+ # mmdebstrap inside a chroot for which the root of the chroot is not
+ # its own mount point.
+ if ($EFFECTIVE_USER_ID == 0
+ && 0 != system 'unshare --mount --propagation unchanged -- true') {
+ error "unable to unshare the mount namespace";
+ }
+ # ...or we are not root and then we need to be able to unshare the user
+ # namespace.
+ if ($EFFECTIVE_USER_ID != 0) {
+ test_unshare_userns(1);
+ }
+ } elsif ($options->{mode} eq 'chrootless') {
+ if (any { $_ eq 'check/chrootless' } @{ $options->{skip} }) {
+ info "skipping check/chrootless as requested";
+ } else {
+ my $ischroot = 0 == system 'ischroot';
+ if ( $EFFECTIVE_USER_ID == 0
+ && !exists $ENV{FAKEROOTKEY}
+ && !$ischroot) {
+ error
+ "running chrootless mode as root without fakeroot might "
+ . "damage the host system if not run inside a chroot";
+ }
+ }
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ $options->{canmount} = 1;
+ if ($options->{mode} eq 'root') {
+ # It's possible to be root but not be able to mount anything.
+ # This is for example the case when running under docker.
+ # Mounting needs CAP_SYS_ADMIN which might not be available.
+ #
+ # We test for CAP_SYS_ADMIN using the capget syscall.
+ # We cannot use cap_get_proc from sys/capability.h because Perl.
+ # We don't use capsh because we don't want to depend on libcap2-bin
+ my $hdrp = pack(
+ "Li", # __u32 followed by int
+ $_LINUX_CAPABILITY_VERSION_3, # available since Linux 2.6.26
+ 0 # caps of this process
+ );
+ my $datap = pack("LLLLLL", 0, 0, 0, 0, 0, 0); # six __u32
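+ # With $_LINUX_CAPABILITY_VERSION_3 the kernel fills in two
+ # __user_cap_data_struct entries (effective, permitted and inheritable
+ # as one __u32 each), covering the lower and upper 32 bits of the 64-bit
+ # capability sets -- hence the six __u32 above. Reading only the first
+ # (lower) effective word is enough here because CAP_SYS_ADMIN is
+ # capability number 21.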
+ 0 == syscall &SYS_capget, $hdrp, $datap
+ or error "capget failed: $!";
+ my ($effective, undef) = unpack "LLLLLL", $datap;
+ if ((($effective >> $CAP_SYS_ADMIN) & 1) != 1) {
+ warning
+ "cannot mount because CAP_SYS_ADMIN is not in the effective set";
+ $options->{canmount} = 0;
+ }
+ if (0 == syscall &SYS_prctl, $PR_CAPBSET_READ, $CAP_SYS_ADMIN) {
+ warning
+ "cannot mount because CAP_SYS_ADMIN is not in the bounding set";
+ $options->{canmount} = 0;
+ }
+ # To test whether we can use mount without actually trying to mount
+ # something we try unsharing the mount namespace. If this is allowed,
+ # then we are also allowed to mount.
+ #
+ # We need to call unshare with "--propagation unchanged" or otherwise
+ # we get 'cannot change root filesystem propagation' when running
+ # mmdebstrap inside a chroot for which the root of the chroot is not
+ # its own mount point.
+ if (0 != system 'unshare --mount --propagation unchanged -- true') {
+ # if we cannot unshare the mount namespace as root, then we also
+ # cannot mount
+ warning "cannot mount because unshare --mount failed";
+ $options->{canmount} = 0;
+ }
+ }
+
+ if (any { $_ eq $options->{mode} } ('root', 'unshare')) {
+ if (!can_execute 'mount') {
+ warning "cannot execute mount";
+ $options->{canmount} = 0;
+ }
+ }
+
+ # we can only possibly mount in root and unshare mode
+ if (none { $_ eq $options->{mode} } ('root', 'unshare')) {
+ $options->{canmount} = 0;
+ }
+
+ my @architectures = ();
+ foreach my $archs (@{ $options->{architectures} }) {
+ foreach my $arch (split /[,\s]+/, $archs) {
+ # strip leading and trailing whitespace
+ $arch =~ s/^\s+|\s+$//g;
+ # skip if the remainder is an empty string
+ if ($arch eq '') {
+ next;
+ }
+ # do not append architecture if it's already in the list
+ if (any { $_ eq $arch } @architectures) {
+ next;
+ }
+ push @architectures, $arch;
+ }
+ }
+
+ $options->{nativearch} = $hostarch;
+ $options->{foreignarchs} = [];
+ if (scalar @architectures == 0) {
+ warning "empty architecture list: falling back to native architecture"
+ . " $hostarch";
+ } elsif (scalar @architectures == 1) {
+ $options->{nativearch} = $architectures[0];
+ } else {
+ $options->{nativearch} = $architectures[0];
+ push @{ $options->{foreignarchs} },
+ @architectures[1 .. $#architectures];
+ }
+
+ debug "Native architecture (outside): $hostarch";
+ debug "Native architecture (inside): $options->{nativearch}";
+ debug("Foreign architectures (inside): "
+ . (join ', ', @{ $options->{foreignarchs} }));
+
+ {
+ # FIXME: autogenerate this list
+ my $deb2qemu = {
+ alpha => 'alpha',
+ amd64 => 'x86_64',
+ arm => 'arm',
+ arm64 => 'aarch64',
+ armel => 'arm',
+ armhf => 'arm',
+ hppa => 'hppa',
+ i386 => 'i386',
+ m68k => 'm68k',
+ mips => 'mips',
+ mips64 => 'mips64',
+ mips64el => 'mips64el',
+ mipsel => 'mipsel',
+ powerpc => 'ppc',
+ ppc64 => 'ppc64',
+ ppc64el => 'ppc64le',
+ riscv64 => 'riscv64',
+ s390x => 's390x',
+ sh4 => 'sh4',
+ sparc => 'sparc',
+ sparc64 => 'sparc64',
+ };
+ if (any { $_ eq 'check/qemu' } @{ $options->{skip} }) {
+ info "skipping check/qemu as requested";
+ } elsif ($options->{mode} eq "chrootless") {
+ info "skipping emulation check in chrootless mode";
+ } elsif ($options->{variant} eq "extract") {
+ info "skipping emulation check for extract variant";
+ } elsif ($hostarch ne $options->{nativearch}) {
+ if (!can_execute 'arch-test') {
+ error "install arch-test for foreign architecture support";
+ }
+ my $withemu = 0;
+ my $noemu = 0;
+ {
+ my $pid = open my $fh, '-|' // error "failed to fork(): $!";
+ if ($pid == 0) {
+ {
+ ## no critic (TestingAndDebugging::ProhibitNoWarnings)
+ # don't print a warning if the following fails
+ no warnings;
+ exec 'arch-test', $options->{nativearch};
+ }
+ # if exec didn't work (for example because the arch-test
+ # program is missing) prepare for the worst and assume that
+ # the architecture cannot be executed
+ print "$options->{nativearch}: not supported on this"
+ . " machine/kernel\n";
+ exit 1;
+ }
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($? == 0 and $content eq "$options->{nativearch}: ok") {
+ $withemu = 1;
+ }
+ }
+ {
+ my $pid = open my $fh, '-|' // error "failed to fork(): $!";
+ if ($pid == 0) {
+ {
+ ## no critic (TestingAndDebugging::ProhibitNoWarnings)
+ # don't print a warning if the following fails
+ no warnings;
+ exec 'arch-test', '-n', $options->{nativearch};
+ }
+ # if exec didn't work (for example because the arch-test
+ # program is missing) prepare for the worst and assume that
+ # the architecture cannot be executed
+ print "$options->{nativearch}: not supported on this"
+ . " machine/kernel\n";
+ exit 1;
+ }
+ chomp(
+ my $content = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($? == 0 and $content eq "$options->{nativearch}: ok") {
+ $noemu = 1;
+ }
+ }
+ # four different outcomes, depending on whether arch-test
+ # succeeded with or without emulation
+ #
+ # withemu | noemu |
+ # --------+-------+-----------------
+ # 0 | 0 | test why emu doesn't work and quit
+ # 0 | 1 | should never happen
+ # 1 | 0 | use qemu emulation
+ # 1 | 1 | don't use qemu emulation
+ if ($withemu == 0 and $noemu == 0) {
+ {
+ open my $fh, '<', '/proc/filesystems'
+ or error "failed to open /proc/filesystems: $!";
+ unless (grep { /^nodev\tbinfmt_misc$/ } (<$fh>)) {
+ warning "binfmt_misc not found in /proc/filesystems --"
+ . " is the module loaded?";
+ }
+ close $fh;
+ }
+ {
+ open my $fh, '<', '/proc/mounts'
+ or error "failed to open /proc/mounts: $!";
+ unless (
+ grep {
+ /^binfmt_misc\s+
+ \/proc\/sys\/fs\/binfmt_misc\s+
+ binfmt_misc\s+/x
+ } (<$fh>)
+ ) {
+ warning "binfmt_misc not found in /proc/mounts -- not"
+ . " mounted?";
+ }
+ close $fh;
+ }
+ {
+ if (!exists $deb2qemu->{ $options->{nativearch} }) {
+ warning "no mapping from $options->{nativearch} to"
+ . " qemu-user binary";
+ } elsif (!can_execute 'update-binfmts') {
+ warning "cannot find update-binfmts";
+ } else {
+ my $binfmt_identifier
+ = 'qemu-' . $deb2qemu->{ $options->{nativearch} };
+ open my $fh, '-|', 'update-binfmts', '--display',
+ $binfmt_identifier // error "failed to fork(): $!";
+ chomp(
+ my $binfmts = do { local $/; <$fh> }
+ );
+ close $fh;
+ if ($? != 0 || $binfmts eq '') {
+ warning "$binfmt_identifier is not a supported"
+ . " binfmt name";
+ }
+ }
+ }
+ error "$options->{nativearch} can neither be executed natively"
+ . " nor via qemu user emulation with binfmt_misc";
+ } elsif ($withemu == 0 and $noemu == 1) {
+ error "arch-test succeeded without emu but not with emu";
+ } elsif ($withemu == 1 and $noemu == 0) {
+ info "$options->{nativearch} cannot be executed natively, but"
+ . " transparently using qemu-user binfmt emulation";
+ if (!exists $deb2qemu->{ $options->{nativearch} }) {
+ error "no mapping from $options->{nativearch} to qemu-user"
+ . " binary";
+ }
+ $options->{qemu} = $deb2qemu->{ $options->{nativearch} };
+ } elsif ($withemu == 1 and $noemu == 1) {
+ info "$options->{nativearch} is different from $hostarch but"
+ . " can be executed natively";
+ } else {
+ error "logic error";
+ }
+ } else {
+ info "chroot architecture $options->{nativearch} is equal to the"
+ . " host's architecture";
+ }
+ }
+
+ if (defined $options->{qemu} && $options->{mode} eq 'fakechroot') {
+ if (!can_execute 'dpkg-architecture') {
+ error "cannot find dpkg-architecture";
+ }
+ }
+
+ {
+ $options->{suite} = undef;
+ if (scalar @ARGV > 0) {
+ $options->{suite} = shift @ARGV;
+ if (scalar @ARGV > 0) {
+ $options->{target} = shift @ARGV;
+ } else {
+ $options->{target} = '-';
+ }
+ } else {
+ info
+ "No SUITE specified, expecting sources.list on standard input";
+ $options->{target} = '-';
+ }
+
+ my $sourceslists = [];
+ if (!defined $options->{suite}) {
+ # If no suite was specified, then the whole sources.list has to
+ # come from standard input
+ info "reading sources.list from standard input...";
+ my $content = do {
+ local $/;
+ ## no critic (InputOutput::ProhibitExplicitStdin)
+ <STDIN>;
+ };
+ if ($content eq "") {
+ warning "sources.list from standard input is empty";
+ } else {
+ my $type = guess_sources_format($content);
+ if (!defined $type
+ || ($type ne "deb822" and $type ne "one-line")) {
+ error "cannot determine sources.list format";
+ }
+ push @{$sourceslists},
+ {
+ type => $type,
+ fname => undef,
+ content => $content,
+ };
+ }
+ } else {
+ my @components = ();
+ foreach my $comp (@{ $options->{components} }) {
+ my @comps = split /[,\s]+/, $comp;
+ foreach my $c (@comps) {
+ # strip leading and trailing whitespace
+ $c =~ s/^\s+|\s+$//g;
+ # skip if the remainder is an empty string
+ if ($c eq "") {
+ next;
+ }
+ # do not append component if it's already in the list
+ if (any { $_ eq $c } @components) {
+ next;
+ }
+ push @components, $c;
+ }
+ }
+ my $compstr = join " ", @components;
+ # From the suite name we can maybe infer which key we need. If we
+ # can infer this information, then we need to check whether the
+ # currently running apt actually trusts this key or not. If it
+ # doesn't, then we need to add a signed-by line to the sources.list
+ # entry.
+ my $signedby = '';
+ my %suite_by_vendor = get_suite_by_vendor();
+ my $gpgproc = sub {
+ my $keyring
+ = get_keyring_by_suite($options->{suite}, \%suite_by_vendor);
+ if (!defined $keyring) {
+ debug "get_keyring_by_suite() cannot find keyring";
+ return '';
+ }
+
+ # we can only check whether we need the signed-by entry if the
+ # automatically chosen keyring exists
+ if (!defined $keyring || !-e $keyring) {
+ debug "found keyring does not exist";
+ return '';
+ }
+
+ # we can only check key material if gpg is installed
+ my $gpghome = tempdir(
+ "mmdebstrap.gpghome.XXXXXXXXXXXX",
+ TMPDIR => 1,
+ CLEANUP => 1
+ );
+ my @gpgcmd = (
+ 'gpg', '--quiet',
+ '--ignore-time-conflict', '--no-options',
+ '--no-default-keyring', '--homedir',
+ $gpghome, '--no-auto-check-trustdb',
+ );
+ my ($ret, $message);
+ {
+ my $fh;
+ {
+ # change warning handler to prevent message
+ # Can't exec "gpg": No such file or directory
+ local $SIG{__WARN__} = sub { $message = shift; };
+ $ret = open $fh, '-|', @gpgcmd, '--version';
+ }
+ # we only want to check if the gpg command exists
+ close $fh;
+ }
+ if ($? != 0 || !defined $ret || defined $message) {
+ warning
+ "gpg --version failed: cannot infer signed-by value";
+ return '';
+ }
+ # initialize gpg trustdb with empty one
+ {
+ 0 == system(@gpgcmd, '--update-trustdb')
+ or error "gpg failed to initialize trustdb:: $?";
+ }
+ if (!-d $options->{apttrustedparts}) {
+ warning "$options->{apttrustedparts} doesn't exist";
+ return '';
+ }
+ # find all the fingerprints of the keys apt currently
+ # knows about
+ my @keyrings = ();
+ opendir my $dh, $options->{apttrustedparts}
+ or error "cannot read $options->{apttrustedparts}";
+ while (my $filename = readdir $dh) {
+ if ($filename !~ /\.(asc|gpg)$/) {
+ next;
+ }
+ $filename = "$options->{apttrustedparts}/$filename";
+ # skip empty keyrings
+ -s "$filename" || next;
+ push @keyrings, $filename;
+ }
+ closedir $dh;
+ if (-s $options->{apttrusted}) {
+ push @keyrings, $options->{apttrusted};
+ }
+ my @aptfingerprints = ();
+ if (scalar @keyrings == 0) {
+ debug "no keyring is trusted by apt";
+ return " [signed-by=\"$keyring\"]";
+ }
+ info "finding correct signed-by value...";
+ my $progress = 0.0;
+ print_progress($progress);
+ for (my $i = 0 ; $i < scalar @keyrings ; $i++) {
+ my $k = $keyrings[$i];
+ open(my $fh, '-|', @gpgcmd, '--with-colons',
+ '--show-keys', $k) // error "failed to fork(): $!";
+ while (my $line = <$fh>) {
+ if ($line !~ /^fpr:::::::::([^:]+):/) {
+ next;
+ }
+ push @aptfingerprints, $1;
+ }
+ close $fh;
+ if ($? != 0) {
+ warning("gpg failed to read $k");
+ }
+ print_progress($i / (scalar @keyrings) * 100.0, undef);
+ }
+ print_progress("done");
+ if (scalar @aptfingerprints == 0) {
+ debug "no fingerprints found";
+ return " [signed-by=\"$keyring\"]";
+ }
+ # check if all fingerprints from the keyring that we guessed
+ # are known by apt and only add signed-by option if that's not
+ # the case
+ my @suitefingerprints = ();
+ {
+ open(my $fh, '-|', @gpgcmd, '--with-colons', '--show-keys',
+ $keyring) // error "failed to fork(): $!";
+ while (my $line = <$fh>) {
+ if ($line !~ /^fpr:::::::::([^:]+):/) {
+ next;
+ }
+ # if this fingerprint is not known by apt, then we need
+ # to add the signed-by option
+ if (none { $_ eq $1 } @aptfingerprints) {
+ debug "fingerprint $1 is not trusted by apt";
+ return " [signed-by=\"$keyring\"]";
+ }
+ }
+ close $fh;
+ if ($? != 0) {
+ warning "gpg failed -- cannot infer signed-by value";
+ }
+ }
+ return '';
+ };
+ if (any { $_ eq 'check/signed-by' } @{ $options->{skip} }) {
+ info "skipping check/signed-by as requested";
+ } else {
+ $signedby = $gpgproc->();
+ }
+ if (scalar @ARGV > 0) {
+ for my $arg (@ARGV) {
+ if ($arg eq '-') {
+ info 'reading sources.list from standard input...';
+ my $content = do {
+ local $/;
+ ## no critic (InputOutput::ProhibitExplicitStdin)
+ <STDIN>;
+ };
+ if ($content eq "") {
+ warning
+ "sources.list from standard input is empty";
+ } else {
+ my $type = guess_sources_format($content);
+ if (!defined $type
+ || ($type ne 'deb822' and $type ne 'one-line'))
+ {
+ error "cannot determine sources.list format";
+ }
+ # if last entry is of same type and without filename,
+ # then append
+ if ( scalar @{$sourceslists} > 0
+ && $sourceslists->[-1]{type} eq $type
+ && !defined $sourceslists->[-1]{fname}) {
+ $sourceslists->[-1]{content}
+ .= ($type eq 'one-line' ? "\n" : "\n\n")
+ . $content;
+ } else {
+ push @{$sourceslists},
+ {
+ type => $type,
+ fname => undef,
+ content => $content,
+ };
+ }
+ }
+ } elsif ($arg =~ /^deb(-src)? /) {
+ my $content = "$arg\n";
+ # if last entry is of same type and without filename,
+ # then append
+ if ( scalar @{$sourceslists} > 0
+ && $sourceslists->[-1]{type} eq 'one-line'
+ && !defined $sourceslists->[-1]{fname}) {
+ $sourceslists->[-1]{content} .= "\n" . $content;
+ } else {
+ push @{$sourceslists},
+ {
+ type => 'one-line',
+ fname => undef,
+ content => $content,
+ };
+ }
+ } elsif ($arg =~ /:\/\//) {
+ my $content = join ' ',
+ (
+ "deb$signedby",
+ $arg, $options->{suite}, "$compstr\n"
+ );
+ # if last entry is of same type and without filename,
+ # then append
+ if ( scalar @{$sourceslists} > 0
+ && $sourceslists->[-1]{type} eq 'one-line'
+ && !defined $sourceslists->[-1]{fname}) {
+ $sourceslists->[-1]{content} .= "\n" . $content;
+ } else {
+ push @{$sourceslists},
+ {
+ type => 'one-line',
+ fname => undef,
+ content => $content,
+ };
+ }
+ } elsif (-f $arg) {
+ my $content = '';
+ open my $fh, '<', $arg or error "cannot open $arg: $!";
+ while (my $line = <$fh>) {
+ $content .= $line;
+ }
+ close $fh;
+ if ($content eq "") {
+ warning "$arg is empty";
+ } else {
+ my $type = undef;
+ if ($arg =~ /\.list$/) {
+ $type = 'one-line';
+ } elsif ($arg =~ /\.sources$/) {
+ $type = 'deb822';
+ } else {
+ $type = guess_sources_format($content);
+ }
+ if (!defined $type
+ || ($type ne 'deb822' and $type ne 'one-line'))
+ {
+ error "cannot determine sources.list format";
+ }
+ push @{$sourceslists},
+ {
+ type => $type,
+ fname => basename($arg),
+ content => $content,
+ };
+ }
+ } elsif ($arg eq '') {
+ # empty
+ } else {
+ error "invalid mirror: $arg";
+ }
+ }
+ } else {
+ my $sourceslist
+ = get_sourceslist_by_suite($options->{suite},
+ $options->{nativearch},
+ $signedby, $compstr, \%suite_by_vendor);
+ push @{$sourceslists},
+ {
+ type => 'one-line',
+ fname => undef,
+ content => $sourceslist,
+ };
+ }
+ }
+ if (scalar @{$sourceslists} == 0) {
+ warning "empty apt sources.list";
+ }
+ debug("sources list entries:");
+ for my $list (@{$sourceslists}) {
+ if (defined $list->{fname}) {
+ debug("fname: $list->{fname}");
+ }
+ debug("type: $list->{type}");
+ debug("content:");
+ for my $line (split "\n", $list->{content}) {
+ debug(" $line");
+ }
+ }
+ $options->{sourceslists} = $sourceslists;
+ }
+
+ if ($options->{target} eq '-') {
+ if (POSIX::isatty STDOUT) {
+ error "stdout is a an interactive tty";
+ }
+ } else {
+ my $abs_path = abs_path($options->{target});
+ if (!defined $abs_path) {
+ error "unable to get absolute path of target directory"
+ . " $options->{target}";
+ }
+ $options->{target} = $abs_path;
+ }
+
+ if ($options->{target} eq '/') {
+ error "refusing to use the filesystem root as output directory";
+ }
+
+ my $tar_compressor = get_tar_compressor($options->{target});
+
+ # figure out the right format
+ if ($options->{format} eq 'auto') {
+ # (stat(...))[6] is the device identifier which contains the major and
+ # minor numbers for character special files
+ # major 1 and minor 3 is /dev/null on Linux
+ if ( $options->{target} eq '/dev/null'
+ and $OSNAME eq 'linux'
+ and -c '/dev/null'
+ and major((stat("/dev/null"))[6]) == 1
+ and minor((stat("/dev/null"))[6]) == 3) {
+ $options->{format} = 'null';
+ } elsif ($options->{target} eq '-'
+ and $OSNAME eq 'linux'
+ and major((stat(STDOUT))[6]) == 1
+ and minor((stat(STDOUT))[6]) == 3) {
+ # by checking the major and minor number of the STDOUT fd we also
+ # can detect redirections to /dev/null and choose the null format
+ # accordingly
+ $options->{format} = 'null';
+ } elsif ($options->{target} ne '-' and -d $options->{target}) {
+ $options->{format} = 'directory';
+ } elsif (
+ defined $tar_compressor
+ or $options->{target} =~ /\.tar$/
+ or $options->{target} eq '-'
+ or -p $options->{target} # named pipe (fifo)
+ or -c $options->{target} # character special like /dev/null
+ ) {
+ $options->{format} = 'tar';
+ # check if the compressor is installed
+ if (defined $tar_compressor) {
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ open(STDOUT, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ open(STDIN, '<', '/dev/null')
+ or error "cannot open /dev/null for reading: $!";
+ exec { $tar_compressor->[0] } @{$tar_compressor}
+ or error("cannot exec "
+ . (join " ", @{$tar_compressor})
+ . ": $!");
+ }
+ waitpid $pid, 0;
+ if ($? != 0) {
+ error("failed to start " . (join " ", @{$tar_compressor}));
+ }
+ }
+ } elsif ($options->{target} =~ /\.(squashfs|sqfs)$/) {
+ $options->{format} = 'squashfs';
+ # check if tar2sqfs is installed
+ my $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ open(STDOUT, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ open(STDIN, '<', '/dev/null')
+ or error "cannot open /dev/null for reading: $!";
+ exec('tar2sqfs', '--version')
+ or error("cannot exec tar2sqfs --version: $!");
+ }
+ waitpid $pid, 0;
+ if ($? != 0) {
+ error("failed to start tar2sqfs --version");
+ }
+ } elsif ($options->{target} =~ /\.ext2$/) {
+ $options->{format} = 'ext2';
+ # check if the installed version of genext2fs supports tarballs on
+ # stdin
+ (undef, my $filename) = tempfile(
+ "mmdebstrap.ext2.XXXXXXXXXXXX",
+ OPEN => 0,
+ TMPDIR => 1
+ );
+ open my $fh, '|-', 'genext2fs', '-B', '1024', '-b', '8', '-N',
+ '11', '-a', '-', $filename // error "failed to fork(): $!";
+ # write 10240 null-bytes to genext2fs -- this represents an empty
+ # tar archive
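+ # (an empty archive is two 512-byte zero blocks, padded up to tar's
+ # default 10240-byte record size)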
+ print $fh ("\0" x 10240)
+ or error "cannot write to genext2fs process";
+ close $fh;
+ my $exitstatus = $?;
+ unlink $filename // die "cannot unlink $filename";
+ if ($exitstatus != 0) {
+ error "genext2fs failed with exit status: $exitstatus";
+ }
+ } else {
+ $options->{format} = 'directory';
+ }
+ info "automatically chosen format: $options->{format}";
+ }
+
+ if ( $options->{target} eq '-'
+ and $options->{format} ne 'tar'
+ and $options->{format} ne 'null') {
+ error "the $options->{format} format is unable to write to stdout";
+ }
+
+ if ($options->{format} eq 'null'
+ and none { $_ eq $options->{target} } ('-', '/dev/null')) {
+ info "ignoring target $options->{target} with null format";
+ }
+
+ if ($options->{format} eq 'ext2') {
+ if (!can_execute 'genext2fs') {
+ error "need genext2fs for ext2 format";
+ }
+ } elsif ($options->{format} eq 'squashfs') {
+ if (!can_execute 'tar2sqfs') {
+ error "need tar2sqfs binary from the squashfs-tools-ng package";
+ }
+ }
+
+ if (any { $_ eq $options->{format} } ('tar', 'squashfs', 'ext2', 'null')) {
+ if ($options->{format} ne 'null') {
+ if (any { $_ eq $options->{variant} } ('extract', 'custom')
+ and $options->{mode} eq 'fakechroot') {
+ info "creating a tarball or squashfs image or ext2 image in"
+ . " fakechroot mode might fail in extract and"
+ . " custom variants because there might be no tar inside the"
+ . " chroot";
+ }
+ # try to fail early if target tarball or squashfs image cannot be
+ # opened for writing
+ if ($options->{target} ne '-') {
+ if ($options->{dryrun}) {
+ if (-e $options->{target}) {
+ info "not overwriting $options->{target} because in"
+ . " dry-run mode";
+ }
+ } else {
+ open my $fh, '>', $options->{target}
+ or error
+ "cannot open $options->{target} for writing: $!";
+ close $fh;
+ }
+ }
+ }
+ # since the output is a tarball, we create the rootfs in a temporary
+ # directory
+ $options->{root} = tempdir('mmdebstrap.XXXXXXXXXX', TMPDIR => 1);
+ info "using $options->{root} as tempdir";
+ # in unshare and root mode, other users than the current user need to
+ # access the rootfs, most prominently, the _apt user. Thus, make the
+ # temporary directory world readable.
+ if (
+ any { $_ eq $options->{mode} } ('unshare', 'root')
+ or ($EFFECTIVE_USER_ID == 0 and $options->{mode} eq 'chrootless')
+ ) {
+ chmod 0755, $options->{root} or error "cannot chmod root: $!";
+ }
+ } elsif ($options->{format} eq 'directory') {
+ # user does not seem to have specified a tarball as output, thus work
+ # directly in the supplied directory
+ $options->{root} = $options->{target};
+ if (-e $options->{root}) {
+ if (!-d $options->{root}) {
+ error "$options->{root} exists and is not a directory";
+ }
+ if (any { $_ eq 'check/empty' } @{ $options->{skip} }) {
+ info "skipping check/empty as requested";
+ } else {
+ # check if the directory is empty or contains nothing more than
+ # an empty lost+found directory. The latter exists on freshly
+ # created ext3 and ext4 partitions.
+ # rationale for requiring an empty directory:
+ # https://bugs.debian.org/833525
+ opendir(my $dh, $options->{root})
+ or error "Can't opendir($options->{root}): $!";
+ while (my $entry = readdir $dh) {
+ # skip the "." and ".." entries
+ next if $entry eq ".";
+ next if $entry eq "..";
+ # if the entry is a directory named "lost+found" then skip
+ # it, if it's empty
+ if ($entry eq "lost+found"
+ and -d "$options->{root}/$entry") {
+ opendir(my $dh2, "$options->{root}/$entry");
+ # Attempt reading the directory thrice. If the third
+ # time succeeds, then it has more entries than just "."
+ # and ".." and must thus not be empty.
+ readdir $dh2;
+ readdir $dh2;
+ # rationale for requiring an empty directory:
+ # https://bugs.debian.org/833525
+ if (readdir $dh2) {
+ error "$options->{root} contains a non-empty"
+ . " lost+found directory";
+ }
+ closedir($dh2);
+ } else {
+ error "$options->{root} is not empty";
+ }
+ }
+ closedir($dh);
+ }
+ } else {
+ my $num_created = make_path "$options->{root}",
+ { error => \my $err };
+ if ($err && @$err) {
+ error(join "; ",
+ (map { "cannot create " . (join ": ", %{$_}) } @$err));
+ } elsif ($num_created == 0) {
+ error "cannot create $options->{root}";
+ }
+ }
+ } else {
+ error "unknown format: $options->{format}";
+ }
+
+ # check for double quotes because apt doesn't allow escaping them and
+ # thus paths with double quotes are invalid in the apt config
+ if ($options->{root} =~ /"/) {
+ error "apt cannot handle paths with double quotes";
+ }
+
+ my @idmap;
+ # for unshare mode the rootfs directory has to have appropriate
+ # permissions
+ if ($EFFECTIVE_USER_ID != 0 and $options->{mode} eq 'unshare') {
+ @idmap = read_subuid_subgid 1;
+ # sanity check
+ if ( scalar(@idmap) != 2
+ || $idmap[0][0] ne 'u'
+ || $idmap[1][0] ne 'g'
+ || !length $idmap[0][2]
+ || !length $idmap[1][2]) {
+ error "invalid idmap";
+ }
+
+ my $outer_gid = $REAL_GROUP_ID + 0;
+
+ my $pid = get_unshare_cmd(
+ sub { chown 1, 1, $options->{root} },
+ [
+ ['u', '0', $REAL_USER_ID, '1'],
+ ['g', '0', $outer_gid, '1'],
+ ['u', '1', $idmap[0][2], '1'],
+ ['g', '1', $idmap[1][2], '1']]);
+ waitpid $pid, 0;
+ $? == 0 or error "chown failed";
+ }
+
+ # check if .deb files given by --include are readable by the unshared user
+ if ($options->{mode} eq 'unshare'
+ and scalar(grep { /^\// } @{ $options->{include} }) > 0) {
+ my $pid = get_unshare_cmd(
+ sub {
+ my $ret = 0;
+ foreach my $f (grep { /^\// } @{ $options->{include} }) {
+ # open the file for real because -r will report the file as
+ # readable even though open will fail (in contrast to the
+ # coreutils test utility, perl doesn't use faccessat)
+ my $res = open(my $fh, '<', $f);
+ if (!$res) {
+ warning "unshared user cannot access $f for reading";
+ $ret = 1;
+ } else {
+ close $fh;
+ }
+ }
+ exit $ret;
+ },
+ \@idmap
+ );
+ waitpid $pid, 0;
+ if ($? != 0) {
+ warning("apt on the outside is run as the unshared user and "
+ . "needs read access to packages outside the chroot given "
+ . "via --include");
+ }
+ }
+
+ # figure out whether we have mknod
+ $options->{havemknod} = 0;
+ if ($options->{mode} eq 'unshare') {
+ my $pid = get_unshare_cmd(
+ sub {
+ $options->{havemknod} = havemknod($options->{root});
+ },
+ \@idmap
+ );
+ waitpid $pid, 0;
+ $? == 0 or error "havemknod failed";
+ } elsif (any { $_ eq $options->{mode} }
+ ('root', 'fakechroot', 'chrootless')) {
+ $options->{havemknod} = havemknod($options->{root});
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ # If a tarball is to be created, we always (except if --skip=output/dev is
+ # passed) craft the /dev entries ourselves.
+ # Why do we put /dev entries in the final tarball?
+ # - because debootstrap does it
+ # - because schroot (#856877) and pbuilder rely on it and we care about
+ # Debian buildds (using schroot) and reproducible builds infra (using
+ # pbuilder)
+ # If both of the above assumptions change, we can stop creating /dev
+ # entries as well.
+ my $devtar = '';
+ if (any { $_ eq $options->{format} } ('tar', 'squashfs', 'ext2')) {
+ foreach my $file (@devfiles) {
+ my ($fname, $mode, $type, $linkname, $devmajor, $devminor)
+ = @{$file};
+ if (length "./dev/$fname" > 100) {
+ error "tar entry cannot exceed 100 characters";
+ }
+ if ($type == 3
+ and any { $_ eq 'output/mknod' } @{ $options->{skip} }) {
+ info "skipping output/mknod as requested for ./dev/$fname";
+ next;
+ }
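+ # Assemble the 512-byte ustar header by hand. The lowercase 'a'
+ # fields pad with NUL bytes while the uppercase 'A8' checksum field
+ # pads with spaces, which is what the checksum computation below
+ # expects (the checksum is calculated as if the field were all
+ # blanks and then stored as six octal digits, NUL and space).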
+ my $entry = pack(
+ 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a8 a32 a32 a8 a8 a155 x12',
+ "./dev/$fname",
+ sprintf('%07o', $mode),
+ sprintf('%07o', 0), # uid
+ sprintf('%07o', 0), # gid
+ sprintf('%011o', 0), # size
+ sprintf('%011o', $mtime),
+ '', # checksum
+ $type,
+ $linkname,
+ "ustar ",
+ '', # username
+ '', # groupname
+ defined($devmajor) ? sprintf('%07o', $devmajor) : '',
+ defined($devminor) ? sprintf('%07o', $devminor) : '',
+ '', # prefix
+ );
+ # compute and insert checksum
+ substr($entry, 148, 7)
+ = sprintf("%06o\0", unpack("%16C*", $entry));
+ $devtar .= $entry;
+ }
+ } elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
+ # nothing to do
+ } else {
+ error "unknown format: $options->{format}";
+ }
+
+ my $exitstatus = 0;
+ my @taropts = (
+ '--sort=name',
+ "--mtime=\@$mtime",
+ '--clamp-mtime',
+ '--numeric-owner',
+ '--one-file-system',
+ '--format=pax',
+ '--pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime',
+ '-c',
+ '--exclude=./lost+found'
+ );
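+ # --sort=name, --mtime with --clamp-mtime and the pax-option above
+ # (which drops the varying atime and ctime from the extended headers)
+ # keep the resulting tarball reproducible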
+ # only exclude ./dev if device nodes are written out (the default)
+ if (none { $_ eq 'output/dev' } @{ $options->{skip} }) {
+ push @taropts, '--exclude=./dev';
+ }
+ # tar2sqfs and genext2fs do not support extended attributes
+ if ($options->{format} eq "squashfs") {
+ # tar2sqfs supports user.*, trusted.* and security.* but not system.*
+ # https://bugs.debian.org/988100
+ # lib/sqfs/xattr/xattr.c of https://github.com/AgentD/squashfs-tools-ng
+ # https://github.com/AgentD/squashfs-tools-ng/issues/83
+ # https://github.com/AgentD/squashfs-tools-ng/issues/25
+ warning("tar2sqfs does not support extended attributes"
+ . " from the 'system' namespace");
+ push @taropts, '--xattrs', '--xattrs-exclude=system.*';
+ } elsif ($options->{format} eq "ext2") {
+ warning "genext2fs does not support extended attributes";
+ } else {
+ push @taropts, '--xattrs';
+ }
+
+ # disable signals so that we can fork and change behaviour of the signal
+ # handler in the parent and child without getting interrupted
+ my $sigset = POSIX::SigSet->new(SIGINT, SIGHUP, SIGPIPE, SIGTERM);
+ POSIX::sigprocmask(SIG_BLOCK, $sigset) or error "Can't block signals: $!";
+
+ my $pid;
+
+ # a pipe to transfer the final tarball from the child to the parent
+ pipe my $rfh, my $wfh;
+
+ # instead of two pipe calls, creating four file handles, we use socketpair
+ socketpair my $childsock, my $parentsock, AF_UNIX, SOCK_STREAM, PF_UNSPEC
+ or error "socketpair failed: $!";
+ $options->{hooksock} = $childsock;
+ # for communicating the required number of blocks, we don't need
+ # bidirectional communication, so a pipe() is enough
+ # we don't communicate this via the hook communication because
+ # a) this would abuse the functionality exclusively for hooks
+ # b) it puts code writing the protocol outside of the helper/listener
+ # c) the forked listener process cannot communicate to its parent
+ pipe my $nblkreader, my $nblkwriter or error "pipe failed: $!";
+ if ($options->{mode} eq 'unshare') {
+ $pid = get_unshare_cmd(
+ sub {
+ # child
+ local $SIG{'INT'} = 'DEFAULT';
+ local $SIG{'HUP'} = 'DEFAULT';
+ local $SIG{'PIPE'} = 'DEFAULT';
+ local $SIG{'TERM'} = 'DEFAULT';
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ close $rfh;
+ close $parentsock;
+ open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
+
+ setup($options);
+
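+ # signal to the parent that setup is finished: a 16-bit big-endian
+ # zero followed by the literal string "adios" ends the hook
+ # communication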
+ print $childsock (pack('n', 0) . 'adios');
+ $childsock->flush();
+
+ close $childsock;
+
+ close $nblkreader;
+ if (!$options->{dryrun} && $options->{format} eq 'ext2') {
+ my $numblocks = approx_disk_usage($options->{root});
+ print $nblkwriter "$numblocks\n";
+ $nblkwriter->flush();
+ }
+ close $nblkwriter;
+
+ if ($options->{dryrun}) {
+ info "simulate creating tarball...";
+ } elsif (any { $_ eq $options->{format} }
+ ('tar', 'squashfs', 'ext2')) {
+ info "creating tarball...";
+
+ # redirect tar output to the writing end of the pipe so
+ # that the parent process can capture the output
+ open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
+
+ # Add ./dev as the first entries of the tar file.
+ # We cannot add them after calling tar, because there is no
+ # way to prevent tar from writing NULL entries at the end.
+ if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
+ info "skipping output/dev as requested";
+ } else {
+ print $devtar;
+ }
+
+ # pack everything except ./dev
+ 0 == system('tar', @taropts, '-C', $options->{root}, '.')
+ or error "tar failed: $?";
+
+ info "done";
+ } elsif (any { $_ eq $options->{format} }
+ ('directory', 'null')) {
+ # nothing to do
+ } else {
+ error "unknown format: $options->{format}";
+ }
+
+ exit 0;
+ },
+ \@idmap
+ );
+ } elsif (any { $_ eq $options->{mode} }
+ ('root', 'fakechroot', 'chrootless')) {
+ $pid = fork() // error "fork() failed: $!";
+ if ($pid == 0) {
+ local $SIG{'INT'} = 'DEFAULT';
+ local $SIG{'HUP'} = 'DEFAULT';
+ local $SIG{'PIPE'} = 'DEFAULT';
+ local $SIG{'TERM'} = 'DEFAULT';
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ close $rfh;
+ close $parentsock;
+ open(STDOUT, '>&', STDERR) or error "cannot open STDOUT: $!";
+
+ setup($options);
+
+ print $childsock (pack('n', 0) . 'adios');
+ $childsock->flush();
+
+ close $childsock;
+
+ close $nblkreader;
+ if (!$options->{dryrun} && $options->{format} eq 'ext2') {
+ my $numblocks = approx_disk_usage($options->{root});
+ print $nblkwriter $numblocks;
+ $nblkwriter->flush();
+ }
+ close $nblkwriter;
+
+ if ($options->{dryrun}) {
+ info "simulate creating tarball...";
+ } elsif (any { $_ eq $options->{format} }
+ ('tar', 'squashfs', 'ext2')) {
+ info "creating tarball...";
+
+ # redirect tar output to the writing end of the pipe so that
+ # the parent process can capture the output
+ open(STDOUT, '>&', $wfh) or error "cannot open STDOUT: $!";
+
+ # Add ./dev as the first entries of the tar file.
+ # We cannot add them after calling tar, because there is no way
+ # to prevent tar from writing NULL entries at the end.
+ if (any { $_ eq 'output/dev' } @{ $options->{skip} }) {
+ info "skipping output/dev as requested";
+ } else {
+ print $devtar;
+ }
+
+ if ($options->{mode} eq 'fakechroot') {
+ # By default, FAKECHROOT_EXCLUDE_PATH includes /proc and
+ # /sys which means that the resulting tarball will contain
+ # the permission and ownership information of /proc and
+ # /sys from the outside, which we want to avoid.
+ ## no critic (Variables::RequireLocalizedPunctuationVars)
+ $ENV{FAKECHROOT_EXCLUDE_PATH} = "/dev";
+ # Fakechroot requires tar to run inside the chroot or
+ # otherwise absolute symlinks will include the path to the
+ # root directory
+ 0 == system('chroot', $options->{root}, 'tar',
+ @taropts, '-C', '/', '.')
+ or error "tar failed: $?";
+ } elsif (any { $_ eq $options->{mode} } ('root', 'chrootless'))
+ {
+ # If the chroot directory is not owned by the root user,
+ # then we assume that no measure was taken to fake root
+ # permissions. Since the final tarball should contain
+ # entries with root ownership, we instruct tar to do so.
+ my @owneropts = ();
+ if ((stat $options->{root})[4] != 0) {
+ push @owneropts, '--owner=0', '--group=0',
+ '--numeric-owner';
+ }
+ 0 == system('tar', @taropts, @owneropts, '-C',
+ $options->{root}, '.')
+ or error "tar failed: $?";
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ info "done";
+ } elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
+ # nothing to do
+ } else {
+ error "unknown format: $options->{format}";
+ }
+
+ exit 0;
+ }
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+
+ # parent
+
+ my $got_signal = 0;
+ my $waiting_for = "setup";
+ my $ignore = sub {
+ $got_signal = shift;
+ info "main() received signal $got_signal: waiting for $waiting_for...";
+ };
+
+ local $SIG{'INT'} = $ignore;
+ local $SIG{'HUP'} = $ignore;
+ local $SIG{'PIPE'} = $ignore;
+ local $SIG{'TERM'} = $ignore;
+
+ # unblock all delayed signals (and possibly handle them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ close $wfh;
+ close $childsock;
+
+ debug "starting to listen for hooks";
+ # handle special hook commands via parentsock
+ my $lpid = fork() // error "fork() failed: $!";
+ if ($lpid == 0) {
+ # whatever the script writes on stdout is sent to the
+ # socket
+ # whatever is written to the socket is sent to stdin
+ open(STDOUT, '>&', $parentsock)
+ or error "cannot open STDOUT: $!";
+ open(STDIN, '<&', $parentsock)
+ or error "cannot open STDIN: $!";
+
+ hooklistener($verbosity_level);
+ exit 0;
+ }
+ waitpid($lpid, 0);
+ if ($? != 0) {
+ # we cannot die here because that would leave the other thread
+ # running without a parent
+ warning "listening on child socket failed: $@";
+ $exitstatus = 1;
+ }
+ debug "finish to listen for hooks";
+
+ close $parentsock;
+
+ my $numblocks = 0;
+ close $nblkwriter;
+ if (!$options->{dryrun} && $options->{format} eq 'ext2') {
+ $numblocks = <$nblkreader>;
+ if (defined $numblocks) {
+ chomp $numblocks;
+ } else {
+ # This can happen if the setup process died early and thus closed
+ # the pipe from the other end. The EOF is turned into undef.
+ # we cannot die here because that would skip the cleanup task
+ warning "failed to read required number of blocks";
+ $exitstatus = 1;
+ $numblocks = -1;
+ }
+ }
+ close $nblkreader;
+
+ if ($options->{dryrun}) {
+ # nothing to do
+ } elsif (any { $_ eq $options->{format} } ('directory', 'null')) {
+ # nothing to do
+ } elsif ($options->{format} eq 'ext2' && $numblocks <= 0) {
+ # nothing to do because of invalid $numblocks
+ } elsif (any { $_ eq $options->{format} } ('tar', 'squashfs', 'ext2')) {
+ # we use eval() so that error() doesn't take this process down and
+ # thus leaves the setup() process without a parent
+ eval {
+ if ($options->{target} eq '-') {
+ if (!copy($rfh, *STDOUT)) {
+ error "cannot copy to standard output: $!";
+ }
+ } else {
+ if ( $options->{format} eq 'squashfs'
+ or $options->{format} eq 'ext2'
+ or defined $tar_compressor) {
+ my @argv = ();
+ if ($options->{format} eq 'squashfs') {
+ push @argv, 'tar2sqfs',
+ '--quiet', '--no-skip', '--force',
+ '--exportable',
+ '--compressor', 'xz',
+ '--block-size', '1048576',
+ $options->{target};
+ } elsif ($options->{format} eq 'ext2') {
+ if ($numblocks <= 0) {
+ error "invalid number of blocks: $numblocks";
+ }
+ push @argv, 'genext2fs', '-B', 1024, '-b', $numblocks,
+ '-i', '16384', '-a', '-', $options->{target};
+ } elsif ($options->{format} eq 'tar') {
+ push @argv, @{$tar_compressor};
+ } else {
+ error "unknown format: $options->{format}";
+ }
+ POSIX::sigprocmask(SIG_BLOCK, $sigset)
+ or error "Can't block signals: $!";
+ my $cpid = fork() // error "fork() failed: $!";
+ if ($cpid == 0) {
+ # child: default signal handlers
+ local $SIG{'INT'} = 'DEFAULT';
+ local $SIG{'HUP'} = 'DEFAULT';
+ local $SIG{'PIPE'} = 'DEFAULT';
+ local $SIG{'TERM'} = 'DEFAULT';
+
+ # unblock all delayed signals (and possibly handle
+ # them)
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+
+ # redirect stdout to file or /dev/null
+ if ( $options->{format} eq 'squashfs'
+ or $options->{format} eq 'ext2') {
+ open(STDOUT, '>', '/dev/null')
+ or error "cannot open /dev/null for writing: $!";
+ } elsif ($options->{format} eq 'tar') {
+ open(STDOUT, '>', $options->{target})
+ or error
+ "cannot open $options->{target} for writing: $!";
+ } else {
+ error "unknown format: $options->{format}";
+ }
+ open(STDIN, '<&', $rfh)
+ or error "cannot open file handle for reading: $!";
+ eval { Devel::Cover::set_coverage("none") }
+ if $is_covering;
+ exec { $argv[0] } @argv
+ or
+ error("cannot exec " . (join " ", @argv) . ": $!");
+ }
+ POSIX::sigprocmask(SIG_UNBLOCK, $sigset)
+ or error "Can't unblock signals: $!";
+ waitpid $cpid, 0;
+ if ($? != 0) {
+ error("failed to run " . (join " ", @argv));
+ }
+ } else {
+ # somehow, when running under qemu, writing to a virtio
+ # device will not result in an ENOSPC but just stall forever
+ if (!copy($rfh, $options->{target})) {
+ error "cannot copy to $options->{target}: $!";
+ }
+ }
+ }
+ };
+ if ($@) {
+ # we cannot die here because that would leave the other thread
+ # running without a parent
+ # We send SIGHUP to all our processes (including eventually
+ # running tar and this process itself) to reliably tear down
+ # all running child processes. The main process is not affected
+ # because we are ignoring SIGHUP.
+ #
+ # FIXME: this codepath becomes dangerous in case mmdebstrap is not
+ # run in its own process group. When run from the terminal, the
+ # shell creates a new process group as part of its job control, so
+ # sending SIGHUP to all processes in our own process group should
+ # not be dangerous. But for example, on debci, lxc will run in the
+ # same process group as mmdebstrap and sending SIGHUP to the whole
+ # process group will also kill lxc. Creating a new process group
+ # for $pid will break things because only the foreground job is
+ # allowed to read from the terminal. If a background job does it,
+ # i will be suspended with SIGTTIN. Even though apt could be told
+ # to not read from the terminal by opening STDIN from /dev/null,
+ # this would make --chrooted-customize-hook=bash impossible.
+ # Making the $pid process group the foreground job will destroy all
+ # the signal handling we have set up for when the user presses
+ # ctrl+c in a terminal. Even if we fix the signal handling we now
+ # find ourselves in the opposite situation: the $pid process must
+ # now clean up the former main process tree reliably. And we cannot
+ # create a new process group for everything all-in-one because that
+ # would also destroy CTRL+C handling from the terminal.
+ warning "creating tarball failed: $@";
+ my $pgroup = getpgrp();
+ warning "sending SIGHUP to all processes in process group $pgroup";
+ kill HUP => -$pgroup;
+ $exitstatus = 1;
+ }
+ } else {
+ error "unknown format: $options->{format}";
+ }
+ close($rfh);
+ waitpid $pid, 0;
+ if ($? != 0) {
+ $exitstatus = 1;
+ }
+
+ # change signal handler message
+ $waiting_for = "cleanup";
+
+ if (any { $_ eq $options->{format} } ('directory')) {
+ # nothing to do
+ } elsif (any { $_ eq $options->{format} }
+ ('tar', 'squashfs', 'ext2', 'null')) {
+ if (!-e $options->{root}) {
+ error "$options->{root} does not exist";
+ }
+ info "removing tempdir $options->{root}...";
+ if ($options->{mode} eq 'unshare') {
+ # We don't have permissions to remove the directory outside
+ # the unshared namespace, so we remove it here.
+ # Since this is still inside the unshared namespace, there is
+ # no risk of removing anything important.
+ $pid = get_unshare_cmd(
+ sub {
+ # change CWD to chroot directory because find tries to
+ # chdir to the current directory which might not be
+ # accessible by the unshared user:
+ # find: Failed to restore initial working directory
+ 0 == system('env', "--chdir=$options->{root}", 'find',
+ $options->{root}, '-mount',
+ '-mindepth', '1', '-delete')
+ or error "rm failed: $?";
+ # ignore failure in case the unshared user doesn't have the
+ # required permissions -- we attempt again later if
+ # necessary
+ rmdir "$options->{root}";
+ },
+ \@idmap
+ );
+ waitpid $pid, 0;
+ $? == 0 or error "remove_tree failed";
+ # in unshare mode, the toplevel directory might've been created in
+ # a directory that the unshared user cannot change and thus cannot
+ # delete. We attempt its removal again outside as the normal user.
+ if (-e $options->{root}) {
+ rmdir "$options->{root}"
+ or error "cannot rmdir $options->{root}: $!";
+ }
+ } elsif (any { $_ eq $options->{mode} }
+ ('root', 'fakechroot', 'chrootless')) {
+ # without unshare, we use the system's rm to recursively remove the
+ # temporary directory just to make sure that we do not accidentally
+ # remove more than we should by using --one-file-system.
+ 0 == system('rm', '--interactive=never', '--recursive',
+ '--preserve-root', '--one-file-system', $options->{root})
+ or error "rm failed: $?";
+ } else {
+ error "unknown mode: $options->{mode}";
+ }
+ } else {
+ error "unknown format: $options->{format}";
+ }
+
+ if ($got_signal) {
+ $exitstatus = 1;
+ }
+
+ if ($exitstatus == 0) {
+ my $duration = Time::HiRes::time - $before;
+ info "success in " . (sprintf "%.04f", $duration) . " seconds";
+ exit 0;
+ }
+
+ error "mmdebstrap failed to run";
+ return 1;
+}
+
+main();
+
+__END__
+
+=head1 NAME
+
+mmdebstrap - multi-mirror Debian chroot creation
+
+=head1 SYNOPSIS
+
+B<mmdebstrap> [B<OPTION...>] [I<SUITE> [I<TARGET> [I<MIRROR>...]]]
+
+=head1 DESCRIPTION
+
+B<mmdebstrap> creates a Debian chroot of I<SUITE> into I<TARGET> from one or
+more I<MIRROR>s. It is meant as an alternative to the debootstrap tool (see
+section B<DEBOOTSTRAP>). In contrast to debootstrap it uses apt to resolve
+dependencies and is thus able to use more than one mirror and resolve more
+complex dependency relationships. See section B<OPERATION> for an overview of
+how B<mmdebstrap> works internally.
+
+The I<SUITE> option may either be a valid release code name (e.g., sid,
+bookworm, trixie) or a symbolic name (e.g., unstable, testing, stable,
+oldstable). Any
+suite name that works with apt on the given mirror will work. The I<SUITE>
+option is optional if no I<TARGET> and no I<MIRROR> option is provided. If
+I<SUITE> is missing, then the information of the desired suite has to come from
+standard input as part of a valid apt sources.list file or be set up via hooks.
+The value of the I<SUITE> argument will be used to determine which apt index to
+use for finding out the set of C<Essential:yes> packages and/or the set of
+packages with the right priority for the selected variant. This functionality
+can be disabled by choosing the empty string for I<SUITE>. See the section
+B<VARIANTS> for more information.
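+
+For example, the following invocation (with F<my-sources.list> as a
+placeholder for an apt sources.list file) reads the sources from standard
+input and, because neither I<SUITE> nor I<TARGET> is given, writes an
+uncompressed tarball to standard output:
+
+ mmdebstrap < my-sources.list > chroot.tar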
+
+The I<TARGET> option may either be the path to a directory, the path to a
+tarball filename, the path to a squashfs image, the path to an ext2 image, a
+FIFO, a character special device, or C<->. The I<TARGET> option is optional if
+no I<MIRROR> option is provided. If I<TARGET> is missing or if I<TARGET> is
+C<->, an uncompressed tarball will be sent to standard output. Without the
+B<--format> option, I<TARGET> will be used to choose the format. See the
+section B<FORMATS> for more information.
+
+The I<MIRROR> option may either be provided as a URI, in apt one-line format,
+as a path to a file in apt's one-line or deb822-format, or C<->. If no
+I<MIRROR> option is provided, then L<http://deb.debian.org/debian> is used as
+the default. If I<SUITE> does not refer to "unstable" or "testing", then
+I<SUITE>-updates and I<SUITE>-security mirrors are automatically added. If a
+I<MIRROR> option starts with "deb " or "deb-src " then it is used as a one-line
+format entry for apt's sources.list inside the chroot. If a I<MIRROR> option
+contains a "://" then it is interpreted as a mirror URI and the apt line inside
+the chroot is assembled as "deb [arch=A] B C D" where A is the host's native
+architecture, B is the I<MIRROR>, C is the given I<SUITE> and D is the
+components given via B<--components> (defaults to "main"). If a I<MIRROR>
+option happens to be an existing file, then its contents are written into the
+chroot's sources.list (if the first I<MIRROR> is a file in one-line format) or
+into the chroot's sources.list.d directory, named with the extension .list or
+.sources, depending on whether the file is in one-line or deb822 format,
+respectively. If I<MIRROR> is C<-> then standard input is pasted into the
+chroot's sources.list. More than one mirror can be specified; they are
+appended to the chroot's sources.list in the given order. If you specify an
+https or tor
+I<MIRROR> and you want the chroot to be able to update itself, don't forget to
+also install the ca-certificates package, the apt-transport-https package for
+apt versions less than 1.5 and/or the apt-transport-tor package using the
+B<--include> option, as necessary.
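+
+As an illustration only, the following mixes a plain URI (from which a
+one-line entry is assembled as described above) with a literal one-line
+entry given on the command line:
+
+ mmdebstrap bookworm out.tar \
+     http://deb.debian.org/debian \
+     "deb http://deb.debian.org/debian bookworm-backports main"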
+
+All status output is printed to standard error unless B<--logfile> is used to
+redirect it to a file or B<--quiet> or B<--silent> is used to suppress any
+output on standard error. Help and version information will be printed to
+standard error with the B<--help> and B<--version> options, respectively.
+Otherwise, an uncompressed tarball might be sent to standard output if
+I<TARGET> is C<-> or if no I<TARGET> was specified.
+
+=head1 OPTIONS
+
+Options are case insensitive. Short options may be bundled. Long options
+require a double dash and may be abbreviated to uniqueness. Options can be
+placed anywhere on the command line, even before or mixed with the I<SUITE>,
+I<TARGET>, and I<MIRROR> arguments. A double dash C<--> can be used to stop
+interpreting command line arguments as options to allow I<SUITE>, I<TARGET> and
+I<MIRROR> arguments that start with a single or double dash. Option order only
+matters for options that can be passed multiple times as documented below.
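+
+For example (a purely illustrative invocation), a double dash allows a
+tarball name that itself starts with a dash:
+
+ mmdebstrap --variant=apt -- unstable -output.tar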
+
+=over 8
+
+=item B<-h,--help>
+
+Print synopsis and options of this man page and exit.
+
+=item B<--man>
+
+Show the full man page as generated from Perl POD in a pager. This requires
+the perldoc program from the perl-doc package. This is the same as running:
+
+ pod2man /usr/bin/mmdebstrap | man -l -
+
+=item B<--version>
+
+Print the B<mmdebstrap> version and exit.
+
+=item B<--variant>=I<name>
+
+Choose which package set to install. Valid variant I<name>s are B<extract>,
+B<custom>, B<essential>, B<apt>, B<required>, B<minbase>, B<buildd>,
+B<important>, B<debootstrap>, B<->, and B<standard>. The default variant is
+B<debootstrap>. See the section B<VARIANTS> for more information.
+
+=item B<--mode>=I<name>
+
+Choose how to perform the chroot operation and create a filesystem with
+ownership information different from the current user. Valid mode I<name>s are
+B<auto>, B<sudo>, B<root>, B<unshare>, B<fakeroot>, B<fakechroot> and
+B<chrootless>. The default mode is B<auto>. See the section B<MODES> for more
+information.
+
+=item B<--format>=I<name>
+
+Choose the output format. Valid format I<name>s are B<auto>, B<directory>,
+B<tar>, B<squashfs>, B<ext2> and B<null>. The default format is B<auto>. See
+the section B<FORMATS> for more information.
+
+=item B<--aptopt>=I<option>|I<file>
+
+Pass arbitrary I<option>s to apt. Will be permanently added to
+F</etc/apt/apt.conf.d/99mmdebstrap> inside the chroot. Use hooks for temporary
+configuration options. Can be specified multiple times. Each I<option> will be
+appended to 99mmdebstrap. A semicolon will be added at the end of the option if
+necessary. If the command line argument is an existing I<file>, the content of
+the file will be appended to 99mmdebstrap verbatim.
+
+Example: This is necessary for allowing old timestamps from snapshot.debian.org
+
+ --aptopt='Acquire::Check-Valid-Until "false"'
+ --aptopt='Apt::Key::gpgvcommand "/usr/libexec/mmdebstrap/gpgvnoexpkeysig"'
+
+Example: Settings controlling download of package description translations
+
+ --aptopt='Acquire::Languages { "environment"; "en"; }'
+ --aptopt='Acquire::Languages "none"'
+
+Example: Enable installing Recommends (by default B<mmdebstrap> doesn't)
+
+ --aptopt='Apt::Install-Recommends "true"'
+
+Example: Configure apt-cacher or apt-cacher-ng as an apt proxy
+
+ --aptopt='Acquire::http { Proxy "http://127.0.0.1:3142"; }'
+
+Example: For situations in which the apt sandbox user cannot access the chroot
+
+ --aptopt='APT::Sandbox::User "root"'
+
+Example: Minimizing the number of packages installed from experimental
+
+ --aptopt='APT::Solver "aspcud"'
+ --aptopt='APT::Solver::aspcud::Preferences
+ "-count(solution,APT-Release:=/a=experimental/),-removed,-changed,-new"'
+
+=item B<--keyring>=I<file>|I<directory>
+
+Change the default keyring to use by apt during the initial setup. This is
+similar to setting B<Dir::Etc::Trusted> and B<Dir::Etc::TrustedParts> using
+B<--aptopt> except that the latter setting will be permanently stored in the
+chroot while the keyrings passed via B<--keyring> will only be visible to apt
+as run by B<mmdebstrap>. Do not use B<--keyring> if apt inside the chroot needs
+to know about your keys after the initial chroot creation by B<mmdebstrap>.
+This option is mainly intended for users who use B<mmdebstrap> as a
+B<debootstrap> drop-in replacement. As such, it is probably not what you want to
+use if you use B<mmdebstrap> with more than a single mirror unless you pass it
+a directory containing all the keyrings you need.
+
+By default, the local settings of B<Dir::Etc::Trusted> and
+B<Dir::Etc::TrustedParts> are used to choose the keyring used by apt as run by
+B<mmdebstrap>. These two locations are set to F</etc/apt/trusted.gpg> and
+F</etc/apt/trusted.gpg.d> by default. Depending on whether a file or directory
+is passed to this option, the former and latter default can be changed,
+respectively. Since apt only supports a single keyring file and directory,
+respectively, you can B<not> use this option to pass multiple files and/or
+directories. Using the C<--keyring> argument in the following way is equal to
+keeping the default:
+
+ --keyring=/etc/apt/trusted.gpg --keyring=/etc/apt/trusted.gpg.d
+
+If you need to pass multiple keyrings, use the C<signed-by> option when
+specifying the mirror like this:
+
+ mmdebstrap mysuite out.tar "deb [signed-by=/path/to/key.gpg] http://..."
+
+Another reason to use C<signed-by> instead of B<--keyring> is if apt inside the
+chroot needs to know by what key the repository is signed even after the
+initial chroot creation.
+
+The C<signed-by> option will automatically be added to the final
+C<sources.list> if the keyring required for the selected I<SUITE> is not yet
+trusted by apt. Automatically adding the C<signed-by> option in these cases
+requires C<gpg> to be installed. If C<gpg> and C<ubuntu-archive-keyring> are
+installed, then you can create an Ubuntu Bionic chroot on Debian like this:
+
+ mmdebstrap bionic ubuntu-bionic.tar
+
+The resulting chroot will have a C<sources.list> with a C<signed-by> option
+pointing to F</usr/share/keyrings/ubuntu-archive-keyring.gpg>.
+
+You do not need to use B<--keyring> or C<signed-by> if you placed the keys that
+apt needs to know about into F</etc/apt/trusted.gpg.d> in the B<--setup-hook>
+(which is before C<apt update> runs), for example by using the B<copy-in>
+special hook. You also need to copy your keys into the chroot explicitly if the
+key you passed via C<signed-by> points to a location that is not otherwise
+populated during chroot creation (for example by installing a keyring package).
+
+=item B<--dpkgopt>=I<option>|I<file>
+
+Pass arbitrary I<option>s to dpkg. Will be permanently added to
+F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> inside the chroot. Use hooks for temporary
+configuration options. Can be specified multiple times. Each I<option> will be
+appended to 99mmdebstrap. If the command line argument is an existing I<file>,
+the content of the file will be appended to 99mmdebstrap verbatim.
+
+Example: Exclude paths to reduce chroot size
+
+ --dpkgopt='path-exclude=/usr/share/man/*'
+ --dpkgopt='path-include=/usr/share/man/man[1-9]/*'
+ --dpkgopt='path-exclude=/usr/share/locale/*'
+ --dpkgopt='path-include=/usr/share/locale/locale.alias'
+ --dpkgopt='path-exclude=/usr/share/doc/*'
+ --dpkgopt='path-include=/usr/share/doc/*/copyright'
+ --dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*'
+
+=item B<--include>=I<pkg1>[,I<pkg2>,...]
+
+Comma or whitespace separated list of packages which will be installed in
+addition to the packages installed by the specified variant. The direct and
+indirect hard dependencies will also be installed. The behaviour of this
+option depends on the selected variant. The B<extract> and B<custom> variants
+install no packages by default, so for these variants, the packages specified
+by this option will be the only ones that get either extracted or installed by
+dpkg, respectively. For all other variants, apt is used to install the
+additional packages. Package names are directly passed to apt and thus, you
+can use apt features like C<pkg/suite>, C<pkg=version>, C<pkg->, use a glob or
+regex for C<pkg>, use apt patterns or pass a path to a .deb package file (see
+below for notes concerning passing the path to a .deb package file in
+B<unshare> mode). See L<apt(8)> for the supported syntax.
+
+The option can be specified multiple times and the packages are concatenated in
+the order in which they are given on the command line. If later list items are
+repeated, then they get dropped so that the resulting package list is free of
+duplicates. So the following are equivalent:
+
+ --include="pkg1/stable pkg2=1.0 pkg3-"
+ --include=pkg1/stable,pkg2=1.0,pkg3-,,,
+ --incl=pkg1/stable --incl="pkg2=1.0 pkg3-" --incl=pkg2=1.0,pkg3-
+
+Since the list of packages is separated by comma or whitespace, it is not
+possible to mix apt patterns or .deb package file paths containing either
+commas or whitespace with normal package names. If you do, your patterns and
+paths will be split by comma and whitespace as well and become useless. To pass
+such a pattern or package file path, put them into their own B<--include>
+option. If the argument to B<--include> starts with an apt pattern or with a
+file path, then it will not be split:
+
+ --include="?or(?priority(required), ?priority(important))"
+ --include="./path/to/deb with spaces/and,commas/foo.deb"
+
+Specifically, all arguments to B<--include> that start with a C<?>, C<!>, C<~>,
+C<(>, C</>, C<./> or C<../> are not split and treated as single arguments to
+apt. To add more packages, use multiple B<--include> options. To disable this
+detection of patterns and paths, start the argument to B<--include> with a
+comma or whitespace.
+
+If you pass the path to a .deb package file using B<--include>, B<mmdebstrap>
+will ensure that the path exists. If the path is a relative path, it will
+internally be converted to an absolute path. Since apt (outside the chroot)
+passes paths to dpkg (on the inside) verbatim, you have to make the .deb
+package available under the same path inside the chroot as well or otherwise
+dpkg inside the chroot will be unable to access it. This can be achieved using
+a setup-hook. A hook that automatically makes the contents of C<file://>
+mirrors as well as .deb packages given with B<--include> available inside the
+chroot is provided by B<mmdebstrap> as
+B<--hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount>. This hook
+takes care of copying all relevant files to their correct locations and cleans
+up those files at the end. In B<unshare> mode, the .deb package paths have to
+be accessible by the unshared user as well. This means that the package itself
+likely must be made world-readable and all directory components on the path to
+it world-executable.
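+
+Example: install a locally built package (the path is only an illustration)
+and let the provided hook make it available inside the chroot:
+
+ --include=./path/to/foo.deb
+ --hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount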
+
+=item B<--components>=I<comp1>[,I<comp2>,...]
+
+Comma or whitespace separated list of components like main, contrib, non-free
+and non-free-firmware which will be used for all URI-only I<MIRROR> arguments.
+The option can be specified multiple times and the components are concatenated
+in the order in which they are given on the command line. If later list items
+are repeated, then they get dropped so that the resulting component list is
+free of duplicates. So the following are equivalent:
+
+ --components="main contrib non-free non-free-firmware"
+ --components=main,contrib,non-free,non-free-firmware
+ --comp=main --comp="contrib non-free" --comp="main,non-free-firmware"
+
+=item B<--architectures>=I<native>[,I<foreign1>,...]
+
+Comma or whitespace separated list of architectures. The first architecture is
+the I<native> architecture inside the chroot. The remaining architectures will
+be added to the foreign dpkg architectures. Without this option, the I<native>
+architecture of the chroot defaults to the native architecture of the system
+running B<mmdebstrap>. The option can be specified multiple times and values
+are concatenated. If later list items are repeated, then they get dropped so
+that the resulting list is free of duplicates. So the following are
+equivalent:
+
+ --architectures="amd64 armhf mipsel"
+ --architectures=amd64,armhf,mipsel
+ --arch=amd64 --arch="armhf mipsel" --arch=armhf,mipsel
+
+=item B<--simulate>, B<--dry-run>
+
+Run apt-get with B<--simulate>. Only the package cache is initialized but no
+binary packages are downloaded or installed. Use this option to quickly check
+whether a package selection within a certain suite and variant can in principle
+be installed as far as their dependencies go. If the output is a tarball, then
+no output is produced. If the output is a directory, then the directory will be
+left populated with the skeleton files and directories necessary for apt to run
+in it. No hooks are executed with B<--simulate> or B<--dry-run>.
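+
+Example: quickly check whether the default package set of unstable is
+installable without producing any output artifact:
+
+ mmdebstrap --simulate unstable /dev/null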
+
+=item B<--setup-hook>=I<command>
+
+Execute arbitrary I<command>s right after initial setup (directory creation,
+configuration of apt and dpkg, ...) but before any packages are downloaded or
+installed. At that point, the chroot directory does not contain any
+executables and thus cannot be chroot-ed into. See section B<HOOKS> for more
+information.
+
+Example: add additional apt sources entries on top of the default ones:
+
+ --setup-hook='echo "deb http..." > "$1"/etc/apt/sources.list.d/custom.list'
+
+Example: Set up a chroot for installing a sub-essential busybox-based chroot
+with --variant=custom
+--include=dpkg,busybox,libc-bin,base-files,base-passwd,debianutils
+
+ --setup-hook='mkdir -p "$1/bin"'
+ --setup-hook='for p in awk cat chmod chown cp diff echo env grep less ln
+ mkdir mount rm rmdir sed sh sleep sort touch uname mktemp; do
+ ln -s busybox "$1/bin/$p"; done'
+ --setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"'
+ --setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"'
+
+For a more elegant way of setting up a sub-essential busybox-based chroot, see
+the B<--hook-dir> option below.
+
+=item B<--extract-hook>=I<command>
+
+Execute arbitrary I<command>s after the Essential:yes packages have been
+extracted but before installing them. See section B<HOOKS> for more
+information.
+
+Example: Install busybox symlinks
+
+ --extract-hook='chroot "$1" /bin/busybox --install -s'
+
+=item B<--essential-hook>=I<command>
+
+Execute arbitrary I<command>s after the Essential:yes packages have been
+installed but before installing the remaining packages. The hook is not
+executed for the B<extract> and B<custom> variants. See section B<HOOKS> for
+more information.
+
+Example: Enable unattended upgrades
+
+ --essential-hook='echo unattended-upgrades
+ unattended-upgrades/enable_auto_updates boolean true
+ | chroot "$1" debconf-set-selections'
+
+Example: Select Europe/Berlin as the timezone
+
+ --essential-hook='echo tzdata tzdata/Areas select Europe
+ | chroot "$1" debconf-set-selections'
+ --essential-hook='echo tzdata tzdata/Zones/Europe select Berlin
+ | chroot "$1" debconf-set-selections'
+
+=item B<--customize-hook>=I<command>
+
+Execute arbitrary I<command>s after the chroot is set up and all packages have
+been installed but before final cleanup actions are carried out. See section
+B<HOOKS> for more information.
+
+Example: Add a user without a password
+
+ --customize-hook='chroot "$1" useradd --home-dir /home/user
+ --create-home user'
+ --customize-hook='chroot "$1" passwd --delete user'
+
+Example: set up F</etc/hostname> and F</etc/hosts>
+
+ --customize-hook='echo host > "$1/etc/hostname"'
+ --customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"'
+
+Example: to mimic B<debootstrap> behaviour, B<mmdebstrap> copies the host's
+F</etc/resolv.conf> and F</etc/hostname> into the chroot. Remove them in a
+B<--customize-hook> to make the chroot reproducible across multiple hosts:
+
+ --customize-hook='rm "$1"/etc/resolv.conf'
+ --customize-hook='rm "$1"/etc/hostname'
+
+=item B<--hook-directory>=I<directory>
+
+Execute scripts in I<directory> with filenames starting with C<setup>,
+C<extract>, C<essential> or C<customize>, at the respective stages during an
+mmdebstrap run. The files must be marked executable. Their extension is
+ignored. Subdirectories are not traversed. This option is a short-hand for
+specifying the remaining four hook options individually for each file in the
+directory. If there is more than one script for a stage, they are added in
+alphabetical order. This is useful in cases where a user wants to run the same
+hooks frequently. For example, given a directory C<./hooks> with two scripts
+C<setup01-foo.sh> and C<setup02-bar.sh>, this call:
+
+ mmdebstrap --customize=./scriptA --hook-dir=./hooks --setup=./scriptB
+
+is equivalent to this call:
+
+ mmdebstrap --customize=./scriptA --setup=./hooks/setup01-foo.sh \
+ --setup=./hooks/setup02-bar.sh --setup=./scriptB
+
+The option can be specified multiple times and scripts are added to the
+respective hooks in the order the options are given on the command line. Thus,
+if the scripts in two directories depend upon each other, the scripts must be
+placed into a common directory and be named such that they get added in the
+correct order.
+
+Example 1: Run mmdebstrap with eatmydata
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/eatmydata
+
+Example 2: Set up a chroot for installing a sub-essential busybox-based chroot
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/busybox
+
+Example 3: Automatically mount all directories referenced by C<file://> mirrors
+into the chroot
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount
+
+=item B<--skip>=I<stage>[,I<stage>,...]
+
+B<mmdebstrap> tries hard to implement sensible defaults and will try to stop
+you before shooting yourself in the foot. This option is for when you are sure
+you know what you are doing and allows skipping certain actions and safety
+checks. See section B<OPERATION> for a list of possible arguments and their
+context. The option can be specified multiple times or you can separate
+multiple values by comma or whitespace.
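+
+Example: keep the apt package lists and the apt cache in the output (both
+values are documented in section B<OPERATION>):
+
+ --skip=cleanup/apt/lists,cleanup/apt/cache
+ --skip=cleanup/apt/lists --skip=cleanup/apt/cache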
+
+=item B<-q,--quiet>, B<-s,--silent>
+
+Do not write anything to standard error. If used together with B<--verbose> or
+B<--debug>, only the last option will take effect.
+
+=item B<-v,--verbose>
+
+Instead of progress bars, write the dpkg and apt output directly to standard
+error. If used together with B<--quiet> or B<--debug>, only the last option
+will take effect.
+
+=item B<-d,--debug>
+
+In addition to the output produced by B<--verbose>, write detailed debugging
+information to standard error. Errors will print a backtrace. If used together
+with B<--quiet> or B<--verbose>, only the last option will take effect.
+
+=item B<--logfile>=I<filename>
+
+Instead of writing status information to standard error, write it into the
+file given by I<filename>.
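+
+Example: write status information to a file in the current directory:
+
+ --logfile=./mmdebstrap.log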
+
+=back
+
+=head1 MODES
+
+Creating a Debian chroot requires not only permissions for running chroot but
+also the ability to create files owned by the superuser. The selected mode
+decides which way this is achieved.
+
+=over 8
+
+=item B<auto>
+
+This mode automatically selects a fitting mode. If the effective user id is
+that of the superuser, then the B<sudo> mode is chosen. Otherwise, the
+B<unshare> mode is picked if F</etc/subuid> and F</etc/subgid> are set up
+correctly. Should that not be the case and if the fakechroot binary exists, the
+B<fakechroot> mode is chosen.
+
+=item B<sudo>, B<root>
+
+This mode directly executes chroot and is the same mode of operation as is
+used by debootstrap. It is the only mode that can directly create a directory
+chroot with the right permissions. If the chroot directory is not accessible
+by the _apt user, then apt sandboxing will be automatically disabled. This mode
+needs to be able to mount and thus requires C<CAP_SYS_ADMIN>.
+
+=item B<unshare>
+
+When used as a normal (not root) user, this mode uses Linux user namespaces to
+allow unprivileged use of chroot and creation of files that appear to be owned
+by the superuser inside the unshared namespace. A tarball created in this mode
+will be bit-by-bit identical to a tarball created with the B<root> mode. With
+this mode, the only binaries that will run as the root user will be
+L<newuidmap(1)> and L<newgidmap(1)> via their setuid bit. Running those
+successfully requires F</etc/subuid> and F</etc/subgid> to have an entry for
+your username. This entry is usually already created by L<adduser(8)>.
+
+The unshared user will not automatically have access to the same files as you
+do. This is intentional and provides additional protection against unintended
+changes to your files that could theoretically result from running
+B<mmdebstrap> and package maintainer scripts. To copy files in and out of the
+chroot, either use
+globally readable or writable directories or use special hooks like B<copy-in>
+and B<copy-out>.
+
+Besides the user namespace, the mount, pid (process ids), uts (hostname) and
+ipc namespaces will be unshared as well. See the man pages of L<namespaces(7)>
+and L<unshare(2)> as well as the manual pages they are linking to.
+
+A directory chroot created with this mode will end up with wrong ownership
+information (seen from outside the unshared user namespace). For correct
+ownership information, the directory must be accessed from a user namespace
+with the right subuid/subgid offset, like so:
+
+ $ lxc-usernsexec -- lxc-unshare -s 'MOUNT|PID|UTSNAME|IPC' -- \
+ > /usr/sbin/chroot ./debian-rootfs /bin/bash
+
+Or without LXC:
+
+ $ mmdebstrap --unshare-helper /usr/sbin/chroot ./debian-rootfs /bin/bash
+
+Or, if you don't mind using superuser privileges and have systemd-nspawn
+available and you know your subuid/subgid offset (100000 in this example):
+
+ $ sudo systemd-nspawn --private-users=100000 \
+ > --directory=./debian-rootfs /bin/bash
+
+A directory created in B<unshare> mode cannot be removed the normal way.
+Instead, use something like this:
+
+ $ unshare --map-root-user --map-auto rm -rf ./debian-rootfs
+
+If this mode is used as the root user, the user namespace is not unshared (but
+the mount namespace and others still are) and created directories will have
+correct ownership information. This is also useful in cases where the root user
+wants the benefits of an unshared mount namespace to prevent accidentally
+messing up the system.
+
+=item B<fakeroot>, B<fakechroot>
+
+This mode will exec B<mmdebstrap> again under C<fakechroot fakeroot>. A
+directory chroot created with this mode will end up with wrong permissions. If
+you need a directory then run B<mmdebstrap> under C<fakechroot fakeroot -s
+fakeroot.env> and use C<fakeroot.env> later when entering the chroot with
+C<fakechroot fakeroot -i fakeroot.env chroot ...>. This mode will not work if
+maintainer scripts are unable to handle C<LD_PRELOAD> correctly, as was the
+case for the B<initramfs-tools> package until version 0.132. This mode will
+also not work
+with a different libc inside the chroot than on the outside. See the section
+B<LIMITATIONS> in L<fakechroot(1)>.
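+
+A minimal sketch of the directory workflow described above (assuming that
+B<mmdebstrap> selects the B<fakechroot> mode automatically when run under
+C<fakechroot fakeroot>):
+
+ $ fakechroot fakeroot -s fakeroot.env \
+     mmdebstrap --variant=apt unstable ./unstable-chroot
+ $ fakechroot fakeroot -i fakeroot.env chroot ./unstable-chroot /bin/bash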
+
+=item B<chrootless>
+
+Uses the dpkg option C<--force-script-chrootless> to install packages into
+I<TARGET> without dpkg and apt inside I<TARGET> but using apt and dpkg from the
+machine running B<mmdebstrap>. Maintainer scripts are run without chrooting
+into I<TARGET> and rely on their dependencies being installed on the machine
+running B<mmdebstrap>. Only very few packages support this mode. Namely, as of
+2022, not all essential packages support it. See
+https://wiki.debian.org/Teams/Dpkg/Spec/InstallBootstrap or the
+dpkg-root-support usertag of debian-dpkg@lists.debian.org in the Debian bug
+tracking system. B<WARNING>: if this option is used carelessly with packages
+that do not support C<DPKG_ROOT>, this mode can result in undesired changes to
+the system running B<mmdebstrap> because maintainer-scripts will be run without
+L<chroot(1)>. Make sure to run this mode without superuser privileges and/or
+inside a throw-away chroot environment like so:
+
+ mmdebstrap --variant=apt --include=mmdebstrap \
+ --customize-hook='chroot "$1" mmdebstrap --mode=chrootless
+ --variant=apt unstable chrootless.tar' \
+ --customize-hook='copy-out chrootless.tar .' unstable /dev/null
+
+=back
+
+=head1 VARIANTS
+
+All package sets also include the direct and indirect hard dependencies (but
+not recommends) of the selected package sets. The variants B<minbase>,
+B<buildd> and B<-> resemble the package sets that debootstrap would install
+with the same I<--variant> argument. The release with a name matching the
+I<SUITE> argument as well as the native architecture will be used to determine
+the C<Essential:yes> and priority values. To select packages with matching
+priority from any suite, specify the empty string for I<SUITE>. The default
+variant is B<debootstrap>.
+
+=over 8
+
+=item B<extract>
+
+Installs nothing by default (not even C<Essential:yes> packages). Packages
+given by the C<--include> option are extracted but will not be installed.
+
+=item B<custom>
+
+Installs nothing by default (not even C<Essential:yes> packages). Packages
+given by the C<--include> option will be installed. If a mode other than
+B<chrootless> was selected and dpkg was not part of the included package set,
+then this variant will fail because it cannot configure the packages.
+
+=item B<essential>
+
+C<Essential:yes> packages. If I<SUITE> is a non-empty string, then only
+packages from the archive with suite or codename matching I<SUITE> will be
+considered for selection of C<Essential:yes> packages.
+
+=item B<apt>
+
+The B<essential> set plus apt. This variant uses the fact that B<apt> treats
+itself as essential and thus running C<apt-get dist-upgrade> without any
+packages installed will install the B<essential> set plus B<apt>. If you just
+want B<essential> and B<apt>, then this variant is faster than using the
+B<essential> variant and adding B<apt> via C<--include> because all packages
+get installed at once. The downside of this variant is that if an B<essential>
+package happens to not be installable, it will just be ignored without
+throwing an error.
+
+=item B<buildd>
+
+The B<essential> set plus apt and build-essential.
+It is roughly equivalent to running mmdebstrap with
+
+ --variant=essential --include="apt,build-essential"
+
+=item B<required>, B<minbase>
+
+The B<essential> set plus all packages with Priority:required.
+It is roughly equivalent to running mmdebstrap with
+
+ --variant=essential --include="?priority(required)"
+
+=item B<important>, B<debootstrap>, B<->
+
+The B<required> set plus all packages with Priority:important. This is the
+default of debootstrap. It is roughly equivalent to running mmdebstrap with
+
+ --variant=essential --include="~prequired|~pimportant"
+
+=item B<standard>
+
+The B<important> set plus all packages with Priority:standard.
+It is roughly equivalent to running mmdebstrap with
+
+ --variant=essential --include="~prequired|~pimportant|~pstandard"
+
+=back
+
+=head1 FORMATS
+
+The output format of B<mmdebstrap> is specified using the B<--format> option.
+Without that option the default format is I<auto>. The following formats exist:
+
+=over 8
+
+=item B<auto>
+
+When selecting this format (the default), the actual format will be inferred
+from the I<TARGET> positional argument. If I<TARGET> was not specified, then
+the B<tar> format will be chosen. If I<TARGET> happens to be F</dev/null> or if
+standard output is F</dev/null>, then the B<null> format will be chosen. If
+I<TARGET> is an existing directory, and does not equal C<->, then the
+B<directory> format will be chosen. If I<TARGET> ends with C<.tar> or with one
+of the filename extensions listed in the section B<COMPRESSION>, or if
+I<TARGET> equals C<->, or if I<TARGET> is a named pipe (fifo) or if I<TARGET>
+is a character special file, then the B<tar> format will be chosen. If
+I<TARGET> ends with C<.squashfs> or C<.sqfs>, then the B<squashfs> format will
+be chosen. If I<TARGET> ends with C<.ext2> then the B<ext2> format will be
+chosen. If none of these conditions apply, the B<directory> format will be
+chosen.
+
+=item B<directory>, B<dir>
+
+A chroot directory will be created in I<TARGET>. If the directory already
+exists, it must either be empty or only contain an empty C<lost+found>
+directory. The special I<TARGET> C<-> does not work with this format because a
+directory cannot be written to standard output. If you need your directory to
+be named C<->, then just explicitly pass the relative path to it like F<./->.
+If a directory is chosen as output in any mode other than B<sudo>, then its
+contents will have wrong ownership information and special device files will be
+missing. Refer to the section B<MODES> for more information.
+
+=item B<tar>
+
+A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
+C<$TMPDIR> is not set. A tarball of that directory will be stored in I<TARGET>
+or sent to standard output if I<TARGET> was omitted or if I<TARGET> equals
+C<->. If I<TARGET> ends with one of the filename extensions listed in the
+section B<COMPRESSION>, then a compressed tarball will be created. The tarball
+will be in POSIX 1003.1-2001 (pax) format and will contain extended attributes.
+To preserve the extended attributes, you have to pass B<--xattrs
+--xattrs-include='*'> to tar when extracting the tarball.
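+
+For example, to extract such a tarball while preserving the extended
+attributes (as the superuser):
+
+ $ mkdir ./unstable-chroot
+ $ sudo tar --xattrs --xattrs-include='*' \
+     --extract --file unstable-chroot.tar --directory ./unstable-chroot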
+
+=item B<squashfs>, B<sqfs>
+
+A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
+C<$TMPDIR> is not set. A tarball of that directory will be piped to the
+C<tar2sqfs> utility, which will create an xz compressed squashfs image with a
+blocksize of 1048576 bytes in I<TARGET>. The special I<TARGET> C<-> does not
+work with this format because C<tar2sqfs> can only write to a regular file. If
+you need your squashfs image to be named C<->, then just explicitly pass the
+relative path to it like F<./->. The C<tar2sqfs> tool only supports a limited
+set of extended attribute prefixes. Therefore, extended attributes are disabled
+in the resulting image. If you need them, create a tarball first and remove the
+extended attributes from its pax headers. Refer to the B<EXAMPLES> section for
+how to achieve this.
+
+=item B<ext2>
+
+A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
+C<$TMPDIR> is not set. A tarball of that directory will be piped to the
+C<genext2fs> utility, which will create an ext2 image that will be
+approximately 90% full in I<TARGET>. The special I<TARGET> C<-> does not work
+with this format because C<genext2fs> can only write to a regular file. If you
+need your ext2 image to be named C<->, then just explicitly pass the relative
+path to it like F<./->. To convert the result to an ext3 image, use
+C<tune2fs -O has_journal TARGET> and to convert it to ext4, use C<tune2fs -O
+extents,uninit_bg,dir_index,has_journal TARGET>. Since C<genext2fs> does not
+support extended attributes, the resulting image will not contain them.
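+
+Example: create an ext2 image and convert it to ext4 using the C<tune2fs>
+invocation quoted above:
+
+ $ mmdebstrap unstable unstable-chroot.ext2
+ $ tune2fs -O extents,uninit_bg,dir_index,has_journal unstable-chroot.ext2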
+
+=item B<null>
+
+A temporary chroot directory will be created in C<$TMPDIR> or F</tmp> if
+C<$TMPDIR> is not set. After the bootstrap is complete, the temporary chroot
+will be deleted without being part of the output. This is most useful when the
+desired artifact is generated inside the chroot and it is transferred using
+special hooks such as B<sync-out>. It is also useful in situations where only
+the exit code or stdout or stderr of a process run in a hook is of interest.
+
+=back
+
+=head1 HOOKS
+
+This section describes properties of the hook options B<--setup-hook>,
+B<--extract-hook>, B<--essential-hook> and B<--customize-hook> which are common
+to all four of them. Any information specific to each hook is documented under
+the specific hook options in the section B<OPTIONS>.
+
+The options can be specified multiple times and the commands are executed in
+the order in which they are given on the command line. There are four different
+types of hook option arguments. If the argument passed to the hook option
+starts with C<copy-in>, C<copy-out>, C<tar-in>, C<tar-out>, C<upload> or
+C<download> followed by a space, then the hook is interpreted as a special
+hook. Otherwise, if I<command> is an existing executable file from C<$PATH> or
+if I<command> does not contain any shell metacharacters, then I<command> is
+directly exec-ed with the path to the chroot directory passed as the first
+argument. Otherwise, I<command> is executed under I<sh> and the chroot
+directory can be accessed via I<$1>. Most environment variables set by
+B<mmdebstrap> (like C<DEBIAN_FRONTEND>, C<LC_ALL> and C<PATH>) are preserved.
+Most notably, C<APT_CONFIG> is unset. If you need the path to
+C<APT_CONFIG> as written by mmdebstrap it can be found in the
+C<MMDEBSTRAP_APT_CONFIG> environment variable. All environment variables set by
+the user are preserved, except for C<TMPDIR> which is cleared. See section
+B<TMPDIR>. Furthermore, C<MMDEBSTRAP_MODE> will store the mode set by
+B<--mode>, C<MMDEBSTRAP_FORMAT> stores the format chosen by B<--format>,
+C<MMDEBSTRAP_HOOK> stores which hook is currently run (setup, extract,
+essential, customize), C<MMDEBSTRAP_ARGV0> stores the name of the binary with
+which B<mmdebstrap> was executed and C<MMDEBSTRAP_VERBOSITY> stores the
+numerical verbosity level (0 for no output, 1 for normal, 2 for verbose and 3
+for debug output). The C<MMDEBSTRAP_INCLUDE> variable stores the list of
+packages, apt patterns or file paths given by the B<--include> option,
+separated by a comma and with commas and percent signs in the option values
+urlencoded. If a I<SUITE> name was supplied, it is stored in C<MMDEBSTRAP_SUITE>.
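+
+For example, a hook can report which mode and format were selected via these
+variables (a minimal illustration):
+
+ --customize-hook='echo "mode: $MMDEBSTRAP_MODE format: $MMDEBSTRAP_FORMAT" >&2'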
+
+In special hooks, the paths inside the chroot are relative to the root
+directory of the chroot. The path on the outside is relative to the current
+directory of the original B<mmdebstrap> invocation. The path inside the chroot
+must already exist. Paths outside the chroot are created as necessary.
+
+In B<fakechroot> mode, C<tar>, or C<sh> and C<cat>, have to be run inside the
+chroot, or otherwise symlinks will be wrongly resolved and/or permissions will
+be off. This means that the special hooks might fail in B<fakechroot> mode for
+the B<setup> hook or for the B<extract> and B<custom> variants if no C<tar> or
+C<sh> and C<cat> is available inside the chroot.
+
+=over 8
+
+=item B<copy-out> I<pathinside> [I<pathinside> ...] I<pathoutside>
+
+Recursively copies one or more files and directories from I<pathinside> inside
+the chroot to I<pathoutside> outside of the chroot. See the combined example
+after this list.
+
+=item B<copy-in> I<pathoutside> [I<pathoutside> ...] I<pathinside>
+
+Recursively copies one or more files and directories from outside the chroot,
+placing them into I<pathinside> inside of the chroot.
+
+=item B<sync-out> I<pathinside> I<pathoutside>
+
+Recursively copy everything inside I<pathinside> inside the chroot into
+I<pathoutside>. In contrast to B<copy-out>, this command synchronizes the
+content of I<pathinside> with the content of I<pathoutside> without deleting
+anything from I<pathoutside> but overwriting content as necessary. Use this
+command over B<copy-out> if you don't want to create a new directory outside
+the chroot but only update the content of an existing directory.
+
+=item B<sync-in> I<pathoutside> I<pathinside>
+
+Recursively copy everything inside I<pathoutside> into I<pathinside> inside the
+chroot. In contrast to B<copy-in>, this command synchronizes the content of
+I<pathoutside> with the content of I<pathinside> without deleting anything from
+I<pathinside> but overwriting content as necessary. Use this command over
+B<copy-in> if you don't want to create a new directory inside the chroot but
+only update the content of an existing directory.
+
+=item B<tar-in> I<outside.tar> I<pathinside>
+
+Unpacks a tarball I<outside.tar> from outside the chroot into a certain
+location I<pathinside> inside the chroot. In B<unshare> mode, device nodes
+cannot be created. To ignore device nodes in tarballs, use
+B<--skip=tar-in/mknod>.
+
+=item B<tar-out> I<pathinside> I<outside.tar>
+
+Packs the path I<pathinside> from inside the chroot into a tarball, placing it
+into a certain location I<outside.tar> outside the chroot.
+
+=item B<download> I<fileinside> I<fileoutside>
+
+Copy the file given by I<fileinside> from inside the chroot to outside the
+chroot as I<fileoutside>. In contrast to B<copy-out>, this command only
+handles files and not directories. To copy a directory recursively out of the
+chroot, use B<copy-out> or B<tar-out>. Its advantage is that by being able to
+specify the full path on the outside, including the filename, the file on the
+outside can have a different name from the file on the inside. In contrast to
+B<copy-out> and B<tar-out>, this command follows symlinks.
+
+=item B<upload> I<fileoutside> I<fileinside>
+
+Copy the file given by I<fileoutside> from outside the chroot to inside the
+chroot as I<fileinside>. In contrast to B<copy-in>, this command only
+handles files and not directories. To copy a directory recursively into the
+chroot, use B<copy-in> or B<tar-in>. Its advantage is that by being able to
+specify the full path on the inside, including the filename, the file on the
+inside can have a different name from the file on the outside. In contrast to
+B<copy-in> and B<tar-in>, permission and ownership information will not be
+retained.
+
+=back
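+
+Example: combine the special hooks with the regular hook options (the file
+paths are only illustrations):
+
+ --customize-hook='copy-in ./my-app.conf /etc'
+ --customize-hook='copy-out /etc/apt/sources.list ./build-artifacts'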
+
+=head1 OPERATION
+
+This section gives an overview of the different steps to create a chroot. At
+its core, what B<mmdebstrap> does can be put into a 13 line shell script:
+
+ mkdir -p "$2/etc/apt" "$2/var/cache" "$2/var/lib"
+ cat << END > "$2/apt.conf"
+ Apt::Architecture "$(dpkg --print-architecture)";
+ Apt::Architectures "$(dpkg --print-architecture)";
+ Dir "$(cd "$2" && pwd)";
+ Dir::Etc::Trusted "$(eval "$(apt-config shell v Dir::Etc::Trusted/f)"; printf "$v")";
+ Dir::Etc::TrustedParts "$(eval "$(apt-config shell v Dir::Etc::TrustedParts/d)"; printf "$v")";
+ END
+ echo "deb http://deb.debian.org/debian/ $1 main" > "$2/etc/apt/sources.list"
+ APT_CONFIG="$2/apt.conf" apt-get update
+ APT_CONFIG="$2/apt.conf" apt-get --yes --download-only install '?essential'
+ for f in "$2"/var/cache/apt/archives/*.deb; do dpkg-deb --extract "$f" "$2"; done
+ chroot "$2" sh -c "dpkg --install --force-depends /var/cache/apt/archives/*.deb"
+
+The additional complexity of B<mmdebstrap> comes from supporting operation
+without superuser privileges, bit-by-bit reproducible output, hooks and
+foreign architectures.
+
+The remainder of this section explains what B<mmdebstrap> does step-by-step.
+
+=over 8
+
+=item B<check>
+
+Upon startup, several checks are carried out, like:
+
+=over 4
+
+=item * whether required utilities (apt, dpkg, tar) are installed
+
+=item * which mode to use and whether prerequisites are met
+
+=item * do not allow chrootless mode as root (without fakeroot) unless inside a chroot. This check can be disabled using B<--skip=check/chrootless>
+
+=item * whether the requested architecture can be executed using qemu binfmt_misc support. This check requires arch-test and can be disabled using B<--skip=check/qemu>
+
+=item * how the apt sources can be assembled from I<SUITE>, I<MIRROR> and B<--components> and/or from standard input as deb822 or one-line format and whether the required GPG keys exist.
+
+=item * which output format to pick depending on the B<--format> argument or name of I<TARGET> or its type.
+
+=item * whether the output directory is empty. This check can be disabled using B<--skip=check/empty>
+
+=item * whether adding a C<signed-by> to C<apt/sources.list> is necessary. This requires gpg and can be disabled using B<--skip=check/signed-by>
+
+=back
+
+=item B<setup>
+
+The following tasks are carried out unless B<--skip=setup> is used:
+
+=over 4
+
+=item * create required directories
+
+=item * write out the temporary apt config file
+
+=item * populate F</etc/apt/apt.conf.d/99mmdebstrap> and F</etc/dpkg/dpkg.cfg.d/99mmdebstrap> with config options from B<--aptopt> and B<--dpkgopt>, respectively
+
+=item * write out F</etc/apt/sources.list>
+
+=item * copy over F</etc/resolv.conf> and F</etc/hostname>
+
+=item * populate F</dev> if mknod is possible
+
+=back
+
+=item B<setup-hook>
+
+Run B<--setup-hook> options and all F<setup*> scripts in B<--hook-dir>.
+
+=item B<update>
+
+Runs C<apt-get update> using the temporary apt configuration file created in
+the B<setup> step. This can be disabled using B<--skip=update>.
+
+=item B<download>
+
+In the B<extract> and B<custom> variants, C<apt-get install> is used to
+download all the packages requested via the B<--include> option. The B<apt>
+variant uses the fact that libapt treats the C<apt> package as implicitly
+essential to download only all C<Essential:yes> packages plus apt using
+C<apt-get dist-upgrade>. In the remaining variants, all Packages files
+downloaded by the B<update> step are inspected to find the C<Essential:yes>
+package set as well as all packages of the required priority. If I<SUITE> is a
+non-empty string, then only packages from the archive with suite or codename
+matching I<SUITE> will be considered for selection of C<Essential:yes>
+packages.
+
+=item B<mount>
+
+Mount relevant device nodes, F</proc> and F</sys> into the chroot and unmount
+them afterwards. This can be disabled using B<--skip=chroot/mount> or
+specifically by B<--skip=chroot/mount/dev>, B<--skip=chroot/mount/proc> and
+B<--skip=chroot/mount/sys>, respectively. B<mmdebstrap> will disable running
+services by temporarily moving F</usr/sbin/policy-rc.d> and
+F</usr/sbin/start-stop-daemon> if they exist. This can be disabled with
+B<--skip=chroot/policy-rc.d> and B<--skip=chroot/start-stop-daemon>,
+respectively.
+
+=item B<extract>
+
+Extract the downloaded packages into the rootfs.
+
+=item B<prepare>
+
+In B<fakechroot> mode, the environment variable C<LD_LIBRARY_PATH> will be set
+up correctly. For foreign B<fakechroot> environments, C<LD_LIBRARY_PATH> and
+C<QEMU_LD_PREFIX> are set up accordingly. This step is not carried out in
+B<extract> mode and neither for the B<chrootless> variant.
+
+=item B<extract-hook>
+
+Run B<--extract-hook> options and all F<extract*> scripts in B<--hook-dir>.
+
+=item B<essential>
+
+Uses C<dpkg --install> to properly install all packages that have been
+extracted before. Removes all packages downloaded in the B<download> step,
+except those which were present in F</var/cache/apt/archives/> before (if any).
+This can be disabled using B<--skip=essential/unlink>. This step is not carried
+out in B<extract> mode.
+
+=item B<essential-hook>
+
+Run B<--essential-hook> options and all F<essential*> scripts in B<--hook-dir>.
+This step is not carried out in B<extract> mode.
+
+=item B<install>
+
+Install the apt package into the chroot, if necessary, and then run apt from
+inside the chroot to install all remaining packages. This step is not carried
+out in B<extract> mode.
+
+=item B<customize-hook>
+
+Run B<--customize-hook> options and all F<customize*> scripts in B<--hook-dir>.
+This step is not carried out in B<extract> mode.
+
+=item B<unmount>
+
+Unmount everything that was mounted during the B<mount> stage and restore
+F</usr/sbin/policy-rc.d> and F</usr/sbin/start-stop-daemon> if necessary.
+
+=item B<cleanup>
+
+Performs cleanup tasks, unless B<--skip=cleanup> is used:
+
+=over 4
+
+=item * Removes the package lists (unless B<--skip=cleanup/apt/lists>) and apt cache (unless B<--skip=cleanup/apt/cache>). Both removals can be disabled by using B<--skip=cleanup/apt>.
+
+=item * Remove all files that were put into the chroot for setup purposes, like F</etc/apt/apt.conf.d/00mmdebstrap> and the temporary apt config. This can be disabled using B<--skip=cleanup/mmdebstrap>.
+
+=item * Remove files that make the result unreproducible and write the empty string to F</etc/machine-id> if it exists. This can be disabled using B<--skip=cleanup/reproducible>. Note that this will not remove files that make the result unreproducible on machines with differing F</etc/resolv.conf> or F</etc/hostname>. Use a B<--customize-hook> to make those two files reproducible across multiple hosts. See section C<SOURCE_DATE_EPOCH> for more information. The following files will be removed:
+
+=over 4
+
+=item * F</var/log/dpkg.log>
+
+=item * F</var/log/apt/history.log>
+
+=item * F</var/log/apt/term.log>
+
+=item * F</var/log/alternatives.log>
+
+=item * F</var/cache/ldconfig/aux-cache>
+
+=item * F</var/log/apt/eipp.log.xz>
+
+=item * F</var/lib/dbus/machine-id>
+
+=back
+
+=item * Remove everything in F</run> inside the chroot. This can be disabled using B<--skip=cleanup/run>.
+
+=item * Remove everything in F</tmp> inside the chroot. This can be disabled using B<--skip=cleanup/tmp>.
+
+=back
+
+=item B<output>
+
+For formats other than B<directory>, pack up the temporary chroot directory
+into a tarball, ext2 image or squashfs image and delete the temporary chroot
+directory.
+
+If B<--skip=output/dev> is added, the resulting chroot will not contain the
+device nodes, directories and symlinks that B<debootstrap> creates but just
+an empty /dev as created by B<base-files>.
+
+If B<--skip=output/mknod> is added, the resulting chroot will not contain
+device nodes (neither block nor character special devices). This is useful
+if the chroot tarball is to be extracted in environments where mknod does
+not work, such as in unshared user namespaces.
+
+=back
+
+=head1 EXAMPLES
+
+Use like debootstrap:
+
+ $ sudo mmdebstrap unstable ./unstable-chroot
+
+Without superuser privileges:
+
+ $ mmdebstrap unstable unstable-chroot.tar
+
+With no command line arguments at all, the chroot content is entirely defined
+by a sources.list file on standard input:
+
+ $ mmdebstrap < /etc/apt/sources.list > unstable-chroot.tar
+
+Since the tarball is output on stdout, members of it can be excluded using tar
+on-the-fly. For example, the /dev directory can be removed from the final
+tarball in cases where it is to be extracted by a non-root user who cannot
+create device nodes:
+
+ $ mmdebstrap unstable | tar --delete ./dev > unstable-chroot.tar
+
+Create a tarball for use with C<sbuild --chroot-mode=unshare>:
+
+ $ mmdebstrap --variant=buildd unstable ~/.cache/sbuild/unstable-amd64.tar
+
+Instead of a tarball, a squashfs image can be created:
+
+ $ mmdebstrap unstable unstable-chroot.squashfs
+
+By default, B<mmdebstrap> runs B<tar2sqfs> with C<--no-skip --exportable
+--compressor xz --block-size 1048576>. To choose a different set of options,
+and to filter out all extended attributes not supported by B<tar2sqfs>, pipe
+the output of B<mmdebstrap> into B<tar2sqfs> manually like so:
+
+ $ mmdebstrap unstable \
+ | mmtarfilter --pax-exclude='*' \
+ --pax-include='SCHILY.xattr.user.*' \
+ --pax-include='SCHILY.xattr.trusted.*' \
+ --pax-include='SCHILY.xattr.security.*' \
+ | tar2sqfs --quiet --no-skip --force --exportable --compressor xz \
+ --block-size 1048576 unstable-chroot.squashfs
+
+By default, debootstrapping a stable distribution will add mirrors for security
+and updates to the sources.list.
+
+ $ mmdebstrap stable stable-chroot.tar
+
+If you don't want this behaviour, you can override it by manually specifying a
+mirror in various different ways:
+
+ $ mmdebstrap stable stable-chroot.tar http://deb.debian.org/debian
+ $ mmdebstrap stable stable-chroot.tar "deb http://deb.debian.org/debian stable main"
+ $ mmdebstrap stable stable-chroot.tar /path/to/sources.list
+ $ mmdebstrap stable stable-chroot.tar - < /path/to/sources.list
+
+Drop locales (but not the symlink to the locale name alias database),
+translated manual packages (but not the untranslated ones), and documentation
+(but not copyright and Debian changelog).
+
+ $ mmdebstrap --variant=essential \
+ --dpkgopt='path-exclude=/usr/share/man/*' \
+ --dpkgopt='path-include=/usr/share/man/man[1-9]/*' \
+ --dpkgopt='path-exclude=/usr/share/locale/*' \
+ --dpkgopt='path-include=/usr/share/locale/locale.alias' \
+ --dpkgopt='path-exclude=/usr/share/doc/*' \
+ --dpkgopt='path-include=/usr/share/doc/*/copyright' \
+ --dpkgopt='path-include=/usr/share/doc/*/changelog.Debian.*' \
+ unstable debian-unstable.tar
+
+Create a bootable USB Stick that boots into a full Debian desktop:
+
+ $ mmdebstrap --aptopt='Apt::Install-Recommends "true"' --customize-hook \
+ 'chroot "$1" adduser --gecos user --disabled-password user' \
+ --customize-hook='echo 'user:live' | chroot "$1" chpasswd' \
+ --customize-hook='echo host > "$1/etc/hostname"' \
+ --customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
+ --include=linux-image-amd64,task-desktop unstable debian-unstable.tar
+ $ cat << END > extlinux.conf
+ > default linux
+ > timeout 0
+ >
+ > label linux
+ > kernel /vmlinuz
+ > append initrd=/initrd.img root=LABEL=rootfs
+ END
+ # You can use $(sudo blockdev --getsize64 /dev/sdXXX) to get the right
+ # image size for the target medium in bytes
+ $ guestfish -N debian-unstable.img=disk:8G -- \
+ part-disk /dev/sda mbr : \
+ part-set-bootable /dev/sda 1 true : \
+ mkfs ext4 /dev/sda1 : \
+ set-label /dev/sda1 rootfs : \
+ mount /dev/sda1 / : \
+ tar-in debian-unstable.tar / xattrs:true : \
+ upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
+ copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
+ extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
+ $ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
+ $ sudo dd if=debian-unstable.img of=/dev/sdXXX status=progress
+
+On architectures without extlinux you can also boot using grub2:
+
+ $ mmdebstrap --include=linux-image-amd64,grub2,systemd-sysv unstable fs.tar
+ $ guestfish -N debian-unstable.img=disk:2G -- \
+ part-disk /dev/sda mbr : \
+ part-set-bootable /dev/sda 1 true : \
+ mkfs ext4 /dev/sda1 : \
+ set-label /dev/sda1 rootfs : \
+ mount /dev/sda1 / : \
+ tar-in fs.tar / xattrs:true : \
+ command "grub-install /dev/sda" : \
+ command update-grub : \
+ sync : umount / : shutdown
+ $ qemu-system-x86_64 -m 1G -enable-kvm debian-unstable.img
+
+Build libdvdcss2.deb without installing anything or changing apt
+sources on the current system:
+
+ $ mmdebstrap --variant=apt --components=main,contrib --include=libdvd-pkg \
+ --customize-hook='chroot $1 /usr/lib/libdvd-pkg/b-i_libdvdcss.sh' \
+ | tar --extract --verbose --strip-components=4 \
+ --wildcards './usr/src/libdvd-pkg/libdvdcss2_*_*.deb'
+ $ ls libdvdcss2_*_*.deb
+
+As a replacement for autopkgtest-build-qemu and vmdb2 for all architectures
+supporting EFI booting (amd64, arm64, armhf, i386, riscv64), use the
+convenience wrapper around B<mmdebstrap>:
+
+ $ mmdebstrap-autopkgtest-build-qemu unstable ./autopkgtest.img
+
+As a replacement for autopkgtest-build-qemu and vmdb2 on architectures
+supporting extlinux (amd64 and i386):
+
+ $ mmdebstrap --variant=important --include=linux-image-amd64 \
+ --customize-hook='chroot "$1" passwd --delete root' \
+ --customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user' \
+ --customize-hook='chroot "$1" passwd --delete user' \
+ --customize-hook='echo host > "$1/etc/hostname"' \
+ --customize-hook='echo "127.0.0.1 localhost host" > "$1/etc/hosts"' \
+ --customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed \
+ unstable debian-unstable.tar
+ $ cat << END > extlinux.conf
+ > default linux
+ > timeout 0
+ >
+ > label linux
+ > kernel /vmlinuz
+ > append initrd=/initrd.img root=/dev/vda1 rw console=ttyS0
+ END
+ $ guestfish -N debian-unstable.img=disk:8G -- \
+ part-disk /dev/sda mbr : \
+ part-set-bootable /dev/sda 1 true : \
+ mkfs ext4 /dev/sda1 : mount /dev/sda1 / : \
+ tar-in debian-unstable.tar / xattrs:true : \
+ upload /usr/lib/EXTLINUX/mbr.bin /boot/mbr.bin : \
+ copy-file-to-device /boot/mbr.bin /dev/sda size:440 : \
+ extlinux / : copy-in extlinux.conf / : sync : umount / : shutdown
+ $ qemu-img convert -O qcow2 debian-unstable.img debian-unstable.qcow2
+
+As a debootstrap wrapper to run it without superuser privileges but using Linux
+user namespaces instead. This fixes Debian bug #829134.
+
+ $ mmdebstrap --variant=custom --mode=unshare \
+ --setup-hook='debootstrap unstable "$1"' \
+ - debian-debootstrap.tar
+
+Build a non-Debian chroot like Ubuntu bionic:
+
+ $ mmdebstrap --aptopt='Dir::Etc::Trusted
+ "/usr/share/keyrings/ubuntu-keyring-2012-archive.gpg"' bionic bionic.tar
+
+If, for some reason, you cannot use a caching proxy like apt-cacher or
+apt-cacher-ng, you can use the B<sync-in> and B<sync-out> special hooks to
+synchronize a directory outside the chroot with F</var/cache/apt/archives>
+inside the chroot.
+
+ $ mmdebstrap --variant=apt --skip=essential/unlink \
+ --setup-hook='mkdir -p ./cache "$1"/var/cache/apt/archives/' \
+ --setup-hook='sync-in ./cache /var/cache/apt/archives/' \
+ --customize-hook='sync-out /var/cache/apt/archives ./cache' \
+ unstable /dev/null
+
+Instead of copying potentially large amounts of data with B<sync-in> you can
+also use a bind-mount in combination with a C<file://> mirror to make packages
+from the outside available inside the chroot:
+
+ $ mmdebstrap --variant=apt --skip=essential/unlink \
+ --setup-hook='mkdir "$1/tmp/mirror"' \
+ --setup-hook='mount -o ro,bind /tmp/mirror "$1/tmp/mirror"' \
+ --customize-hook='sync-out /var/cache/apt/archives ./cache' \
+ --customize-hook='umount "$1/tmp/mirror"; rmdir "$1/tmp/mirror";' \
+ unstable /dev/null file:///tmp/mirror http://deb.debian.org/debian
+
+To automatically mount all directories referenced by C<file://> mirrors
+into the chroot you can use a hook:
+
+ $ mmdebstrap --variant=apt \
+ --hook-dir=/usr/share/mmdebstrap/hooks/file-mirror-automount \
+ unstable /dev/null file:///tmp/mirror1 file:///tmp/mirror2
+
+Create a system that can be used with docker:
+
+ $ mmdebstrap unstable | sudo docker import - debian
+ [...]
+ $ sudo docker run -it --rm debian whoami
+ root
+ $ sudo docker rmi debian
+
+Create and boot a qemu virtual machine for an arbitrary architecture using
+the B<debvm-create> wrapper script around B<mmdebstrap>:
+
+ $ debvm-create -r stable -- --architecture=riscv64
+ $ debvm-run
+
+Create a system that can be used with podman:
+
+ $ mmdebstrap unstable | podman import - debian
+ [...]
+ $ podman run --network=none -it --rm debian whoami
+ root
+ $ podman rmi debian
+
+As a docker/podman replacement:
+
+ $ mmdebstrap unstable chroot.tar
+ [...]
+ $ mmdebstrap --variant=custom --skip=update,tar-in/mknod \
+ --setup-hook='tar-in chroot.tar /' \
+ --customize-hook='chroot "$1" whoami' unstable /dev/null
+ [...]
+ root
+ $ rm chroot.tar
+
+You can re-use a chroot tarball created with mmdebstrap for further refinement.
+Say you want to create a minimal chroot and a chroot with more packages
+installed, then instead of downloading and installing the essential packages
+twice, you can build on top of the already present minimal chroot:
+
+ $ mmdebstrap --variant=apt unstable chroot.tar
+ $ mmdebstrap --variant=custom --skip=update,setup,cleanup,tar-in/mknod \
+ --setup-hook='tar-in chroot.tar /' \
+ --customize-hook='chroot "$1" apt-get install --yes pkg1 pkg2' \
+ '' chroot-full.tar
+
+=head1 ENVIRONMENT VARIABLES
+
+=over 8
+
+=item C<SOURCE_DATE_EPOCH>
+
+By setting C<SOURCE_DATE_EPOCH> the result will be reproducible across multiple
+runs with the same options and mirror content. Note that for debootstrap
+compatibility, B<mmdebstrap> will copy the host's F</etc/resolv.conf> and
+F</etc/hostname> into the chroot. This means that the B<mmdebstrap> output will
+differ if it is run on machines with differing F</etc/resolv.conf> and
+F</etc/hostname> contents. To make the result reproducible across different
+hosts, you need to either manually delete both files from the output:
+
+ $ mmdebstrap --customize-hook='rm "$1"/etc/resolv.conf' \
+ --customize-hook='rm "$1"/etc/hostname' ...
+
+or fill them with reproducible content:
+
+ $ mmdebstrap --customize-hook='echo nameserver X > "$1"/etc/resolv.conf' \
+ --customize-hook='echo host > "$1"/etc/hostname' ...
+
+=item C<TMPDIR>
+
+When creating a tarball, a temporary directory is populated with the rootfs
+before the tarball is packed. The location of that temporary directory will be
+in F</tmp> or the location pointed to by C<TMPDIR> if that environment variable
+is set. Setting C<TMPDIR> to a different directory than F</tmp> is useful if
+you have F</tmp> on a tmpfs that is too small for your rootfs.
+
+If you set C<TMPDIR> in B<unshare> mode, then the unshared user must be able to
+access the directory. This means that the directory itself must be
+world-writable and all its ancestors must be at least world-executable.
+
+Since C<TMPDIR> is only valid outside the chroot, the variable is being unset
+when running hook scripts. If you need a valid temporary directory in a hook,
+consider using F</tmp> inside your target directory.
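+
+Example: use F</var/tmp>, which is world-writable and usually not on a tmpfs,
+as the location for the temporary rootfs:
+
+ $ TMPDIR=/var/tmp mmdebstrap unstable unstable-chroot.tar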
+
+=back
+
+=head1 DEBOOTSTRAP
+
+This section lists some differences to debootstrap.
+
+=over 8
+
+=item * More than one mirror possible
+
+=item * Default mirrors for stable releases include updates and security mirror
+
+=item * Multiple ways to operate as non-root: fakechroot and unshare
+
+=item * Twice as fast
+
+=item * Can create a chroot with only C<Essential:yes> packages and their deps
+
+=item * Reproducible output by default if $SOURCE_DATE_EPOCH is set
+
+=item * Can create output on filesystems with nodev set
+
+=item * apt cache and lists are cleaned at the end
+
+=item * Foreign architecture chroots using qemu-user
+
+=back
+
+Limitations in comparison to debootstrap:
+
+=over 8
+
+=item * Only runs on systems with apt installed (Debian and derivatives)
+
+=item * No I<SCRIPT> argument (use hooks instead)
+
+=item * Some debootstrap options don't exist, namely:
+
+I<--second-stage>, I<--exclude>, I<--resolve-deps>, I<--force-check-gpg>,
+I<--merged-usr> and I<--no-merged-usr>
+
+=back
+
+=head1 MERGED-/USR
+
+B<mmdebstrap> will create a merged-/usr chroot or not depending on whether
+packages setting up merged-/usr (i.e. the B<usrmerge> package) are installed or
+not. In Debian, the essential package B<init-system-helpers> depends on the
+B<usrmerge> package, starting with Debian 12 (Bookworm).
+
+Before Debian 12 (Bookworm), to force B<mmdebstrap> to create a chroot with
+merged-/usr using symlinks, either explicitly install the B<usrmerge> package:
+
+ --include=usrmerge
+
+or set up merged-/usr using the debootstrap method, which takes care of the
+architecture specific symlinks and installs the B<usr-is-merged> package.
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/merged-usr
+
+To force B<mmdebstrap> to create a chroot without merged-/usr even after the
+Debian 12 (Bookworm) release, you can use the following hook:
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/no-merged-usr
+
+This will write "this system will not be supported in the future" into
+F</etc/unsupported-skip-usrmerge-conversion> inside the chroot and install the
+B<usr-is-merged> package to avoid the installation of the B<usrmerge> package
+and its dependencies.
+
+If you are using B<mmdebstrap> in a setup where you do not know upfront whether
+the chroot you are creating should be merged-/usr or not and you want to avoid
+installation of the B<usrmerge> package and its dependencies, you can use:
+
+ --hook-dir=/usr/share/mmdebstrap/hooks/maybe-merged-usr
+
+That hook will use the availability of the B<usr-is-merged> package to decide
+whether to call the B<merged-usr> hook or not.
+
+=head1 COMPRESSION
+
+B<mmdebstrap> will choose a suitable compressor for the output tarball
+depending on the filename extension. The following mapping from filename
+extension to compressor applies:
+
+ extension compressor
+ --------------------
+ .tar none
+ .gz gzip
+ .tgz gzip
+ .taz gzip
+ .Z compress
+ .taZ compress
+ .bz2 bzip2
+ .tbz bzip2
+ .tbz2 bzip2
+ .tz2 bzip2
+ .lz lzip
+ .lzma lzma
+ .tlz lzma
+ .lzo lzop
+ .lz4 lz4
+ .xz xz
+ .txz xz
+ .zst zstd
+
+To change compression-specific options, either use the respective environment
+variables like B<XZ_OPT> or send B<mmdebstrap> output to your compressor of
+choice with a pipe.
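+
+Example: pass options to xz via its environment variable, or pipe the
+uncompressed tarball into a compressor of your choice:
+
+ $ XZ_OPT="-9 -T0" mmdebstrap unstable unstable-chroot.tar.xz
+ $ mmdebstrap unstable | zstd -19 -T0 > unstable-chroot.tar.zst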
+
+=head1 WRAPPERS
+
+=head2 debvm
+
+B<debvm> helps create and run virtual machines for various Debian releases and
+architectures. The tool B<debvm-create> can be used to create a virtual
+machine image and the tool B<debvm-run> can be used to run such a machine
+image. Their primary purpose is testing software using qemu as a containment
+technology. These are relatively thin wrappers around B<mmdebstrap> and
+B<qemu>.
+
+=head2 bdebstrap
+
+B<bdebstrap> is a YAML config based multi-mirror Debian chroot creation tool.
+B<bdebstrap> is an alternative to B<debootstrap> and a wrapper around
+B<mmdebstrap> to support YAML based configuration files. It inherits all
+benefits from B<mmdebstrap>. The support for configuration allows storing all
+customization in a YAML file instead of having to use a very long one-liner
+call to B<mmdebstrap>. It also allows layering multiple customizations on top
+of each other, e.g. to support flavors of an image.
+
+=head1 BUGS
+
+https://gitlab.mister-muffin.de/josch/mmdebstrap/issues
+
+https://bugs.debian.org/src:mmdebstrap
+
+As of version 1.20.9, dpkg does not provide facilities preventing it from
+reading the dpkg configuration of the machine running B<mmdebstrap>.
+Therefore, until this dpkg limitation is fixed, a default dpkg configuration is
+recommended on machines running B<mmdebstrap>. If you are using B<mmdebstrap>
+as the non-root user, then as a workaround you could run C<chmod 600
+/etc/dpkg/dpkg.cfg.d/*> so that the config files are only accessible by the
+root user. See Debian bug #808203.
+
+With apt versions before 2.1.16, setting C<[trusted=yes]> or
+C<Acquire::AllowInsecureRepositories "1"> to allow signed archives without a
+known public key or unsigned archives will fail because of a gpg warning in the
+apt output. Since apt does not communicate its status via any other means than
+human readable strings, and because B<mmdebstrap> wants to treat transient
+network errors as errors, B<mmdebstrap> treats any warning from "apt-get
+update" as an error.
+
+=head1 SEE ALSO
+
+L<debootstrap(8)>, L<debvm(1)>, L<bdebstrap(1)>
+
+=cut
+
+# vim: expandtab tabstop=4 softtabstop=4 shiftwidth=4 ft=perl tw=79
diff --git a/mmdebstrap-autopkgtest-build-qemu b/mmdebstrap-autopkgtest-build-qemu
new file mode 100755
index 0000000..19175e5
--- /dev/null
+++ b/mmdebstrap-autopkgtest-build-qemu
@@ -0,0 +1,444 @@
+#!/bin/sh
+# Copyright 2023 Johannes Schauer Marin Rodrigues <josch@debian.org>
+# Copyright 2023 Helmut Grohne <helmut@subdivi.de>
+# SPDX-License-Identifier: MIT
+
+# We generally use single quotes to avoid variable expansion:
+# shellcheck disable=SC2016
+
+# Replacement for autopkgtest-build-qemu and vmdb2 for all architectures
+# supporting EFI booting (amd64, arm64, armhf, i386, riscv64).
+# As a replacement for autopkgtest-build-qemu and vmdb2 on ppc64el, which
+# supports neither extlinux nor EFI booting, there is an unmaintained script
+# which uses grub to boot instead:
+#
+# https://gitlab.mister-muffin.de/josch/mmdebstrap/src/commit/
+# e523741610a4ed8579642bfc755956f64c847ef3/mmdebstrap-autopkgtest-build-qemu
+
+: <<'POD2MAN'
+=head1 NAME
+
+mmdebstrap-autopkgtest-build-qemu - autopkgtest-build-qemu without vmdb2 but mmdebstrap and EFI boot
+
+=head1 SYNOPSIS
+
+B<mmdebstrap-autopkgtest-build-qemu> [I<OPTIONS>] B<--boot>=B<efi> I<RELEASE> I<IMAGE>
+
+=head1 DESCRIPTION
+
+B<mmdebstrap-autopkgtest-build-qemu> is a mostly compatible drop-in replacement
+for L<autopkgtest-build-qemu(1)> with two main differences: Firstly, it uses
+L<mmdebstrap(1)> instead of L<vmdb2(1)> and thus is able to create QEMU disk
+images without requiring superuser privileges. Secondly, it uses
+L<systemd-boot(7)> and thus only supports booting via EFI. For architectures
+for which L<autopkgtest-virt-qemu(1)> does not default to EFI booting you must
+pass B<--boot=efi> when invoking the autopkgtest virt backend.
+
+=head1 POSITIONAL PARAMETERS
+
+=over 8
+
+=item I<RELEASE>
+
+The release to download from the I<MIRROR>. This parameter is required.
+
+=item I<IMAGE>
+
+The file to write, in raw format. This parameter is required.
+
+=back
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--mirror>=I<MIRROR>
+
+Specify the mirror to download packages from. It defaults to
+http://deb.debian.org/debian (i.e. Debian), but you can pass a mirror of any
+Debian derivative.
+
+=item B<--architecture>=I<ARCHITECTURE>
+
+Set the architecture for the virtual machine image, specified as a L<dpkg(1)>
+architecture. If omitted, the host architecture is assumed.
+
+B<--arch>=I<ARCH> is an alias for this option.
+
+=item B<--script>=I<SCRIPT>
+
+Specifies a user script that will be called with the root filesystem of the
+image as its first parameter. This script can then make any necessary
+modifications to the root filesystem.
+
+The script must be a POSIX shell script, and should not depend on bash-specific
+features. This script will be executed inside a L<chroot(1)> call in the
+virtual machine root filesystem.
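+
+For example, a minimal script could look like this (a sketch; the file name
+and the apt option are only illustrative):
+
+ #!/bin/sh
+ set -eu
+ echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/99translations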
+
+=item B<--size>=I<SIZE>
+
+Specifies the image size for the virtual machine, defaulting to 25G.
+
+=item B<--apt-proxy>=I<PROXY>
+
+Specify an apt proxy to use in the virtual machine. If an apt proxy is
+configured on the host, the virtual machine will automatically use it;
+otherwise, no proxy is configured.
+
+=item B<--boot>=B<efi>, B<--efi>
+
+Select the way the generated image will expect to be booted. Unless you
+explicitly select B<--boot=efi>, the operation will fail.
+
+=item B<--keyring>=I<KEYRING>
+
+Passes an additional B<--keyring> parameter to B<mmdebstrap>.
+
+=back
+
+=head1 EXAMPLES
+
+Make sure that F</path/to/debian-unstable.img> is a path that the unshared
+user has access to. This can be done by ensuring world-execute permissions on
+all path components or by creating the image in a world-readable directory like
+F</tmp> before copying it into its final location.
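+
+For example, if the image is created directly below your home directory, it is
+usually sufficient to make the home directory world-executable (a sketch):
+
+ $ chmod o+x "$HOME"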
+
+ $ mmdebstrap-autopkgtest-build-qemu --boot=efi --arch=amd64 unstable /path/to/debian-unstable.img
+ [...]
+ $ autopkgtest mypackage -- qemu --boot=efi --dpkg-architecture=amd64 /path/to/debian-unstable.img
+
+Make sure to add B<--boot=efi> to both the B<mmdebstrap-autopkgtest-build-qemu>
+as well as the B<autopkgtest-virt-qemu> invocation.
+
+=head1 SEE ALSO
+
+L<autopkgtest-build-qemu(1)>, L<autopkgtest-virt-qemu(1)>, L<mmdebstrap(1)>, L<autopkgtest(1)>
+
+=cut
+POD2MAN
+
+set -eu
+
+die() {
+ echo "$*" 1>&2
+ exit 1
+}
+usage() {
+ die "usage: $0 [--architecture=|--apt-proxy=|--keyring=|--mirror=|--script=|--size=] --boot=efi <RELEASE> <IMAGE>"
+}
+usage_error() {
+ echo "error: $*" 1>&2
+ usage
+}
+
+BOOT=auto
+ARCHITECTURE=$(dpkg --print-architecture)
+IMAGE=
+MIRROR=
+KEYRING=
+RELEASE=
+SIZE=25G
+SCRIPT=
+
+# consumed by setup-testbed
+export AUTOPKGTEST_BUILD_QEMU=1
+
+opt_boot() {
+ BOOT="$1"
+}
+opt_architecture() {
+ ARCHITECTURE="$1"
+}
+opt_arch() {
+ ARCHITECTURE="$1"
+}
+opt_apt_proxy() {
+ # consumed by setup-testbed
+ export AUTOPKGTEST_APT_PROXY="$1"
+ # consumed by mmdebstrap
+ if test "$1" = DIRECT; then
+ unset http_proxy
+ else
+ export http_proxy="$1"
+ fi
+}
+opt_keyring() {
+ KEYRING="$1"
+}
+opt_mirror() {
+ # consumed by setup-testbed
+ export MIRROR="$1"
+}
+opt_script() {
+ test -f "$1" || die "passed script '$1' does not refer to a file"
+ SCRIPT="$1"
+}
+opt_size() {
+ SIZE="$1"
+}
+
+positional=1
+positional_1() {
+ # consumed by setup-testbed
+ export RELEASE="$1"
+}
+positional_2() {
+ IMAGE="$1"
+}
+positional_3() { opt_mirror "$@"; }
+positional_4() { opt_architecture "$@"; }
+positional_5() { opt_script "$@"; }
+positional_6() { opt_size "$@"; }
+positional_7() {
+ die "too many positional options"
+}
+
+while test "$#" -gt 0; do
+ case "$1" in
+ --architecture=*|--arch=*|--boot=*|--keyring=*|--mirror=*|--script=*|--size=*)
+ optname="${1%%=*}"
+ "opt_${optname#--}" "${1#*=}"
+ ;;
+ --apt-proxy=*)
+ opt_apt_proxy "${1#*=}"
+ ;;
+ --architecture|--arch|--boot|--keyring|--mirror|--script|--size)
+ test "$#" -ge 2 || usage_error "missing argument for $1"
+ "opt_${1#--}" "$2"
+ shift
+ ;;
+ --apt-proxy)
+ test "$#" -ge 2 || usage_error "missing argument for $1"
+ opt_apt_proxy "$2"
+ shift
+ ;;
+ --efi)
+ opt_boot efi
+ ;;
+ --*)
+ usage_error "unrecognized argument $1"
+ ;;
+ *)
+ "positional_$positional" "$1"
+ positional=$((positional + 1))
+ ;;
+ esac
+ shift
+done
+
+test -z "$RELEASE" -o -z "$IMAGE" && usage_error "missing positional arguments"
+test "$BOOT" = efi ||
+ die "this tool does not support boot modes other than efi"
+
+case "$ARCHITECTURE" in
+ amd64)
+ EFIIMG=bootx64.efi
+ QEMUARCH=x86_64
+ VMFPKG=ovmf
+ ;;
+ arm64)
+ EFIIMG=bootaa64.efi
+ QEMUARCH=aarch64
+ VMFPKG=qemu-efi-aarch64
+ ;;
+ armhf)
+ EFIIMG=bootarm.efi
+ QEMUARCH=arm
+ VMFPKG=qemu-efi-arm
+ ;;
+ i386)
+ EFIIMG=bootia32.efi
+ QEMUARCH=i386
+ VMFPKG=ovmf-ia32
+ ;;
+ riscv64)
+ EFIIMG=bootriscv64.efi
+ QEMUARCH=riscv64
+ VMFPKG=
+ ;;
+ *)
+ die "unsupported architecture: $ARCHITECTURE"
+ ;;
+esac
+
+if test "$(dpkg-query -f '${db:Status-Status}' -W binutils-multiarch)" = installed; then
+ GNU_PREFIX=
+else
+ GNU_ARCHITECTURE="$(dpkg-architecture "-a$ARCHITECTURE" -qDEB_HOST_GNU_TYPE)"
+ GNU_PREFIX="$GNU_ARCHITECTURE-"
+ GNU_SUFFIX="-$(echo "$GNU_ARCHITECTURE" | tr _ -)"
+ test "$(dpkg-query -f '${db:Status-Status}' -W "binutils$GNU_SUFFIX")" = installed ||
+ die "please install binutils$GNU_SUFFIX or binutils-multiarch"
+fi
+
+arches=" $(dpkg --print-architecture) $(dpkg --print-foreign-architectures | tr '\n' ' ') "
+case $arches in
+ *" $ARCHITECTURE "*) : ;; # nothing to do
+ *) die "enable $ARCHITECTURE by running: sudo dpkg --add-architecture $ARCHITECTURE && sudo apt update" ;;
+esac
+
+for pkg in autopkgtest dosfstools e2fsprogs fdisk mount mtools passwd "systemd-boot-efi:$ARCHITECTURE" uidmap; do
+ if [ "$(dpkg-query -f '${db:Status-Status}' -W "$pkg")" != installed ]; then
+ die "please install $pkg"
+ fi
+done
+
+BOOTSTUB="/usr/lib/systemd/boot/efi/linux${EFIIMG#boot}.stub"
+
+WORKDIR=
+
+cleanup() {
+ test -n "$WORKDIR" && rm -Rf "$WORKDIR"
+}
+
+trap cleanup EXIT INT TERM QUIT
+
+WORKDIR=$(mktemp -d)
+
+FAT_OFFSET_SECTORS=$((1024*2))
+FAT_SIZE_SECTORS=$((1024*254))
+
+# The image is raw and not in qcow2 format because:
+# - faster run-time as the "qemu-img convert" step is not needed
+# - image can be used independent of qemu tooling
+# - modifying the image just with "mount" instead of requiring qemu-nbd
+# - sparse images make the file just as small as with qcow2
+# - trim support is more difficult on qcow2
+# - snapshots and overlays work just as well with raw images
+# - users who prefer qcow2 get to choose to run it themselves with their own
+# custom options like compression
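+#   For example (a sketch; file names are only illustrative):
+#     qemu-img convert -f raw -O qcow2 -c debian-unstable.img debian-unstable.qcow2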
+#
+# Make the image writeable to the first subgid. mmdebstrap will map this gid to
+# the root group. unshare instead will map the current gid to 0 and the first
+# subgid to 1. Therefore mmdebstrap will be able to write to the image.
+rm -f "$IMAGE"
+: >"$IMAGE"
+unshare -U -r --map-groups=auto chown 0:1 "$IMAGE"
+chmod 0660 "$IMAGE"
+
+# Make sure that the unshared user is able to access the file.
+# As an alternative to using /sbin/mkfs.ext4, one could use --format=ext2, which
+# would add an extra copy operation and come with the limitations of ext2.
+# Another solution: https://github.com/tytso/e2fsprogs/pull/118
+if ! mmdebstrap --unshare-helper touch "$IMAGE"; then
+ die "$IMAGE cannot be accessed by the unshared user -- either make all path components up to the image itself world-executable or place the image into a world-readable path like /tmp"
+fi
+
+set -- \
+ --mode=unshare \
+ --variant=important \
+ --architecture="$ARCHITECTURE"
+
+test "$RELEASE" = jessie &&
+ set -- "$@" --hook-dir=/usr/share/mmdebstrap/hooks/jessie-or-older
+
+set -- "$@" \
+ "--include=init,linux-image-$ARCHITECTURE,python3" \
+ '--customize-hook=echo host >"$1/etc/hostname"' \
+ '--customize-hook=echo 127.0.0.1 localhost host >"$1/etc/hosts"' \
+ '--customize-hook=passwd --root "$1" --delete root' \
+ '--customize-hook=useradd --root "$1" --home-dir /home/user --create-home user' \
+ '--customize-hook=passwd --root "$1" --delete user' \
+ '--customize-hook=/usr/share/autopkgtest/setup-commands/setup-testbed'
+
+if test -n "$SCRIPT"; then
+ set -- "$@" \
+ "--customize-hook=upload '$SCRIPT' /userscript" \
+ "--chrooted-customize-hook=sh /userscript" \
+ '--customize-hook=rm -f "$1/userscript"'
+fi
+
+EXT4_OFFSET_BYTES=$(( (FAT_OFFSET_SECTORS + FAT_SIZE_SECTORS) * 512))
+EXT4_OPTIONS="offset=$EXT4_OFFSET_BYTES,assume_storage_prezeroed=1"
+set -- "$@" \
+ "--customize-hook=download vmlinuz '$WORKDIR/kernel'" \
+ "--customize-hook=download initrd.img '$WORKDIR/initrd'" \
+ '--customize-hook=mount --bind "$1" "$1/mnt"' \
+ '--customize-hook=mount --bind "$1/mnt/mnt" "$1/mnt/dev"' \
+ '--customize-hook=/sbin/mkfs.ext4 -d "$1/mnt" -L autopkgtestvm -E '"'$EXT4_OPTIONS' '$IMAGE' '$SIZE'" \
+ '--customize-hook=umount --lazy "$1/mnt"' \
+ "$RELEASE" \
+ /dev/null
+
+test -n "$MIRROR" && set -- "$@" "$MIRROR"
+test -n "$KEYRING" && set -- "$@" "--keyring=$KEYRING"
+
+echo "mmdebstrap $*"
+mmdebstrap "$@" || die "mmdebstrap failed"
+
+unshare -U -r --map-groups=auto chown 0:0 "$IMAGE"
+chmod "$(printf %o "$(( 0666 & ~0$(umask) ))")" "$IMAGE"
+
+echo "root=LABEL=autopkgtestvm rw console=ttyS0" > "$WORKDIR/cmdline"
+
+align_size() {
+ echo "$(( ($1) + ($2) - 1 - (($1) + ($2) - 1) % ($2) ))"
+}
+
+alignment=$("${GNU_PREFIX}objdump" -p "$BOOTSTUB" | sed 's/^SectionAlignment\s\+\([0-9]\)/0x/;t;d')
+test -z "$alignment" && die "failed to discover the alignment of the efi stub"
+echo "determined efi vma alignment as $alignment"
+test "$RELEASE" = jessie -a "$((alignment))" -lt "$((1024*1024))" && {
+ echo "increasing efi vma alignment for jessie"
+ alignment=$((1024*1024))
+}
+lastoffset=0
+# shellcheck disable=SC2034 # unused variables serve documentation
+lastoffset="$("${GNU_PREFIX}objdump" -h "$BOOTSTUB" |
+ while read -r idx name size vma lma fileoff algn behind; do
+ test -z "$behind" -a "${algn#"2**"}" != "$algn" || continue
+ offset=$(( 0x$vma + 0x$size ))
+ test "$offset" -gt "$lastoffset" || continue
+ lastoffset="$offset"
+ echo "$lastoffset"
+ done | tail -n1)"
+lastoffset=$(align_size "$lastoffset" "$alignment")
+echo "determined minimum efi vma offset as $lastoffset"
+
+cmdline_size="$(stat -Lc%s "$WORKDIR/cmdline")"
+cmdline_size="$(align_size "$cmdline_size" "$alignment")"
+linux_size="$(stat -Lc%s "$WORKDIR/kernel")"
+linux_size="$(align_size "$linux_size" "$alignment")"
+cmdline_offset="$lastoffset"
+linux_offset=$((cmdline_offset + cmdline_size))
+initrd_offset=$((linux_offset + linux_size))
+
+SOURCE_DATE_EPOCH=0 \
+ "${GNU_PREFIX}objcopy" \
+ --enable-deterministic-archives \
+ --add-section .cmdline="$WORKDIR/cmdline" \
+ --change-section-vma .cmdline="$(printf 0x%x "$cmdline_offset")" \
+ --add-section .linux="$WORKDIR/kernel" \
+ --change-section-vma .linux="$(printf 0x%x "$linux_offset")" \
+ --add-section .initrd="$WORKDIR/initrd" \
+ --change-section-vma .initrd="$(printf 0x%x "$initrd_offset")" \
+ "$BOOTSTUB" "$WORKDIR/efiimg"
+
+rm -f "$WORKDIR/kernel" "$WORKDIR/initrd"
+
+truncate -s "$((FAT_SIZE_SECTORS * 512))" "$WORKDIR/fat"
+/sbin/mkfs.fat -F 32 --invariant "$WORKDIR/fat"
+mmd -i "$WORKDIR/fat" EFI EFI/BOOT
+mcopy -i "$WORKDIR/fat" "$WORKDIR/efiimg" "::EFI/BOOT/$EFIIMG"
+
+rm -f "$WORKDIR/efiimg"
+
+truncate --size="+$((34*512))" "$IMAGE"
+/sbin/sfdisk "$IMAGE" <<EOF
+label: gpt
+unit: sectors
+
+start=$FAT_OFFSET_SECTORS, size=$FAT_SIZE_SECTORS, type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B
+start=$((FAT_OFFSET_SECTORS + FAT_SIZE_SECTORS)), type=0FC63DAF-8483-4772-8E79-3D69D8477DE4
+EOF
+
+dd if="$WORKDIR/fat" of="$IMAGE" conv=notrunc,sparse bs=512 "seek=$FAT_OFFSET_SECTORS" status=none
+
+if test "$(dpkg --print-architecture)" != "$ARCHITECTURE" && test "$(dpkg-query -f '${db:Status-Status}' -W "qemu-system-$QEMUARCH")" != installed; then
+ echo "I: you might need to install a package providing qemu-system-$QEMUARCH to use this image with autopkgtest-virt-qemu" >&2
+fi
+if test -n "$VMFPKG" && test "$(dpkg-query -f '${db:Status-Status}' -W "$VMFPKG")" != installed; then
+ echo "I: you might need to install $VMFPKG to use this image with autopkgtest-virt-qemu" >&2
+fi
+
+echo "I: don't forget to pass --boot=efi when running autopkgtest-virt-qemu with this image" >&2
diff --git a/proxysolver b/proxysolver
new file mode 100755
index 0000000..5cd51fa
--- /dev/null
+++ b/proxysolver
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+#
+# This script is in the public domain
+#
+# Author: Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+#
+# thin layer around /usr/lib/apt/solvers/apt, so that we can capture the solver
+# result
+#
+# we set Debug::EDSP::WriteSolution=yes so that Install stanzas also come with
+# Package and Version fields. That way, we do not also have to parse the EDSP
+# request and spend time matching ID numbers
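+#
+# Example invocation (a sketch; the package name is only illustrative): place or
+# symlink this script into /usr/lib/apt/solvers/ and run
+#   APT_EDSP_DUMP_FILENAME=/tmp/dump.edsp \
+#     apt-get -o APT::Solver=proxysolver install hello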
+
+import subprocess
+import sys
+import os
+
+if not os.path.exists("/usr/lib/apt/solvers/apt"):
+ print(
+ """Error: ERR_NO_SOLVER
+Message: The external apt solver doesn't exist. You must install the apt-utils package.
+"""
+ )
+ exit()
+
+fname = os.environ.get("APT_EDSP_DUMP_FILENAME")
+if fname is None:
+ print(
+ """Error: ERR_NO_FILENAME
+Message: You have to set the environment variable APT_EDSP_DUMP_FILENAME
+ to a valid filename to store the dump of EDSP solver input in.
+ For example with: export APT_EDSP_DUMP_FILENAME=/tmp/dump.edsp
+"""
+ )
+ exit()
+
+try:
+ with open(fname, "w") as f:
+ with subprocess.Popen(
+ ["/usr/lib/apt/solvers/apt", "-oDebug::EDSP::WriteSolution=yes"],
+ stdin=sys.stdin.fileno(),
+ stdout=subprocess.PIPE,
+ bufsize=0, # unbuffered
+ text=True, # open in text mode
+ ) as p:
+ for line in p.stdout:
+ print(line, end="")
+ f.write(line)
+except (FileNotFoundError, PermissionError) as e:
+ print(
+ """Error: ERR_CREATE_FILE
+Message: Writing EDSP solver input to file '%s' failed as it couldn't be created!
+"""
+ % fname
+ )
diff --git a/run_null.sh b/run_null.sh
new file mode 100755
index 0000000..17b42fa
--- /dev/null
+++ b/run_null.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+set -eu
+
+SUDO=
+while [ "$#" -gt 0 ]; do
+ key="$1"
+ case "$key" in
+ SUDO)
+ SUDO=sudo
+ ;;
+ *)
+ echo "Unknown argument: $key"
+ exit 1
+ ;;
+ esac
+ shift
+done
+
+# - Run the command with fds 3 and 4 closed so that whatever test.sh does, it
+# cannot interfere with them.
+# - Both stdout and stderr of test.sh are written to stdout
+# - Write exit status of test.sh to fd 3
+# - Write stdout to shared/output.txt as well as to fd 4
+# - Redirect fd 3 to stdout
+# - Read fd 3 and let the group exit with that value
+# - Redirect fd 4 to stdout
+ret=0
+{ { { {
+ ret=0;
+ ( exec 3>&- 4>&-; env --chdir=./shared $SUDO sh -x ./test.sh 2>&1) || ret=$?;
+ echo $ret >&3;
+ } | tee shared/output.txt >&4;
+ } 3>&1;
+ } | { read -r xs; exit "$xs"; }
+} 4>&1 || ret=$?
+if [ "$ret" -ne 0 ]; then
+ echo "test.sh failed"
+ exit 1
+fi
diff --git a/run_qemu.sh b/run_qemu.sh
new file mode 100755
index 0000000..fc00ed9
--- /dev/null
+++ b/run_qemu.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+set -eu
+
+: "${DEFAULT_DIST:=unstable}"
+: "${cachedir:=./shared/cache}"
+tmpdir="$(mktemp -d)"
+
+cleanup() {
+ rv=$?
+ rm -f "$tmpdir/log"
+ [ -e "$tmpdir" ] && rmdir "$tmpdir"
+ if [ -e shared/output.txt ]; then
+ res="$(cat shared/exitstatus.txt)"
+ if [ "$res" != "0" ]; then
+ # this might possibly overwrite another non-zero rv
+ rv=1
+ fi
+ fi
+ exit $rv
+}
+
+trap cleanup INT TERM EXIT
+
+echo 1 > shared/exitstatus.txt
+if [ -e shared/output.txt ]; then
+ rm shared/output.txt
+fi
+touch shared/output.txt
+setpriv --pdeathsig TERM tail -f shared/output.txt &
+
+# to connect to serial use:
+# minicom -D 'unix#/tmp/ttyS0'
+#
+# or this (quit with ctrl+q):
+# socat stdin,raw,echo=0,escape=0x11 unix-connect:/tmp/ttyS0
+ret=0
+timeout --foreground 40m debvm-run --image="$(realpath "$cachedir")/debian-$DEFAULT_DIST.ext4" -- \
+ -nic none \
+ -m 4G -snapshot \
+ -monitor unix:/tmp/monitor,server,nowait \
+ -serial unix:/tmp/ttyS0,server,nowait \
+ -serial unix:/tmp/ttyS1,server,nowait \
+ -virtfs local,id=mmdebstrap,path="$(pwd)/shared",security_model=none,mount_tag=mmdebstrap \
+ >"$tmpdir/log" 2>&1 || ret=$?
+if [ "$ret" -ne 0 ]; then
+ cat "$tmpdir/log"
+ exit $ret
+fi
diff --git a/tarfilter b/tarfilter
new file mode 100755
index 0000000..66ef229
--- /dev/null
+++ b/tarfilter
@@ -0,0 +1,300 @@
+#!/usr/bin/env python3
+#
+# This script is in the public domain
+#
+# Author: Johannes Schauer Marin Rodrigues <josch@mister-muffin.de>
+#
+# This script accepts a tarball on standard input and filters it according to
+# the same rules used by dpkg --path-exclude and --path-include, using command
+# line options of the same name. The result is then printed on standard output.
+#
+# A tool like this should be written in C but libarchive has issues:
+# https://github.com/libarchive/libarchive/issues/587
+# https://github.com/libarchive/libarchive/pull/1288/ (needs 3.4.1)
+# Should these issues get fixed, then a good template is tarfilter.c in the
+# examples directory of libarchive.
+#
+# We are not using Perl either, because Archive::Tar slurps the whole tarball
+# into memory.
+#
+# We could also use Go but meh...
+# https://stackoverflow.com/a/59542307/784669
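+#
+# Example usage (a sketch; patterns and file names are only illustrative):
+#   mmdebstrap unstable - | ./tarfilter --path-exclude='/usr/share/doc/*' > filtered.tar
+#   ./tarfilter --transform 's,^\./usr/local,./opt,' < in.tar > out.tar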
+
+import tarfile
+import sys
+import argparse
+import fnmatch
+import re
+
+
+class PathFilterAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, "pathfilter", [])
+ regex = re.compile(fnmatch.translate(values))
+ items.append((self.dest, regex))
+ setattr(namespace, "pathfilter", items)
+
+
+class PaxFilterAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, "paxfilter", [])
+ regex = re.compile(fnmatch.translate(values))
+ items.append((self.dest, regex))
+ setattr(namespace, "paxfilter", items)
+
+
+class TypeFilterAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, "typefilter", [])
+ match values:
+ case "REGTYPE" | "0":
+ items.append(tarfile.REGTYPE)
+ case "LNKTYPE" | "1":
+ items.append(tarfile.LNKTYPE)
+ case "SYMTYPE" | "2":
+ items.append(tarfile.SYMTYPE)
+ case "CHRTYPE" | "3":
+ items.append(tarfile.CHRTYPE)
+ case "BLKTYPE" | "4":
+ items.append(tarfile.BLKTYPE)
+ case "DIRTYPE" | "5":
+ items.append(tarfile.DIRTYPE)
+ case "FIFOTYPE" | "6":
+ items.append(tarfile.FIFOTYPE)
+ case _:
+ raise ValueError("invalid type: %s" % values)
+ setattr(namespace, "typefilter", items)
+
+
+class TransformAction(argparse.Action):
+ def __call__(self, parser, namespace, values, option_string=None):
+ items = getattr(namespace, "trans", [])
+ # This function mimics what src/transform.c from tar does
+ if not values.startswith("s"):
+ raise ValueError("regex must start with an 's'")
+ if len(values) <= 4:
+ # minimum regex: s/x//
+ raise ValueError("invalid regex (too short)")
+ d = values[1]
+ if values.startswith(f"s{d}{d}"):
+ raise ValueError("empty regex")
+ values = values.removeprefix(f"s{d}")
+ flags = 0
+ if values.endswith(f"{d}i"):
+ # trailing flags
+ flags = re.IGNORECASE
+ values = values.removesuffix(f"{d}i")
+ # This regex only finds non-empty tokens.
+ # Finding empty tokens would require a variable length look-behind
+ # or \K in order to find escaped delimiters which is not supported by
+ # the python re module.
+ tokens = re.findall(rf"(?:\\[\\{d}]|[^{d}])+", values)
+ match len(tokens):
+ case 0:
+ raise ValueError("invalid regex: not enough terms")
+ case 1:
+ repl = ""
+ case 2:
+ repl = tokens[1]
+ case _:
+ raise ValueError("invalid regex: too many terms: %s" % tokens)
+ items.append((re.compile(tokens[0], flags), repl))
+ setattr(namespace, "trans", items)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="""\
+Filters a tarball on standard input by the same rules as the dpkg --path-exclude
+and --path-include options and writes the resulting tarball to standard output. See
+dpkg(1) for information on how these two options work in detail. To reuse the
+exact same semantics as used by dpkg, paths must be given as /path and not as
+./path even though they might be stored as such in the tarball.
+
+Secondly, filter out unwanted pax extended headers using --pax-exclude and
+--pax-include. This is useful in cases where a tool only accepts certain xattr
+prefixes. For example tar2sqfs only supports SCHILY.xattr.user.*,
+SCHILY.xattr.trusted.* and SCHILY.xattr.security.* but not
+SCHILY.xattr.system.posix_acl_default.*.
+
+Both types of options use Unix shell-style wildcards:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any character not in seq
+
+Thirdly, filter out files matching a specific tar archive member type using
+--type-exclude. Valid type names are REGTYPE (regular file), LNKTYPE
+(hardlink), SYMTYPE (symlink), CHRTYPE (character special), BLKTYPE (block
+special), DIRTYPE (directory), FIFOTYPE (fifo) or their tar format flag value
+(0-6, respectively).
+
+Fourthly, transform the path of tar members using a sed expression just as with
+GNU tar --transform.
+
+Fifthly, strip leading directory components off tar members. Just as with
+GNU tar --strip-components, tar members whose paths have no more components
+than the number being stripped are not passed through.
+
+Lastly, shift user id and group id of each entry by the value given by the
+--idshift argument. The resulting uid or gid must not be negative.
+""",
+ )
+ parser.add_argument(
+ "--path-exclude",
+ metavar="pattern",
+ action=PathFilterAction,
+ help="Exclude path matching the given shell pattern. "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--path-include",
+ metavar="pattern",
+ action=PathFilterAction,
+ help="Re-include a pattern after a previous exclusion. "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--pax-exclude",
+ metavar="pattern",
+ action=PaxFilterAction,
+ help="Exclude pax header matching the given globbing pattern. "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--pax-include",
+ metavar="pattern",
+ action=PaxFilterAction,
+ help="Re-include a pax header after a previous exclusion. "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--type-exclude",
+ metavar="type",
+ action=TypeFilterAction,
+ help="Exclude certain member types by their type. Choose types either "
+ "by their name (REGTYPE, LNKTYPE, SYMTYPE, CHRTYPE, BLKTYPE, DIRTYPE, "
+ "FIFOTYPE) or by their tar format flag values (0-6, respectively). "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--transform",
+ "--xform",
+ metavar="EXPRESSION",
+ action=TransformAction,
+ help="Use sed replace EXPRESSION to transform file names. "
+ "This option can be specified multiple times.",
+ )
+ parser.add_argument(
+ "--strip-components",
+ metavar="NUMBER",
+ type=int,
+ help="Strip NUMBER leading components from file names",
+ )
+ parser.add_argument(
+ "--idshift",
+ metavar="NUM",
+ type=int,
+ help="Integer value by which to shift the uid and gid of each entry",
+ )
+ args = parser.parse_args()
+ if (
+ not hasattr(args, "pathfilter")
+ and not hasattr(args, "paxfilter")
+ and not hasattr(args, "typefilter")
+ and not hasattr(args, "strip_components")
+ ):
+ from shutil import copyfileobj
+
+ copyfileobj(sys.stdin.buffer, sys.stdout.buffer)
+ exit()
+
+ # same logic as in dpkg/src/filters.c/filter_should_skip()
+ prefix_prog = re.compile(r"^([^*?[\\]*).*")
+
+ def path_filter_should_skip(member):
+ skip = False
+ if not hasattr(args, "pathfilter"):
+ return False
+ for t, r in args.pathfilter:
+ if r.match(member.name[1:]) is not None:
+ if t == "path_include":
+ skip = False
+ else:
+ skip = True
+ if skip and (member.isdir() or member.issym()):
+ for t, r in args.pathfilter:
+ if t != "path_include":
+ continue
+ prefix = prefix_prog.sub(r"\1", r.pattern)
+ prefix = prefix.rstrip("/")
+ if member.name[1:].startswith(prefix):
+ return False
+ return skip
+
+ def pax_filter_should_skip(header):
+ if not hasattr(args, "paxfilter"):
+ return False
+ skip = False
+ for t, r in args.paxfilter:
+ if r.match(header) is None:
+ continue
+ if t == "pax_include":
+ skip = False
+ else:
+ skip = True
+ return skip
+
+ def type_filter_should_skip(member):
+ if not hasattr(args, "typefilter"):
+ return False
+ for t in args.typefilter:
+ if member.type == t:
+ return True
+ return False
+
+ # starting with Python 3.8, the default format became PAX_FORMAT but we
+ # are still explicit here in case of future changes.
+ with tarfile.open(fileobj=sys.stdin.buffer, mode="r|*") as in_tar, tarfile.open(
+ fileobj=sys.stdout.buffer, mode="w|", format=tarfile.PAX_FORMAT
+ ) as out_tar:
+ for member in in_tar:
+ if path_filter_should_skip(member):
+ continue
+ if type_filter_should_skip(member):
+ continue
+ if args.strip_components:
+ comps = member.name.split("/")
+ # just as with GNU tar, archive members with less or equal
+ # number of components are not passed through at all
+ if len(comps) <= args.strip_components:
+ continue
+ member.name = "/".join(comps[args.strip_components :])
+ member.pax_headers = {
+ k: v
+ for k, v in member.pax_headers.items()
+ if not pax_filter_should_skip(k)
+ }
+ if args.idshift:
+ if args.idshift < 0 and -args.idshift > member.uid:
+ print("uid cannot be negative", file=sys.stderr)
+ exit(1)
+ if args.idshift < 0 and -args.idshift > member.gid:
+ print("gid cannot be negative", file=sys.stderr)
+ exit(1)
+ member.uid += args.idshift
+ member.gid += args.idshift
+ if hasattr(args, "trans"):
+ for r, s in args.trans:
+ member.name = r.sub(s, member.name)
+ if member.isfile():
+ with in_tar.extractfile(member) as file:
+ out_tar.addfile(member, file)
+ else:
+ out_tar.addfile(member)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/apt-patterns b/tests/apt-patterns
new file mode 100644
index 0000000..c87e932
--- /dev/null
+++ b/tests/apt-patterns
@@ -0,0 +1,8 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=essential \
+ --include '?or(?exact-name(dummy-does-not-exist),?exact-name(apt))' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | grep -v ./var/lib/apt/extended_states | diff -u tar1.txt -
diff --git a/tests/apt-patterns-custom b/tests/apt-patterns-custom
new file mode 100644
index 0000000..2348a76
--- /dev/null
+++ b/tests/apt-patterns-custom
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=custom \
+ --include '?narrow(?archive(^{{ DIST }}$),?essential)' \
+ --include apt \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/aptopt b/tests/aptopt
new file mode 100644
index 0000000..c757c30
--- /dev/null
+++ b/tests/aptopt
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/config" EXIT INT TERM
+echo 'Acquire::Languages "none";' > /tmp/config
+{{ CMD }} --mode=root --variant=apt --aptopt='Acquire::Check-Valid-Until "false"' --aptopt=/tmp/config {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf 'Acquire::Check-Valid-Until "false";\nAcquire::Languages "none";\n' | cmp /tmp/debian-chroot/etc/apt/apt.conf.d/99mmdebstrap -
+rm /tmp/debian-chroot/etc/apt/apt.conf.d/99mmdebstrap
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/arm64-without-qemu-support b/tests/arm64-without-qemu-support
new file mode 100644
index 0000000..98b4724
--- /dev/null
+++ b/tests/arm64-without-qemu-support
@@ -0,0 +1,18 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+apt-get remove --yes qemu-user-static binfmt-support qemu-user
+# the following is not necessary anymore since systemd-binfmt
+# successfully disables support upon removal of qemu-user with
+# the upload of src:systemd 251.2-4: https://bugs.debian.org/1012163
+#echo 0 > /proc/sys/fs/binfmt_misc/qemu-aarch64
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt --architectures=arm64 {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/as-debootstrap-unshare-wrapper b/tests/as-debootstrap-unshare-wrapper
new file mode 100644
index 0000000..b3f7a44
--- /dev/null
+++ b/tests/as-debootstrap-unshare-wrapper
@@ -0,0 +1,133 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# debootstrap uses apt-config to figure out whether the system running it has
+# any proxies configured and then runs the binary to set the http_proxy
+# environment variable. This will fail if debootstrap is run in a linux user
+# namespace because auto-apt-proxy will see /tmp/.auto-apt-proxy-0 as being
+# owned by the user "nobody" and group "nogroup" and fail with:
+# insecure cache dir /tmp/.auto-apt-proxy-0. Must be owned by UID 0 and have permissions 700
+# We cannot overwrite a configuration item using the APT_CONFIG environment
+# variable, so instead we use it to set the Dir configuration option
+# to /dev/null to force all apt settings to their defaults.
+# There is currently no better way to disable this behavior. See also:
+# https://bugs.debian.org/1031105
+# https://salsa.debian.org/installer-team/debootstrap/-/merge_requests/90
+AUTOPROXY=
+eval "$(apt-config shell AUTOPROXY Acquire::http::Proxy-Auto-Detect)"
+if [ -n "$AUTOPROXY" ] && [ -x "$AUTOPROXY" ] && [ -e /tmp/.auto-apt-proxy-0 ]; then
+ TMP_APT_CONFIG=$(mktemp)
+ echo "Dir \"/dev/null\";" > "$TMP_APT_CONFIG"
+ chmod 644 "$TMP_APT_CONFIG"
+fi
+
+$prefix {{ CMD }} --variant=custom --mode={{ MODE }} \
+ --setup-hook='env '"${AUTOPROXY:+APT_CONFIG='$TMP_APT_CONFIG'}"' debootstrap --variant={{ VARIANT }} unstable "$1" {{ MIRROR }}' \
+ - /tmp/debian-mm.tar {{ MIRROR }}
+if [ -n "$AUTOPROXY" ] && [ -x "$AUTOPROXY" ] && [ -e /tmp/.auto-apt-proxy-0 ]; then
+ rm "$TMP_APT_CONFIG"
+fi
+
+mkdir /tmp/debian-mm
+tar --xattrs --xattrs-include='*' -C /tmp/debian-mm -xf /tmp/debian-mm.tar
+
+mkdir /tmp/debian-debootstrap
+tar --xattrs --xattrs-include='*' -C /tmp/debian-debootstrap -xf "cache/debian-unstable-{{ VARIANT }}.tar"
+
+# diff cannot compare device nodes, so we use tar to do that for us and then
+# delete the directory
+tar -C /tmp/debian-debootstrap -cf dev1.tar ./dev
+tar -C /tmp/debian-mm -cf dev2.tar ./dev
+cmp dev1.tar dev2.tar >&2
+rm dev1.tar dev2.tar
+rm -r /tmp/debian-debootstrap/dev /tmp/debian-mm/dev
+
+# remove downloaded deb packages
+rm /tmp/debian-debootstrap/var/cache/apt/archives/*.deb
+# remove aux-cache
+rm /tmp/debian-debootstrap/var/cache/ldconfig/aux-cache
+# remove logs
+rm /tmp/debian-debootstrap/var/log/dpkg.log \
+ /tmp/debian-debootstrap/var/log/bootstrap.log \
+ /tmp/debian-debootstrap/var/log/alternatives.log \
+ /tmp/debian-mm/var/log/bootstrap.log
+
+# clear out /run except for /run/lock
+find /tmp/debian-debootstrap/run/ -mindepth 1 -maxdepth 1 ! -name lock -print0 | xargs --no-run-if-empty -0 rm -r
+
+# debootstrap doesn't clean apt
+rm /tmp/debian-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_unstable_main_binary-{{ HOSTARCH }}_Packages \
+ /tmp/debian-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_unstable_InRelease \
+ /tmp/debian-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_unstable_Release \
+ /tmp/debian-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_unstable_Release.gpg
+
+if [ -e /tmp/debian-debootstrap/etc/machine-id ]; then
+ rm /tmp/debian-debootstrap/etc/machine-id /tmp/debian-mm/etc/machine-id
+fi
+rm /tmp/debian-mm/var/cache/apt/archives/lock
+rm /tmp/debian-mm/var/lib/apt/lists/lock
+rm /tmp/debian-mm/var/lib/dpkg/arch
+
+# also needed for users that are created by systemd-sysusers before systemd 252
+# https://github.com/systemd/systemd/pull/24534
+for f in shadow shadow-; do
+ if [ ! -e /tmp/debian-debootstrap/etc/$f ]; then
+ continue
+ fi
+ if ! cmp /tmp/debian-debootstrap/etc/$f /tmp/debian-mm/etc/$f >&2; then
+ echo patching /etc/$f >&2
+ awk -v FS=: -v OFS=: -v SDE={{ SOURCE_DATE_EPOCH }} '{ print $1,$2,int(SDE/60/60/24),$4,$5,$6,$7,$8,$9 }' < /tmp/debian-mm/etc/$f > /tmp/debian-mm/etc/$f.bak
+ cat /tmp/debian-mm/etc/$f.bak > /tmp/debian-mm/etc/$f
+ rm /tmp/debian-mm/etc/$f.bak
+ else
+ echo no difference for /etc/$f >&2
+ fi
+done
+
+# isc-dhcp-client postinst doesn't create this file when debootstrap is run
+# under the unshare wrapper. The responsible postinst snippet was automatically added
+# by dh_apparmor since isc-dhcp-client 4.4.3-P1-1.1
+if [ -e /tmp/debian-debootstrap/etc/apparmor.d/local/sbin.dhclient ] && [ ! -s /tmp/debian-debootstrap/etc/apparmor.d/local/sbin.dhclient ]; then
+ echo /sbin/setcap > /tmp/debian-debootstrap/etc/apparmor.d/local/sbin.dhclient
+fi
+
+# check if the file content differs
+diff --unified --no-dereference --recursive /tmp/debian-debootstrap /tmp/debian-mm >&2
+
+# check permissions, ownership, symlink targets, modification times using tar
+# mtimes of directories created by mmdebstrap will differ, thus we equalize them first
+for d in etc/apt/preferences.d/ etc/apt/sources.list.d/ etc/dpkg/dpkg.cfg.d/ var/log/apt/; do
+ touch --date="@{{ SOURCE_DATE_EPOCH }}" /tmp/debian-debootstrap/$d /tmp/debian-mm/$d
+done
+# debootstrap never ran apt -- fixing permissions
+for d in ./var/lib/apt/lists/partial ./var/cache/apt/archives/partial; do
+ chroot /tmp/debian-debootstrap chmod 0700 $d
+ chroot /tmp/debian-debootstrap chown _apt:root $d
+done
+tar -C /tmp/debian-debootstrap --numeric-owner --xattrs --xattrs-include='*' --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/root1.tar .
+tar -C /tmp/debian-mm --numeric-owner --xattrs --xattrs-include='*' --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/root2.tar .
+tar --full-time --verbose -tf /tmp/root1.tar > /tmp/root1.tar.list
+tar --full-time --verbose -tf /tmp/root2.tar > /tmp/root2.tar.list
+# despite SOURCE_DATE_EPOCH and --clamp-mtime, the timestamps in the tarballs
+# will slightly differ from each other in their sub-second precision (last
+# decimals), so the tarballs will not be identical; we therefore use diff to
+# compare the content and tar to compare the attributes
+diff -u /tmp/root1.tar.list /tmp/root2.tar.list >&2
+rm /tmp/root1.tar /tmp/root2.tar /tmp/root1.tar.list /tmp/root2.tar.list
+
+rm /tmp/debian-mm.tar
+rm -r /tmp/debian-debootstrap /tmp/debian-mm
diff --git a/tests/ascii-armored-keys b/tests/ascii-armored-keys
new file mode 100644
index 0000000..518991c
--- /dev/null
+++ b/tests/ascii-armored-keys
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+for f in /etc/apt/trusted.gpg.d/*.gpg /etc/apt/trusted.gpg.d/*.asc; do
+ [ -e "$f" ] || continue
+ rm "$f"
+done
+rmdir /etc/apt/trusted.gpg.d
+mkdir /etc/apt/trusted.gpg.d
+for f in /usr/share/keyrings/*.gpg; do
+ name=$(basename "$f" .gpg)
+ gpg --no-default-keyring --keyring="/usr/share/keyrings/$name.gpg" --armor --output="/etc/apt/trusted.gpg.d/$name.asc" --export
+ rm "/usr/share/keyrings/$name.gpg"
+done
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot.tar
diff --git a/tests/aspcud-apt-solver b/tests/aspcud-apt-solver
new file mode 100644
index 0000000..bc0fbc3
--- /dev/null
+++ b/tests/aspcud-apt-solver
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=custom \
+ --include "$(tr '\n' ',' < pkglist.txt)" \
+ --aptopt='APT::Solver "aspcud"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort \
+ | grep -v '^./etc/apt/apt.conf.d/99mmdebstrap$' \
+ | diff -u tar1.txt -
diff --git a/tests/auto-mode-as-normal-user b/tests/auto-mode-as-normal-user
new file mode 100644
index 0000000..e8ab828
--- /dev/null
+++ b/tests/auto-mode-as-normal-user
@@ -0,0 +1,22 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -f /tmp/debian-chroot.tar.gz" EXIT INT TERM
+
+[ {{ MODE }} = "auto" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar.gz {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar.gz | sort | diff -u tar1.txt -
diff --git a/tests/auto-mode-without-unshare-capabilities b/tests/auto-mode-without-unshare-capabilities
new file mode 100644
index 0000000..17244b8
--- /dev/null
+++ b/tests/auto-mode-without-unshare-capabilities
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+useradd --home-dir /home/user --create-home user
+if [ -e /proc/sys/kernel/unprivileged_userns_clone ] && [ "$(sysctl -n kernel.unprivileged_userns_clone)" = "1" ]; then
+ sysctl -w kernel.unprivileged_userns_clone=0
+fi
+runuser -u user -- {{ CMD }} --mode=auto --variant=apt {{ DIST }} /tmp/debian-chroot.tar.gz {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar.gz | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar.gz
diff --git a/tests/automatic-mirror-from-suite b/tests/automatic-mirror-from-suite
new file mode 100644
index 0000000..7cff5a6
--- /dev/null
+++ b/tests/automatic-mirror-from-suite
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+cat << HOSTS >> /etc/hosts
+127.0.0.1 deb.debian.org
+127.0.0.1 security.debian.org
+HOSTS
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/check-against-debootstrap-dist b/tests/check-against-debootstrap-dist
new file mode 100644
index 0000000..b5706c6
--- /dev/null
+++ b/tests/check-against-debootstrap-dist
@@ -0,0 +1,228 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+echo "SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH"
+
+# we create the _apt user ourselves because otherwise its uid/gid will differ
+# from the one chosen by debootstrap due to a different installation order
+# relative to the systemd users
+# https://bugs.debian.org/969631
+# we cannot use useradd because passwd is not Essential:yes
+{{ CMD }} --variant={{ VARIANT }} --mode={{ MODE }} \
+ --essential-hook='[ {{ DIST }} = oldstable ] && [ {{ VARIANT }} = - ] && echo _apt:*:100:65534::/nonexistent:/usr/sbin/nologin >> "$1"/etc/passwd || :' \
+ "$(if [ {{ DIST }} = oldstable ]; then echo --merged-usr; else echo --hook-dir=./hooks/merged-usr; fi)" \
+ "$(case {{ DIST }} in oldstable) echo --include=e2fsprogs,mount,tzdata,gcc-9-base;; stable) echo --include=e2fsprogs,mount,tzdata;; *) echo --include=base-files ;; esac )" \
+ {{ DIST }} /tmp/debian-{{ DIST }}-mm.tar {{ MIRROR }}
+
+mkdir /tmp/debian-{{ DIST }}-mm
+tar --xattrs --xattrs-include='*' -C /tmp/debian-{{ DIST }}-mm -xf /tmp/debian-{{ DIST }}-mm.tar
+rm /tmp/debian-{{ DIST }}-mm.tar
+
+mkdir /tmp/debian-{{ DIST }}-debootstrap
+tar --xattrs --xattrs-include='*' -C /tmp/debian-{{ DIST }}-debootstrap -xf "cache/debian-{{ DIST }}-{{ VARIANT }}.tar"
+
+# diff cannot compare device nodes, so we use tar to do that for us and then
+# delete the directory
+tar -C /tmp/debian-{{ DIST }}-debootstrap -cf /tmp/dev1.tar ./dev
+tar -C /tmp/debian-{{ DIST }}-mm -cf /tmp/dev2.tar ./dev
+ret=0
+cmp /tmp/dev1.tar /tmp/dev2.tar >&2 || ret=$?
+if [ "$ret" -ne 0 ]; then
+ if type diffoscope >/dev/null; then
+ diffoscope /tmp/dev1.tar /tmp/dev2.tar
+ exit 1
+ else
+ echo "no diffoscope installed" >&2
+ fi
+ if type base64 >/dev/null; then
+ base64 /tmp/dev1.tar
+ base64 /tmp/dev2.tar
+ exit 1
+ else
+ echo "no base64 installed" >&2
+ fi
+ if type xxd >/dev/null; then
+ xxd /tmp/dev1.tar
+ xxd /tmp/dev2.tar
+ exit 1
+ else
+ echo "no xxd installed" >&2
+ fi
+ exit 1
+fi
+rm /tmp/dev1.tar /tmp/dev2.tar
+rm -r /tmp/debian-{{ DIST }}-debootstrap/dev /tmp/debian-{{ DIST }}-mm/dev
+
+# remove downloaded deb packages
+rm /tmp/debian-{{ DIST }}-debootstrap/var/cache/apt/archives/*.deb
+# remove aux-cache
+rm /tmp/debian-{{ DIST }}-debootstrap/var/cache/ldconfig/aux-cache
+# remove logs
+rm /tmp/debian-{{ DIST }}-debootstrap/var/log/dpkg.log \
+ /tmp/debian-{{ DIST }}-debootstrap/var/log/bootstrap.log \
+ /tmp/debian-{{ DIST }}-debootstrap/var/log/alternatives.log
+# remove *-old files
+rm /tmp/debian-{{ DIST }}-debootstrap/var/cache/debconf/config.dat-old \
+ /tmp/debian-{{ DIST }}-mm/var/cache/debconf/config.dat-old
+rm /tmp/debian-{{ DIST }}-debootstrap/var/cache/debconf/templates.dat-old \
+ /tmp/debian-{{ DIST }}-mm/var/cache/debconf/templates.dat-old
+rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/dpkg/status-old \
+ /tmp/debian-{{ DIST }}-mm/var/lib/dpkg/status-old
+# remove dpkg files
+rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/dpkg/available
+rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/dpkg/cmethopt
+# remove /var/lib/dpkg/arch
+rm /tmp/debian-{{ DIST }}-mm/var/lib/dpkg/arch
+# since we installed packages directly from the .deb files, Priorities differ
+# thus we first check for equality and then remove the files
+chroot /tmp/debian-{{ DIST }}-debootstrap dpkg --list > /tmp/dpkg1
+chroot /tmp/debian-{{ DIST }}-mm dpkg --list > /tmp/dpkg2
+diff -u /tmp/dpkg1 /tmp/dpkg2 >&2
+rm /tmp/dpkg1 /tmp/dpkg2
+grep -v '^Priority: ' /tmp/debian-{{ DIST }}-debootstrap/var/lib/dpkg/status > /tmp/status1
+grep -v '^Priority: ' /tmp/debian-{{ DIST }}-mm/var/lib/dpkg/status > /tmp/status2
+diff -u /tmp/status1 /tmp/status2 >&2
+rm /tmp/status1 /tmp/status2
+rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/dpkg/status /tmp/debian-{{ DIST }}-mm/var/lib/dpkg/status
+# debootstrap exposes the hosts's kernel version
+if [ -e /tmp/debian-{{ DIST }}-debootstrap/etc/apt/apt.conf.d/01autoremove-kernels ]; then
+ rm /tmp/debian-{{ DIST }}-debootstrap/etc/apt/apt.conf.d/01autoremove-kernels
+fi
+if [ -e /tmp/debian-{{ DIST }}-mm/etc/apt/apt.conf.d/01autoremove-kernels ]; then
+ rm /tmp/debian-{{ DIST }}-mm/etc/apt/apt.conf.d/01autoremove-kernels
+fi
+# clear out /run except for /run/lock
+find /tmp/debian-{{ DIST }}-debootstrap/run/ -mindepth 1 -maxdepth 1 ! -name lock -print0 | xargs --no-run-if-empty -0 rm -r
+# debootstrap doesn't clean apt
+rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_{{ DIST }}_main_binary-{{ HOSTARCH }}_Packages \
+ /tmp/debian-{{ DIST }}-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_{{ DIST }}_InRelease \
+ /tmp/debian-{{ DIST }}-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_{{ DIST }}_Release \
+ /tmp/debian-{{ DIST }}-debootstrap/var/lib/apt/lists/127.0.0.1_debian_dists_{{ DIST }}_Release.gpg
+
+if [ "{{ VARIANT }}" = "-" ]; then
+ rm /tmp/debian-{{ DIST }}-debootstrap/etc/machine-id
+ rm /tmp/debian-{{ DIST }}-mm/etc/machine-id
+ rm /tmp/debian-{{ DIST }}-debootstrap/var/lib/systemd/catalog/database
+ rm /tmp/debian-{{ DIST }}-mm/var/lib/systemd/catalog/database
+
+ cap=$(chroot /tmp/debian-{{ DIST }}-debootstrap /sbin/getcap /bin/ping)
+ expected="/bin/ping cap_net_raw=ep"
+ if [ "$cap" != "$expected" ]; then
+ echo "expected bin/ping to have capabilities $expected" >&2
+ echo "but debootstrap produced: $cap" >&2
+ exit 1
+ fi
+ cap=$(chroot /tmp/debian-{{ DIST }}-mm /sbin/getcap /bin/ping)
+ if [ "$cap" != "$expected" ]; then
+ echo "expected bin/ping to have capabilities $expected" >&2
+ echo "but mmdebstrap produced: $cap" >&2
+ exit 1
+ fi
+fi
+rm /tmp/debian-{{ DIST }}-mm/var/cache/apt/archives/lock
+rm /tmp/debian-{{ DIST }}-mm/var/lib/apt/extended_states
+rm /tmp/debian-{{ DIST }}-mm/var/lib/apt/lists/lock
+
+# the list of shells might be sorted wrongly
+# /var/lib/dpkg/triggers/File might be sorted wrongly
+for f in "/var/lib/dpkg/triggers/File" "/etc/shells"; do
+ f1="/tmp/debian-{{ DIST }}-debootstrap/$f"
+ f2="/tmp/debian-{{ DIST }}-mm/$f"
+ # both chroots must have the file
+ if [ ! -e "$f1" ] || [ ! -e "$f2" ]; then
+ continue
+ fi
+ # the file must be different
+ if cmp "$f1" "$f2" >&2; then
+ continue
+ fi
+ # then sort both
+ sort -o "$f1" "$f1"
+ sort -o "$f2" "$f2"
+done
+
+# Because of unreproducible uids (#969631) we created the _apt user ourselves
+# and because passwd is not Essential:yes we didn't use useradd. But newer
+# versions of adduser and shadow will create a different /etc/shadow
+if [ "{{ VARIANT }}" = "-" ] && [ "{{ DIST}}" = oldstable ]; then
+ for f in shadow shadow-; do
+ if grep -q '^_apt:!:' /tmp/debian-{{ DIST }}-debootstrap/etc/$f; then
+ sed -i 's/^_apt:\*:\([^:]\+\):0:99999:7:::$/_apt:!:\1::::::/' /tmp/debian-{{ DIST }}-mm/etc/$f
+ fi
+ done
+fi
+
+for log in faillog lastlog; do
+ if ! cmp /tmp/debian-{{ DIST }}-debootstrap/var/log/$log /tmp/debian-{{ DIST }}-mm/var/log/$log >&2;then
+ # if the files differ, make sure they are all zeroes
+ cmp -n "$(stat -c %s "/tmp/debian-{{ DIST }}-debootstrap/var/log/$log")" "/tmp/debian-{{ DIST }}-debootstrap/var/log/$log" /dev/zero >&2
+ cmp -n "$(stat -c %s "/tmp/debian-{{ DIST }}-mm/var/log/$log")" "/tmp/debian-{{ DIST }}-mm/var/log/$log" /dev/zero >&2
+ # then delete them
+ rm /tmp/debian-{{ DIST }}-debootstrap/var/log/$log /tmp/debian-{{ DIST }}-mm/var/log/$log
+ fi
+done
+
+# the order in which systemd and cron get installed differ and thus the order
+# of lines in /etc/group and /etc/gshadow differs
+if [ "{{ VARIANT }}" = "-" ]; then
+ for f in group group- gshadow gshadow-; do
+ for d in mm debootstrap; do
+ sort /tmp/debian-{{ DIST }}-$d/etc/$f > /tmp/debian-{{ DIST }}-$d/etc/$f.bak
+ mv /tmp/debian-{{ DIST }}-$d/etc/$f.bak /tmp/debian-{{ DIST }}-$d/etc/$f
+ done
+ done
+fi
+
+# since debootstrap 1.0.133 there is no tzdata in the buildd variant and thus
+# debootstrap creates its own /etc/localtime
+if [ "{{ VARIANT }}" = "buildd" ] && [ "{{ DIST }}" != "stable" ] && [ "{{ DIST }}" != "oldstable" ]; then
+ [ "$(readlink /tmp/debian-{{ DIST }}-debootstrap/etc/localtime)" = /usr/share/zoneinfo/UTC ]
+ rm /tmp/debian-{{ DIST }}-debootstrap/etc/localtime
+fi
+
+# starting with systemd 255 upstream dropped splitusr support and depending on
+# the installation order, symlink targets are prefixed with /usr or not
+# See #1060000 and #1054137
+case {{ DIST }} in testing|unstable)
+ for f in multi-user.target.wants/e2scrub_reap.service timers.target.wants/apt-daily-upgrade.timer timers.target.wants/apt-daily.timer timers.target.wants/e2scrub_all.timer; do
+ for d in mm debootstrap; do
+ [ -L "/tmp/debian-{{ DIST }}-$d/etc/systemd/system/$f" ] || continue
+ oldlink="$(readlink "/tmp/debian-{{ DIST }}-$d/etc/systemd/system/$f")"
+ case $oldlink in
+ /usr/*) : ;;
+ /*) oldlink="/usr$oldlink" ;;
+ *) echo unexpected >&2; exit 1 ;;
+ esac
+ ln -sf "$oldlink" "/tmp/debian-{{ DIST }}-$d/etc/systemd/system/$f"
+ done
+ done
+ ;;
+esac
+
+# check if the file content differs
+diff --unified --no-dereference --recursive /tmp/debian-{{ DIST }}-debootstrap /tmp/debian-{{ DIST }}-mm >&2
+
+# check permissions, ownership, symlink targets, modification times using tar
+# directory mtimes will differ, thus we equalize them first
+find /tmp/debian-{{ DIST }}-debootstrap /tmp/debian-{{ DIST }}-mm -type d -print0 | xargs -0 touch --date="@{{ SOURCE_DATE_EPOCH }}"
+# debootstrap never ran apt -- fixing permissions
+for d in ./var/lib/apt/lists/partial ./var/cache/apt/archives/partial; do
+ unmergedPATH="$PATH$(if [ "{{ DIST }}" = oldstable ]; then echo :/bin:/sbin; fi)"
+ PATH="$unmergedPATH" chroot /tmp/debian-{{ DIST }}-debootstrap chmod 0700 $d
+ PATH="$unmergedPATH" chroot /tmp/debian-{{ DIST }}-debootstrap chown "$(id -u _apt):root" $d
+done
+tar -C /tmp/debian-{{ DIST }}-debootstrap --numeric-owner --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/root1.tar .
+tar -C /tmp/debian-{{ DIST }}-mm --numeric-owner --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/root2.tar .
+tar --full-time --verbose -tf /tmp/root1.tar > /tmp/root1.tar.list
+tar --full-time --verbose -tf /tmp/root2.tar > /tmp/root2.tar.list
+diff -u /tmp/root1.tar.list /tmp/root2.tar.list >&2
+rm /tmp/root1.tar /tmp/root2.tar /tmp/root1.tar.list /tmp/root2.tar.list
+
+# check if file properties (permissions, ownership, symlink names, modification time) differ
+#
+# we cannot use this (yet) because it cannot cope with paths that have [ or @ in them
+#fmtree -c -p /tmp/debian-{{ DIST }}-debootstrap -k flags,gid,link,mode,size,time,uid | sudo fmtree -p /tmp/debian-{{ DIST }}-mm
+
+rm -r /tmp/debian-{{ DIST }}-debootstrap /tmp/debian-{{ DIST }}-mm
diff --git a/tests/check-for-bit-by-bit-identical-format-output b/tests/check-for-bit-by-bit-identical-format-output
new file mode 100644
index 0000000..6cbab90
--- /dev/null
+++ b/tests/check-for-bit-by-bit-identical-format-output
@@ -0,0 +1,28 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+trap "rm -f /tmp/debian-chroot-{{ MODE }}.{{ FORMAT }}" EXIT INT TERM
+
+case {{ MODE }} in unshare|fakechroot) : ;; *) exit 1;; esac
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} {{ DIST }} /tmp/debian-chroot-{{ MODE }}.{{ FORMAT }} {{ MIRROR }}
+cmp ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.{{ FORMAT }} /tmp/debian-chroot-{{ MODE }}.{{ FORMAT }} \
+ || diffoscope ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.{{ FORMAT }} /tmp/debian-chroot-{{ MODE }}.{{ FORMAT }}
+
+# we cannot test chrootless mode here, because mmdebstrap relies on the
+# usrmerge package to set up merged-/usr and that doesn't work in chrootless
+# mode
diff --git a/tests/chroot-directory-not-accessible-by-apt-user b/tests/chroot-directory-not-accessible-by-apt-user
new file mode 100644
index 0000000..eb2d343
--- /dev/null
+++ b/tests/chroot-directory-not-accessible-by-apt-user
@@ -0,0 +1,8 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+mkdir /tmp/debian-chroot
+chmod 700 /tmp/debian-chroot
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/chrootless b/tests/chrootless
new file mode 100644
index 0000000..77490c3
--- /dev/null
+++ b/tests/chrootless
@@ -0,0 +1,16 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+trap "rm -f /tmp/chrootless.tar /tmp/root.tar" EXIT INT TERM
+# we need --hook-dir=./hooks/merged-usr because usrmerge does not understand
+# DPKG_ROOT
+for INCLUDE in '' 'apt' 'apt,build-essential' 'systemd-sysv'; do
+ for MODE in root chrootless; do
+ {{ CMD }} --mode=$MODE --variant={{ VARIANT }} --hook-dir=./hooks/merged-usr \
+ ${INCLUDE:+--include="$INCLUDE"} --skip=check/chrootless \
+ {{ DIST }} "/tmp/$MODE.tar" {{ MIRROR }}
+ done
+ cmp /tmp/root.tar /tmp/chrootless.tar || diffoscope /tmp/root.tar /tmp/chrootless.tar
+ rm /tmp/chrootless.tar /tmp/root.tar
+done
diff --git a/tests/chrootless-fakeroot b/tests/chrootless-fakeroot
new file mode 100644
index 0000000..8821fa6
--- /dev/null
+++ b/tests/chrootless-fakeroot
@@ -0,0 +1,43 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+trap "rm -f /tmp/chrootless.tar /tmp/root.tar" EXIT INT TERM
+
+[ {{ MODE }} = chrootless ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+MMTARFILTER=
+[ -x /usr/bin/mmtarfilter ] && MMTARFILTER=/usr/bin/mmtarfilter
+[ -x ./tarfilter ] && MMTARFILTER=./tarfilter
+
+# we need --hook-dir=./hooks/merged-usr because usrmerge does not understand
+# DPKG_ROOT
+# permissions drwxr-sr-x and extended attributes of ./var/log/journal/ cannot
+# be preserved under fakeroot
+# this applies to 'z' lines in files in /usr/lib/tmpfiles.d/
+for INCLUDE in '' 'apt' 'apt,build-essential' 'systemd-sysv'; do
+ {{ CMD }} --variant={{ VARIANT }} --hook-dir=./hooks/merged-usr \
+ ${INCLUDE:+--include="$INCLUDE"} \
+ {{ DIST }} - {{ MIRROR }} \
+ | "$MMTARFILTER" --path-exclude="/var/log/journal" --path-exclude="/etc/credstore*" \
+ >/tmp/root.tar
+ $prefix fakeroot {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --hook-dir=./hooks/merged-usr \
+ ${INCLUDE:+--include="$INCLUDE"} \
+ {{ DIST }} - {{ MIRROR }} \
+ | "$MMTARFILTER" --path-exclude="/var/log/journal" --path-exclude="/etc/credstore*" \
+ > /tmp/chrootless.tar
+ cmp /tmp/root.tar /tmp/chrootless.tar || diffoscope /tmp/root.tar /tmp/chrootless.tar
+ rm /tmp/chrootless.tar /tmp/root.tar
+done
diff --git a/tests/chrootless-foreign b/tests/chrootless-foreign
new file mode 100644
index 0000000..03203d0
--- /dev/null
+++ b/tests/chrootless-foreign
@@ -0,0 +1,68 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+trap "rm -f /tmp/chrootless.tar /tmp/root.tar" EXIT INT TERM
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+
+deb2qemu() {
+ case "$1" in
+ amd64) echo x86_64;;
+ arm64) echo aarch64;;
+ armel|armhf) echo arm;;
+ ppc64el) echo ppc64le;;
+ *) echo "$1";;
+ esac
+}
+if [ "$(dpkg --print-architecture)" = "arm64" ]; then
+ arch=amd64
+else
+ arch=arm64
+fi
+
+[ "$(id -u)" -eq 0 ]
+[ -e "/proc/sys/fs/binfmt_misc/qemu-$(deb2qemu "$arch")" ]
+
+
+# we need --hook-dir=./hooks/merged-usr because usrmerge does not understand
+# DPKG_ROOT
+#
+# dpkg is unable to install architecture arch:all packages with a
+# dependency on an arch:any package (perl-modules-5.34 in this case)
+# inside foreign architecture chrootless chroots, because dpkg will use
+# its own architecture as the native architecture, see #825385 and #1020533
+# So we are not testing the installation of apt,build-essential here.
+for INCLUDE in '' 'apt' 'systemd-sysv'; do
+ echo 1 > "/proc/sys/fs/binfmt_misc/qemu-$(deb2qemu "$arch")"
+ arch-test "$arch"
+ {{ CMD }} --mode=root --architecture="$arch" --variant={{ VARIANT }} \
+ --hook-dir=./hooks/merged-usr ${INCLUDE:+--include="$INCLUDE"} \
+ {{ DIST }} "/tmp/root.tar" {{ MIRROR }}
+ echo 0 > "/proc/sys/fs/binfmt_misc/qemu-$(deb2qemu "$arch")"
+ arch-test "$arch" && exit 1
+ {{ CMD }} --mode=chrootless --architecture="$arch" --variant={{ VARIANT }} \
+ --hook-dir=./hooks/merged-usr ${INCLUDE:+--include="$INCLUDE"} \
+ --skip=check/chrootless {{ DIST }} "/tmp/chrootless.tar" {{ MIRROR }}
+ # when creating a foreign architecture chroot, the tarballs are not
+ # bit-by-bit identical but contain a few remaining differences:
+ #
+ # * /etc/ld.so.cache -- hard problem, must be solved in glibc upstream
+ # * /var/lib/dpkg/triggers -- #990712
+ # * /var/cache/debconf/*.dat-old -- needs investigation
+ for tar in root chrootless; do
+ <"/tmp/$tar.tar" \
+ ./tarfilter \
+ --path-exclude=/var/cache/debconf/config.dat-old \
+ --path-exclude=/var/cache/debconf/templates.dat-old \
+ --path-exclude=/etc/ld.so.cache \
+ --path-exclude=/var/lib/dpkg/triggers/File \
+ --path-exclude=/var/lib/dpkg/triggers/ldconfig \
+ > "/tmp/$tar.tar.tmp"
+ mv "/tmp/$tar.tar.tmp" "/tmp/$tar.tar"
+ done
+ cmp /tmp/root.tar /tmp/chrootless.tar || diffoscope /tmp/root.tar /tmp/chrootless.tar
+ rm /tmp/chrootless.tar /tmp/root.tar
+done
diff --git a/tests/compare-output-with-pre-seeded-var-cache-apt-archives b/tests/compare-output-with-pre-seeded-var-cache-apt-archives
new file mode 100644
index 0000000..f0e132c
--- /dev/null
+++ b/tests/compare-output-with-pre-seeded-var-cache-apt-archives
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# test that the user can drop archives into /var/cache/apt/archives as well as
+# into /var/cache/apt/archives/partial
+
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test requires the cache directory to be mounted on /mnt and should only be run inside a container" >&2
+ exit 1
+fi
+tmpdir=$(mktemp -d)
+trap 'rm -f "$tmpdir"/*.deb /tmp/orig.tar /tmp/test1.tar /tmp/test2.tar; rmdir "$tmpdir"' EXIT INT TERM
+
+include="--include=doc-debian"
+if [ "{{ VARIANT }}" = "custom" ]; then
+ include="$include,base-files,base-passwd,coreutils,dash,diffutils,dpkg,libc-bin,sed"
+fi
+{{ CMD }} $include --mode={{ MODE }} --variant={{ VARIANT }} \
+ --setup-hook='mkdir -p "$1"/var/cache/apt/archives/partial' \
+ --setup-hook='touch "$1"/var/cache/apt/archives/lock' \
+ --setup-hook='chmod 0640 "$1"/var/cache/apt/archives/lock' \
+ {{ DIST }} - {{ MIRROR }} > /tmp/orig.tar
+# somehow, when trying to create a tarball from the 9p mount, tar throws the
+# following error: tar: ./doc-debian_6.4_all.deb: File shrank by 132942 bytes; padding with zeros
+# to reproduce, try:
+#   tar --directory /mnt/cache/debian/pool/main/d/doc-debian/ --create --file - . | tar --directory /tmp/ --extract --file -
+# the checksums will then differ:
+#   md5sum /mnt/cache/debian/pool/main/d/doc-debian/*.deb /tmp/*.deb
+# another reason to copy the files into a new directory is that we can use
+# shell globs
+cp /mnt/cache/debian/pool/main/b/busybox/busybox_*"_{{ HOSTARCH }}.deb" /mnt/cache/debian/pool/main/a/apt/apt_*"_{{ HOSTARCH }}.deb" "$tmpdir"
+{{ CMD }} $include --mode={{ MODE }} --variant={{ VARIANT }} \
+ --setup-hook='mkdir -p "$1"/var/cache/apt/archives/partial' \
+ --setup-hook='sync-in "'"$tmpdir"'" /var/cache/apt/archives/partial' \
+ {{ DIST }} - {{ MIRROR }} > /tmp/test1.tar
+cmp /tmp/orig.tar /tmp/test1.tar
+{{ CMD }} $include --mode={{ MODE }} --variant={{ VARIANT }} \
+ --customize-hook='touch "$1"/var/cache/apt/archives/partial' \
+ --setup-hook='mkdir -p "$1"/var/cache/apt/archives/' \
+ --setup-hook='sync-in "'"$tmpdir"'" /var/cache/apt/archives/' \
+ --setup-hook='chmod 0755 "$1"/var/cache/apt/archives/' \
+ --customize-hook='find "'"$tmpdir"'" -type f -exec md5sum "{}" \; | sed "s|"'"$tmpdir"'"|$1/var/cache/apt/archives|" | md5sum --check' \
+ {{ DIST }} - {{ MIRROR }} > /tmp/test2.tar
+cmp /tmp/orig.tar /tmp/test2.tar
diff --git a/tests/copy-mirror b/tests/copy-mirror
new file mode 100644
index 0000000..1903925
--- /dev/null
+++ b/tests/copy-mirror
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test requires the cache directory to be mounted on /mnt and should only be run inside a container" >&2
+ exit 1
+fi
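+# apt's copy:// method behaves like file:// but copies the packages into its
+# archive cache instead of using them in place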
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar "deb copy:///mnt/cache/debian {{ DIST }} main"
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/create-directory b/tests/create-directory
new file mode 100644
index 0000000..2d5461b
--- /dev/null
+++ b/tests/create-directory
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
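+# pkglist.txt and tar1.txt are used as reference output by the other tests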
+chroot /tmp/debian-chroot dpkg-query --showformat '${binary:Package}\n' --show > pkglist.txt
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort > tar1.txt
diff --git a/tests/create-directory-dry-run b/tests/create-directory-dry-run
new file mode 100644
index 0000000..03226e4
--- /dev/null
+++ b/tests/create-directory-dry-run
@@ -0,0 +1,29 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+{{ CMD }} --mode={{ MODE }} --dry-run --variant=apt \
+ --setup-hook="exit 1" \
+ --essential-hook="exit 1" \
+ --customize-hook="exit 1" \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+rm /tmp/debian-chroot/dev/console
+rm /tmp/debian-chroot/dev/fd
+rm /tmp/debian-chroot/dev/full
+rm /tmp/debian-chroot/dev/null
+rm /tmp/debian-chroot/dev/ptmx
+rm /tmp/debian-chroot/dev/random
+rm /tmp/debian-chroot/dev/stderr
+rm /tmp/debian-chroot/dev/stdin
+rm /tmp/debian-chroot/dev/stdout
+rm /tmp/debian-chroot/dev/tty
+rm /tmp/debian-chroot/dev/urandom
+rm /tmp/debian-chroot/dev/zero
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
diff --git a/tests/create-foreign-tarball b/tests/create-foreign-tarball
new file mode 100644
index 0000000..bf8f13a
--- /dev/null
+++ b/tests/create-foreign-tarball
@@ -0,0 +1,77 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+case "$(dpkg --print-architecture)" in
+ arm64)
+ native_arch=arm64
+ native_gnu=aarch64-linux-gnu
+ foreign_arch=amd64
+ foreign_gnu=x86_64-linux-gnu
+ ;;
+ amd64)
+ native_arch=amd64
+ native_gnu=x86_64-linux-gnu
+ foreign_arch=arm64
+ foreign_gnu=aarch64-linux-gnu
+ ;;
+ *)
+ echo "unsupported native architecture" >&2
+ exit 1
+ ;;
+esac
+
+[ "{{ MODE }}" = "fakechroot" ] && prefix="$prefix fakechroot fakeroot"
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --architectures="$foreign_arch" \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+# we ignore differences between architectures by ignoring some files
+# and renaming others
+{ tar -tf /tmp/debian-chroot.tar \
+ | grep -v '^\./usr/bin/i386$' \
+ | grep -v '^\./usr/bin/x86_64$' \
+ | grep -v '^\./lib64$' \
+ | grep -v '^\./usr/lib64/$' \
+ | grep -v '^\./usr/lib64/ld-linux-x86-64\.so\.2$' \
+ | grep -v '^\./usr/lib/ld-linux-aarch64\.so\.1$' \
+ | grep -v "^\\./usr/lib/$foreign_gnu/ld-linux-aarch64\\.so\\.1$" \
+ | grep -v "^\\./usr/lib/$foreign_gnu/ld-linux-x86-64\\.so\\.2$" \
+ | grep -v "^\\./usr/lib/$foreign_gnu/perl/5\\.[0-9][.0-9]\\+/.*\\.ph$" \
+ | grep -v "^\\./usr/lib/$foreign_gnu/libmvec\\.so\\.1$" \
+ | grep -v "^\\./usr/share/doc/[^/]\\+/changelog\\(\\.Debian\\)\\?\\.$foreign_arch\\.gz$" \
+ | grep -v '^\./usr/share/man/man8/i386\.8\.gz$' \
+ | grep -v '^\./usr/share/man/man8/x86_64\.8\.gz$' \
+ | sed "s/$foreign_gnu/$native_gnu/" \
+ | sed "s/$foreign_arch/$native_arch/";
+} | sort > /tmp/tar2.txt
+{ < tar1.txt \
+ grep -v '^\./usr/bin/i386$' \
+ | grep -v '^\./usr/bin/x86_64$' \
+ | grep -v '^\./lib32$' \
+ | grep -v '^\./lib64$' \
+ | grep -v '^\./libx32$' \
+ | grep -v '^\./usr/lib32/$' \
+ | grep -v '^\./usr/libx32/$' \
+ | grep -v '^\./usr/lib64/$' \
+ | grep -v '^\./usr/lib64/ld-linux-x86-64\.so\.2$' \
+ | grep -v '^\./usr/lib/ld-linux-aarch64\.so\.1$' \
+ | grep -v "^\\./usr/lib/$native_gnu/ld-linux-x86-64\\.so\\.2$" \
+ | grep -v "^\\./usr/lib/$native_gnu/ld-linux-aarch64\\.so\\.1$" \
+ | grep -v "^\\./usr/lib/$native_gnu/libmvec\\.so\\.1$" \
+ | grep -v "^\\./usr/lib/$native_gnu/perl/5\\.[0-9][.0-9]\\+/.*\\.ph$" \
+ | grep -v "^\\./usr/share/doc/[^/]\\+/changelog\\(\\.Debian\\)\\?\\.$native_arch\\.gz$" \
+ | grep -v '^\./usr/share/man/man8/i386\.8\.gz$' \
+ | grep -v '^\./usr/share/man/man8/x86_64\.8\.gz$';
+} | sort | diff -u - /tmp/tar2.txt >&2
+rm /tmp/debian-chroot.tar /tmp/tar2.txt
diff --git a/tests/create-gzip-compressed-tarball b/tests/create-gzip-compressed-tarball
new file mode 100644
index 0000000..1492df2
--- /dev/null
+++ b/tests/create-gzip-compressed-tarball
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar.gz {{ MIRROR }}
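+# a gzip file starts with the magic bytes 0x1f 0x8b followed by 0x08 for the
+# deflate compression method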
+printf '\037\213\010' | cmp --bytes=3 /tmp/debian-chroot.tar.gz -
+tar -tf /tmp/debian-chroot.tar.gz | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar.gz
diff --git a/tests/create-tarball-dry-run b/tests/create-tarball-dry-run
new file mode 100644
index 0000000..f4c5fe2
--- /dev/null
+++ b/tests/create-tarball-dry-run
@@ -0,0 +1,27 @@
+#!/bin/sh
+#
+# we are testing all variants here because with 0.7.5 we had a bug:
+# mmdebstrap sid /dev/null --simulate ==> E: cannot read /var/cache/apt/archives/
+
+set -eu
+export LC_ALL=C.UTF-8
+prefix=
+include=,
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != root ] && [ "{{ MODE }}" != auto ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+ if [ "{{ VARIANT }}" = extract ] || [ "{{ VARIANT }}" = custom ]; then
+ include="$(tr '\n' ',' < pkglist.txt)"
+ fi
+fi
+$prefix {{ CMD }} --mode={{ MODE }} --include="$include" --dry-run --variant={{ VARIANT }} {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+if [ -e /tmp/debian-chroot.tar ]; then
+ echo "/tmp/debian-chroot.tar must not be created with --dry-run" >&2
+ exit 1
+fi
diff --git a/tests/create-tarball-with-tmp-mounted-nodev b/tests/create-tarball-with-tmp-mounted-nodev
new file mode 100644
index 0000000..61ff320
--- /dev/null
+++ b/tests/create-tarball-with-tmp-mounted-nodev
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+mount -t tmpfs -o nodev,nosuid,size=400M tmpfs /tmp
+# use --customize-hook to exercise the mounting/unmounting code of block devices in root mode
+{{ CMD }} --mode=root --variant=apt --customize-hook='mount | grep /dev/full' --customize-hook='test "$(echo foo | tee /dev/full 2>&1 1>/dev/null)" = "tee: /dev/full: No space left on device"' {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/custom-tmpdir b/tests/custom-tmpdir
new file mode 100644
index 0000000..bfd3651
--- /dev/null
+++ b/tests/custom-tmpdir
@@ -0,0 +1,33 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+[ "$(id -u)" -eq 0 ]
+[ {{ MODE }} = "unshare" ]
+
+if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+fi
+prefix="runuser -u ${SUDO_USER:-user} --"
+
+# https://www.etalabs.net/sh_tricks.html
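+# wrap the argument in single quotes and escape embedded single quotes so
+# that the result can safely be embedded in another shell command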
+quote () { printf %s\\n "$1" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" ; }
+homedir=$($prefix sh -c 'cd && pwd')
+# apt:test/integration/test-apt-key
+TMPDIR_ADD="This is fü\$\$ing cràzy, \$(apt -v)\$!"
+$prefix mkdir "$homedir/$TMPDIR_ADD"
+# make sure the unshared user can traverse into the TMPDIR
+chmod 711 "$homedir"
+# set permissions and sticky bit like the real /tmp
+chmod 1777 "$homedir/$TMPDIR_ADD"
+$prefix env TMPDIR="$homedir/$TMPDIR_ADD" {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --setup-hook='case "$1" in '"$(quote "$homedir/$TMPDIR_ADD/mmdebstrap.")"'??????????) exit 0;; *) echo "$1"; exit 1;; esac' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+# use rmdir as a quick check that nothing remains in TMPDIR
+$prefix rmdir "$homedir/$TMPDIR_ADD"
+rm /tmp/debian-chroot.tar
diff --git a/tests/customize-hook b/tests/customize-hook
new file mode 100644
index 0000000..6437eac
--- /dev/null
+++ b/tests/customize-hook
@@ -0,0 +1,16 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/customize.sh" EXIT INT TERM
+cat << 'SCRIPT' > /tmp/customize.sh
+#!/bin/sh
+chroot "$1" whoami > "$1/output2"
+chroot "$1" pwd >> "$1/output2"
+SCRIPT
+chmod +x /tmp/customize.sh
+{{ CMD }} --mode=root --variant=apt --customize-hook='chroot "$1" sh -c "whoami; pwd" > "$1/output1"' --customize-hook=/tmp/customize.sh {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf "root\n/\n" | cmp /tmp/debian-chroot/output1
+printf "root\n/\n" | cmp /tmp/debian-chroot/output2
+rm /tmp/debian-chroot/output1
+rm /tmp/debian-chroot/output2
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/cwd-directory-not-accessible-by-unshared-user b/tests/cwd-directory-not-accessible-by-unshared-user
new file mode 100644
index 0000000..859cf6b
--- /dev/null
+++ b/tests/cwd-directory-not-accessible-by-unshared-user
@@ -0,0 +1,30 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+[ "$(id -u)" -eq 0 ]
+[ {{ MODE }} = "unshare" ]
+
+if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+fi
+prefix="runuser -u ${SUDO_USER:-user} --"
+
+mkdir /tmp/debian-chroot
+chmod 700 /tmp/debian-chroot
+chown "${SUDO_USER:-user}:${SUDO_USER:-user}" /tmp/debian-chroot
+set -- env --chdir=/tmp/debian-chroot
+if [ "{{ CMD }}" = "./mmdebstrap" ]; then
+ set -- "$@" "$(realpath --canonicalize-existing ./mmdebstrap)"
+elif [ "{{ CMD }}" = "perl -MDevel::Cover=-silent,-nogcov ./mmdebstrap" ]; then
+ set -- "$@" perl -MDevel::Cover=-silent,-nogcov "$(realpath --canonicalize-existing ./mmdebstrap)"
+else
+ set -- "$@" {{ CMD }}
+fi
+$prefix "$@" --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/deb822-1-2 b/tests/deb822-1-2
new file mode 100644
index 0000000..1459117
--- /dev/null
+++ b/tests/deb822-1-2
@@ -0,0 +1,45 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/sources.list /tmp/deb822.sources" EXIT INT TERM
+cat << SOURCES > /tmp/deb822.sources
+Types: deb
+URIs: {{ MIRROR }}1
+Suites: {{ DIST }}
+Components: main
+SOURCES
+echo "deb {{ MIRROR }}2 {{ DIST }} main" > /tmp/sources.list
+echo "deb {{ MIRROR }}3 {{ DIST }} main" \
+ | {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} \
+ /tmp/debian-chroot \
+ /tmp/deb822.sources \
+ {{ MIRROR }}4 \
+ - \
+ "deb {{ MIRROR }}5 {{ DIST }} main" \
+ {{ MIRROR }}6 \
+ /tmp/sources.list
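+# instead of a single /etc/apt/sources.list, the given sources end up as
+# numbered files under /etc/apt/sources.list.d/; the expected names and
+# contents are checked below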
+test ! -e /tmp/debian-chroot/etc/apt/sources.list
+cat << SOURCES | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0000deb822.sources -
+Types: deb
+URIs: {{ MIRROR }}1
+Suites: {{ DIST }}
+Components: main
+SOURCES
+cat << SOURCES | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0001main.list -
+deb {{ MIRROR }}4 {{ DIST }} main
+
+deb {{ MIRROR }}3 {{ DIST }} main
+
+deb {{ MIRROR }}5 {{ DIST }} main
+
+deb {{ MIRROR }}6 {{ DIST }} main
+SOURCES
+echo "deb {{ MIRROR }}2 {{ DIST }} main" | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0002sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . \
+ | {
+ tar -t \
+ | grep -v "^./etc/apt/sources.list.d/0000deb822.sources$" \
+ | grep -v "^./etc/apt/sources.list.d/0001main.list$" \
+ | grep -v "^./etc/apt/sources.list.d/0002sources.list";
+ printf "./etc/apt/sources.list\n";
+ } | sort | diff -u tar1.txt -
diff --git a/tests/deb822-2-2 b/tests/deb822-2-2
new file mode 100644
index 0000000..c533264
--- /dev/null
+++ b/tests/deb822-2-2
@@ -0,0 +1,44 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/sources /tmp/deb822" EXIT INT TERM
+cat << SOURCES > /tmp/deb822
+Types: deb
+URIs: {{ MIRROR }}1
+Suites: {{ DIST }}
+Components: main
+SOURCES
+echo "deb {{ MIRROR }}2 {{ DIST }} main" > /tmp/sources
+cat << SOURCES | {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} \
+ /tmp/debian-chroot \
+ /tmp/deb822 \
+ - \
+ /tmp/sources
+Types: deb
+URIs: {{ MIRROR }}3
+Suites: {{ DIST }}
+Components: main
+SOURCES
+test ! -e /tmp/debian-chroot/etc/apt/sources.list
+ls -lha /tmp/debian-chroot/etc/apt/sources.list.d/
+cat << SOURCES | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0000deb822.sources -
+Types: deb
+URIs: {{ MIRROR }}1
+Suites: {{ DIST }}
+Components: main
+SOURCES
+cat << SOURCES | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0001main.sources -
+Types: deb
+URIs: {{ MIRROR }}3
+Suites: {{ DIST }}
+Components: main
+SOURCES
+echo "deb {{ MIRROR }}2 {{ DIST }} main" | cmp /tmp/debian-chroot/etc/apt/sources.list.d/0002sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . \
+ | {
+ tar -t \
+ | grep -v "^./etc/apt/sources.list.d/0000deb822.sources$" \
+ | grep -v "^./etc/apt/sources.list.d/0001main.sources$" \
+ | grep -v "^./etc/apt/sources.list.d/0002sources.list$";
+ printf "./etc/apt/sources.list\n";
+ } | sort | diff -u tar1.txt -
diff --git a/tests/debootstrap b/tests/debootstrap
new file mode 100644
index 0000000..63c217d
--- /dev/null
+++ b/tests/debootstrap
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+tmpdir="$(mktemp -d)"
+chmod 755 "$tmpdir"
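+# store the debootstrap result as a normalized tarball under ./cache/ so that
+# it can later serve as a reference for comparison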
+debootstrap "$([ "{{ DIST }}" = oldstable ] && echo --no-merged-usr || echo --merged-usr)" --variant={{ VARIANT }} {{ DIST }} "$tmpdir" {{ MIRROR }}
+tar --sort=name --mtime=@$SOURCE_DATE_EPOCH --clamp-mtime --numeric-owner --one-file-system --xattrs -C "$tmpdir" -c . > "./cache/debian-{{ DIST }}-{{ VARIANT }}.tar"
+rm -r "$tmpdir"
diff --git a/tests/debootstrap-no-op-options b/tests/debootstrap-no-op-options
new file mode 100644
index 0000000..cd41681
--- /dev/null
+++ b/tests/debootstrap-no-op-options
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+{{ CMD }} --mode=root --variant=apt --resolve-deps --merged-usr --no-merged-usr --force-check-gpg {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/debug b/tests/debug
new file mode 100644
index 0000000..5612115
--- /dev/null
+++ b/tests/debug
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+
+# we use variant standard in debug mode to see the maximum number of packages
+# that gets chosen in case of USE_HOST_APT_CONFIG=yes
+# we use variant important on architectures where variant standard is not
+# bit-by-bit reproducible due to #1031276
+case {{ VARIANT }} in standard|-) : ;; *) exit 1;; esac
+
+{{ CMD }} --variant={{ VARIANT }} --debug {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+
+cmp ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.tar /tmp/debian-chroot.tar \
+ || diffoscope ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.tar /tmp/debian-chroot.tar
diff --git a/tests/debug-output-on-fake-tty b/tests/debug-output-on-fake-tty
new file mode 100644
index 0000000..c8c8a87
--- /dev/null
+++ b/tests/debug-output-on-fake-tty
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+script -qfc "{{ CMD }} --mode={{ MODE }} --debug --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}" /dev/null
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/dev-ptmx b/tests/dev-ptmx
new file mode 100644
index 0000000..5eb7bd0
--- /dev/null
+++ b/tests/dev-ptmx
@@ -0,0 +1,149 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+if [ {{ MODE }} != unshare ] && [ {{ MODE }} != root ]; then
+ echo "test requires root or unshare mode" >&2
+ exit 1
+fi
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# this mimics what apt does in apt-pkg/deb/dpkgpm.cc/pkgDPkgPM::StartPtyMagic()
+cat > /tmp/test.c << 'END'
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <fcntl.h>
+#include <termios.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <signal.h>
+
+int main() {
+ int ret;
+ int fd = posix_openpt(O_RDWR | O_NOCTTY);
+ if (fd < 0) {
+ perror("posix_openpt");
+ return 1;
+ }
+ char buf[64]; // 64 is used by apt
+ ret = ptsname_r(fd, buf, sizeof(buf));
+ if (ret != 0) {
+ perror("ptsname_r");
+ return 1;
+ }
+ ret = grantpt(fd);
+ if (ret == -1) {
+ perror("grantpt");
+ return 1;
+ }
+ struct termios origtt;
+ ret = tcgetattr(STDIN_FILENO, &origtt);
+ if (ret != 0) {
+ perror("tcgetattr1");
+ return 1;
+ }
+ struct termios tt;
+ ret = tcgetattr(STDOUT_FILENO, &tt);
+ if (ret != 0) {
+ perror("tcgetattr2");
+ return 1;
+ }
+ struct winsize win;
+ ret = ioctl(STDOUT_FILENO, TIOCGWINSZ, &win);
+ if (ret < 0) {
+ perror("ioctl stdout TIOCGWINSZ");
+ return 1;
+ }
+ ret = ioctl(fd, TIOCSWINSZ, &win);
+ if (ret < 0) {
+ perror("ioctl fd TIOCGWINSZ");
+ return 1;
+ }
+ ret = tcsetattr(fd, TCSANOW, &tt);
+ if (ret != 0) {
+ perror("tcsetattr1");
+ return 1;
+ }
+ cfmakeraw(&tt);
+ tt.c_lflag &= ~ECHO;
+ tt.c_lflag |= ISIG;
+ sigset_t sigmask;
+ sigset_t sigmask_old;
+ ret = sigemptyset(&sigmask);
+ if (ret != 0) {
+ perror("sigemptyset");
+ return 1;
+ }
+ ret = sigaddset(&sigmask, SIGTTOU);
+ if (ret != 0) {
+ perror("sigaddset");
+ return 1;
+ }
+ ret = sigprocmask(SIG_BLOCK,&sigmask, &sigmask_old);
+ if (ret != 0) {
+ perror("sigprocmask1");
+ return 1;
+ }
+ ret = tcsetattr(STDIN_FILENO, TCSAFLUSH, &tt);
+ if (ret != 0) {
+ perror("tcsetattr2");
+ return 1;
+ }
+ ret = sigprocmask(SIG_BLOCK,&sigmask_old, NULL);
+ if (ret != 0) {
+ perror("sigprocmask2");
+ return 1;
+ }
+ ret = tcsetattr(STDIN_FILENO, TCSAFLUSH, &origtt);
+ if (ret != 0) {
+ perror("tcsetattr3");
+ return 1;
+ }
+ return 0;
+}
+END
+
+# use script to create a fake tty
+# run all tests as root and as a normal user (the latter requires ptmxmode=666)
+script -qfec "$prefix {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --include=gcc,libc6-dev,python3,passwd \
+ --customize-hook='chroot \"\$1\" useradd --home-dir /home/user --create-home user' \
+ --customize-hook='chroot \"\$1\" python3 -c \"import pty; print(pty.openpty())\"' \
+ --customize-hook='chroot \"\$1\" runuser -u user -- python3 -c \"import pty; print(pty.openpty())\"' \
+ --customize-hook='chroot \"\$1\" script -c \"echo foobar\"' \
+ --customize-hook='chroot \"\$1\" runuser -u user -- env --chdir=/home/user script -c \"echo foobar\"' \
+ --customize-hook='chroot \"\$1\" apt-get install --yes doc-debian 2>&1 | tee \"\$1\"/tmp/log' \
+ --customize-hook=\"copy-in /tmp/test.c /tmp\" \
+ --customize-hook='chroot \"\$1\" gcc /tmp/test.c -o /tmp/test' \
+ --customize-hook='chroot \"\$1\" /tmp/test' \
+ --customize-hook='chroot \"\$1\" runuser -u user -- /tmp/test' \
+ --customize-hook='rm \"\$1\"/tmp/test \"\$1\"/tmp/test.c' \
+ --customize-hook=\"copy-out /tmp/log /tmp\" \
+ {{ DIST }} /dev/null {{ MIRROR }}" /dev/null
+
+fail=0
+[ -r /tmp/log ] || fail=1
+grep '^E:' /tmp/log && fail=1
+grep 'Can not write log' /tmp/log && fail=1
+grep 'posix_openpt' /tmp/log && fail=1
+grep 'No such file or directory' /tmp/log && fail=1
+if [ $fail -eq 1 ]; then
+ echo "apt failed to write log:" >&2
+ cat /tmp/log >&2
+ exit 1
+fi
+
+rm /tmp/test.c /tmp/log
diff --git a/tests/directory-ending-in-tar b/tests/directory-ending-in-tar
new file mode 100644
index 0000000..b44e35a
--- /dev/null
+++ b/tests/directory-ending-in-tar
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+[ "$(whoami)" = "root" ]
+trap "rm -rf /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt --format=directory {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+ftype=$(stat -c %F /tmp/debian-chroot.tar)
+if [ "$ftype" != directory ]; then
+ echo "expected directory but got: $ftype" >&2
+ exit 1
+fi
+tar -C /tmp/debian-chroot.tar --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/dist-using-codename b/tests/dist-using-codename
new file mode 100644
index 0000000..96d8929
--- /dev/null
+++ b/tests/dist-using-codename
@@ -0,0 +1,13 @@
+#!/bin/sh
+#
+# make sure that using codenames works https://bugs.debian.org/1003191
+
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f InRelease; rm -rf /tmp/debian-chroot.tar /tmp/expected" EXIT INT TERM
+/usr/lib/apt/apt-helper download-file "{{ MIRROR }}/dists/{{ DIST }}/InRelease" InRelease
+codename=$(awk '/^Codename: / { print $2; }' InRelease)
+{{ CMD }} --mode={{ MODE }} --variant=apt "$codename" /tmp/debian-chroot.tar {{ MIRROR }}
+echo "deb {{ MIRROR }} $codename main" > /tmp/expected
+tar --to-stdout --extract --file /tmp/debian-chroot.tar ./etc/apt/sources.list \
+ | diff -u /tmp/expected -
diff --git a/tests/dpkgopt b/tests/dpkgopt
new file mode 100644
index 0000000..1a41da4
--- /dev/null
+++ b/tests/dpkgopt
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/config" EXIT INT TERM
+echo no-pager > /tmp/config
+{{ CMD }} --mode=root --variant=apt --dpkgopt="path-exclude=/usr/share/doc/*" --dpkgopt=/tmp/config --dpkgopt="path-include=/usr/share/doc/dpkg/copyright" {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf 'path-exclude=/usr/share/doc/*\nno-pager\npath-include=/usr/share/doc/dpkg/copyright\n' | cmp /tmp/debian-chroot/etc/dpkg/dpkg.cfg.d/99mmdebstrap -
+rm /tmp/debian-chroot/etc/dpkg/dpkg.cfg.d/99mmdebstrap
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort > tar2.txt
+{ grep -v '^./usr/share/doc/.' tar1.txt; echo ./usr/share/doc/dpkg/; echo ./usr/share/doc/dpkg/copyright; } | sort | diff -u - tar2.txt
diff --git a/tests/eatmydata-via-hook-dir b/tests/eatmydata-via-hook-dir
new file mode 100644
index 0000000..0df72df
--- /dev/null
+++ b/tests/eatmydata-via-hook-dir
@@ -0,0 +1,43 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+cat << SCRIPT > /tmp/checkeatmydata.sh
+#!/bin/sh
+set -exu
+cat << EOF | diff - "\$1"/usr/bin/dpkg
+#!/bin/sh
+exec /usr/bin/eatmydata /usr/bin/dpkg.distrib "\\\$@"
+EOF
+[ -e "\$1"/usr/bin/eatmydata ]
+SCRIPT
+chmod +x /tmp/checkeatmydata.sh
+# first four bytes: magic
+elfheader="\\177ELF"
+# fifth byte: bits
+case "$(dpkg-architecture -qDEB_HOST_ARCH_BITS)" in
+ 32) elfheader="$elfheader\\001";;
+ 64) elfheader="$elfheader\\002";;
+ *) echo "bits not supported"; exit 1;;
+esac
+# sixth byte: endian
+case "$(dpkg-architecture -qDEB_HOST_ARCH_ENDIAN)" in
+ little) elfheader="$elfheader\\001";;
+ big) elfheader="$elfheader\\002";;
+ *) echo "endian not supported"; exit 1;;
+esac
+# seventh and eighth bytes: elf version (1) and abi (unset)
+elfheader="$elfheader\\001\\000"
+{{ CMD }} --mode=root --variant=apt \
+ --customize-hook=/tmp/checkeatmydata.sh \
+ --essential-hook=/tmp/checkeatmydata.sh \
+ --extract-hook='printf "'"$elfheader"'" | cmp --bytes=8 - "$1"/usr/bin/dpkg' \
+ --hook-dir=./hooks/eatmydata \
+ --customize-hook='printf "'"$elfheader"'" | cmp --bytes=8 - "$1"/usr/bin/dpkg' \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+ tar -C /tmp/debian-chroot --one-file-system -c . \
+ | tar -t \
+ | sort \
+ | grep -v '^\./var/lib/dpkg/diversions\(-old\)\?$' \
+ | diff -u tar1.txt -
+rm /tmp/checkeatmydata.sh
+rm -r /tmp/debian-chroot
diff --git a/tests/empty-sources.list b/tests/empty-sources.list
new file mode 100644
index 0000000..bf384f3
--- /dev/null
+++ b/tests/empty-sources.list
@@ -0,0 +1,8 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+printf '' | {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --setup-hook='echo "deb {{ MIRROR }} {{ DIST }} main" > "$1"/etc/apt/sources.list' \
+ {{ DIST }} /tmp/debian-chroot.tar -
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/error-if-stdout-is-tty b/tests/error-if-stdout-is-tty
new file mode 100644
index 0000000..b4f6923
--- /dev/null
+++ b/tests/error-if-stdout-is-tty
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+set -eu
+
+export LC_ALL=C.UTF-8
+
+ret=0
+script -qfec "{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} - {{ MIRROR }}" /dev/null || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/essential-hook b/tests/essential-hook
new file mode 100644
index 0000000..dc2b01f
--- /dev/null
+++ b/tests/essential-hook
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rm -f /tmp/essential.sh" EXIT INT TERM
+cat << 'SCRIPT' > /tmp/essential.sh
+#!/bin/sh
+echo tzdata tzdata/Zones/Europe select Berlin | chroot "$1" debconf-set-selections
+SCRIPT
+chmod +x /tmp/essential.sh
+{{ CMD }} --mode=root --variant=apt --include=tzdata --essential-hook='echo tzdata tzdata/Areas select Europe | chroot "$1" debconf-set-selections' --essential-hook=/tmp/essential.sh {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+[ "$(readlink /tmp/debian-chroot/etc/localtime)" = "/usr/share/zoneinfo/Europe/Berlin" ]
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort \
+ | grep -v '^./etc/localtime' \
+ | grep -v '^./etc/timezone' \
+ | grep -v '^./usr/sbin/tzconfig' \
+ | grep -v '^./usr/share/doc/tzdata' \
+ | grep -v '^./usr/share/lintian/overrides/tzdata' \
+ | grep -v '^./usr/share/zoneinfo' \
+ | grep -v '^./var/lib/dpkg/info/tzdata.' \
+ | grep -v '^./var/lib/apt/extended_states$' \
+ | diff -u tar1.txt -
diff --git a/tests/existing-directory-with-lost-found b/tests/existing-directory-with-lost-found
new file mode 100644
index 0000000..9757d06
--- /dev/null
+++ b/tests/existing-directory-with-lost-found
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+mkdir /tmp/debian-chroot
+mkdir /tmp/debian-chroot/lost+found
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+rmdir /tmp/debian-chroot/lost+found
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/existing-empty-directory b/tests/existing-empty-directory
new file mode 100644
index 0000000..23efeea
--- /dev/null
+++ b/tests/existing-empty-directory
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+mkdir /tmp/debian-chroot
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/fail-installing-to-existing-file b/tests/fail-installing-to-existing-file
new file mode 100644
index 0000000..916c98c
--- /dev/null
+++ b/tests/fail-installing-to-existing-file
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -f /tmp/exists" EXIT INT TERM
+
+touch /tmp/exists
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/exists {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-installing-to-non-empty-lost-found b/tests/fail-installing-to-non-empty-lost-found
new file mode 100644
index 0000000..4130bf5
--- /dev/null
+++ b/tests/fail-installing-to-non-empty-lost-found
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm /tmp/debian-chroot/lost+found/exists; rmdir /tmp/debian-chroot/lost+found /tmp/debian-chroot" EXIT INT TERM
+mkdir /tmp/debian-chroot
+mkdir /tmp/debian-chroot/lost+found
+touch /tmp/debian-chroot/lost+found/exists
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-installing-to-non-empty-target-directory b/tests/fail-installing-to-non-empty-target-directory
new file mode 100644
index 0000000..2606b7f
--- /dev/null
+++ b/tests/fail-installing-to-non-empty-target-directory
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rmdir /tmp/debian-chroot/lost+found; rm /tmp/debian-chroot/exists; rmdir /tmp/debian-chroot" EXIT INT TERM
+mkdir /tmp/debian-chroot
+mkdir /tmp/debian-chroot/lost+found
+touch /tmp/debian-chroot/exists
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-installing-to-root b/tests/fail-installing-to-root
new file mode 100644
index 0000000..ded6b5d
--- /dev/null
+++ b/tests/fail-installing-to-root
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} / {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-with-missing-lz4 b/tests/fail-with-missing-lz4
new file mode 100644
index 0000000..71c6d60
--- /dev/null
+++ b/tests/fail-with-missing-lz4
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar.lz4 {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-with-path-with-quotes b/tests/fail-with-path-with-quotes
new file mode 100644
index 0000000..5483ff1
--- /dev/null
+++ b/tests/fail-with-path-with-quotes
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap 'rm -rf /tmp/quoted\"path' EXIT INT TERM
+
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/quoted\"path {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/fail-without-etc-subuid b/tests/fail-without-etc-subuid
new file mode 100644
index 0000000..7a0b146
--- /dev/null
+++ b/tests/fail-without-etc-subuid
@@ -0,0 +1,16 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+useradd --home-dir /home/user --create-home user
+rm /etc/subuid
+ret=0
+runuser -u user -- {{ CMD }} --mode=unshare --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
+[ ! -e /tmp/debian-chroot ]
diff --git a/tests/fail-without-username-in-etc-subuid b/tests/fail-without-username-in-etc-subuid
new file mode 100644
index 0000000..319eadf
--- /dev/null
+++ b/tests/fail-without-username-in-etc-subuid
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+useradd --home-dir /home/user --create-home user
+awk -F: '$1!="user"' /etc/subuid > /etc/subuid.tmp
+mv /etc/subuid.tmp /etc/subuid
+ret=0
+runuser -u user -- {{ CMD }} --mode=unshare --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }} || ret=$?
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
+[ ! -e /tmp/debian-chroot ]
diff --git a/tests/failing-customize-hook b/tests/failing-customize-hook
new file mode 100644
index 0000000..8ecc065
--- /dev/null
+++ b/tests/failing-customize-hook
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+ret=0
+{{ CMD }} --mode=root --variant=apt --customize-hook='chroot "$1" sh -c "exit 1"' {{ DIST }} /tmp/debian-chroot {{ MIRROR }} || ret=$?
+rm -r /tmp/debian-chroot
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/file-mirror b/tests/file-mirror
new file mode 100644
index 0000000..b0388bb
--- /dev/null
+++ b/tests/file-mirror
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test requires the cache directory to be mounted on /mnt and should only be run inside a container" >&2
+ exit 1
+fi
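+# bind mount the host mirror read-only into the chroot so that apt can use
+# the file:// URI; the customize hook unmounts it and removes the mount point
+# again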
+{{ CMD }} --mode={{ MODE }} --variant=apt \
+ --setup-hook='mkdir -p "$1"/mnt/cache/debian; mount -o ro,bind /mnt/cache/debian "$1"/mnt/cache/debian' \
+ --customize-hook='umount "$1"/mnt/cache/debian; rmdir "$1"/mnt/cache/debian "$1"/mnt/cache' \
+ {{ DIST }} /tmp/debian-chroot.tar "deb file:///mnt/cache/debian {{ DIST }} main"
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/file-mirror-automount-hook b/tests/file-mirror-automount-hook
new file mode 100644
index 0000000..11ab330
--- /dev/null
+++ b/tests/file-mirror-automount-hook
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test requires the cache directory to be mounted on /mnt and should only be run inside a container" >&2
+ exit 1
+fi
+if [ "$(id -u)" -eq 0 ] && ! id -u user > /dev/null 2>&1; then
+ useradd --home-dir /home/user --create-home user
+fi
+prefix=
+[ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && prefix="runuser -u user --"
+[ "{{ MODE }}" = "fakechroot" ] && prefix="$prefix fakechroot fakeroot"
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --hook-dir=./hooks/file-mirror-automount \
+ --customize-hook='[ ! -e "$1"/mnt/cache/debian/ ] || rmdir "$1"/mnt/cache/debian/' \
+ --customize-hook='rmdir "$1"/mnt/cache' \
+ {{ DIST }} /tmp/debian-chroot.tar "deb file:///mnt/cache/debian {{ DIST }} main"
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/help b/tests/help
new file mode 100644
index 0000000..535eeba
--- /dev/null
+++ b/tests/help
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+# we redirect to /dev/null instead of using --quiet to avoid a broken pipe
+# when grep exits before mmdebstrap has written all of its output
+{{ CMD }} --help | grep --fixed-strings 'mmdebstrap [OPTION...] [SUITE [TARGET [MIRROR...]]]' >/dev/null
diff --git a/tests/hook-directory b/tests/hook-directory
new file mode 100644
index 0000000..c9b22f9
--- /dev/null
+++ b/tests/hook-directory
@@ -0,0 +1,49 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
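+# each hook directory ships two scripts per hook type, one with and one
+# without a shebang line; the checks below verify that command line hooks and
+# hook directories run in the order given and that the scripts inside a hook
+# directory run in lexical order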
+for h in hookA hookB; do
+ mkdir /tmp/$h
+ for s in setup extract essential customize; do
+ cat << SCRIPT > /tmp/$h/${s}00.sh
+#!/bin/sh
+echo $h/${s}00 >> "\$1/$s"
+SCRIPT
+ chmod +x /tmp/$h/${s}00.sh
+ cat << SCRIPT > /tmp/$h/${s}01.sh
+echo $h/${s}01 >> "\$1/$s"
+SCRIPT
+ chmod +x /tmp/$h/${s}01.sh
+ done
+done
+{{ CMD }} --mode=root --variant=apt \
+ --setup-hook='echo cliA/setup >> "$1"/setup' \
+ --extract-hook='echo cliA/extract >> "$1"/extract' \
+ --essential-hook='echo cliA/essential >> "$1"/essential' \
+ --customize-hook='echo cliA/customize >> "$1"/customize' \
+ --hook-dir=/tmp/hookA \
+ --setup-hook='echo cliB/setup >> "$1"/setup' \
+ --extract-hook='echo cliB/extract >> "$1"/extract' \
+ --essential-hook='echo cliB/essential >> "$1"/essential' \
+ --customize-hook='echo cliB/customize >> "$1"/customize' \
+ --hook-dir=/tmp/hookB \
+ --setup-hook='echo cliC/setup >> "$1"/setup' \
+ --extract-hook='echo cliC/extract >> "$1"/extract' \
+ --essential-hook='echo cliC/essential >> "$1"/essential' \
+ --customize-hook='echo cliC/customize >> "$1"/customize' \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf "cliA/setup\nhookA/setup00\nhookA/setup01\ncliB/setup\nhookB/setup00\nhookB/setup01\ncliC/setup\n" | diff -u - /tmp/debian-chroot/setup
+printf "cliA/extract\nhookA/extract00\nhookA/extract01\ncliB/extract\nhookB/extract00\nhookB/extract01\ncliC/extract\n" | diff -u - /tmp/debian-chroot/extract
+printf "cliA/essential\nhookA/essential00\nhookA/essential01\ncliB/essential\nhookB/essential00\nhookB/essential01\ncliC/essential\n" | diff -u - /tmp/debian-chroot/essential
+printf "cliA/customize\nhookA/customize00\nhookA/customize01\ncliB/customize\nhookB/customize00\nhookB/customize01\ncliC/customize\n" | diff -u - /tmp/debian-chroot/customize
+for s in setup extract essential customize; do
+ rm /tmp/debian-chroot/$s
+done
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+for h in hookA hookB; do
+ for s in setup extract essential customize; do
+ rm /tmp/$h/${s}00.sh
+ rm /tmp/$h/${s}01.sh
+ done
+ rmdir /tmp/$h
+done
+rm -r /tmp/debian-chroot
diff --git a/tests/i386-which-can-be-executed-without-qemu b/tests/i386-which-can-be-executed-without-qemu
new file mode 100644
index 0000000..91c53df
--- /dev/null
+++ b/tests/i386-which-can-be-executed-without-qemu
@@ -0,0 +1,41 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+# remove qemu just to be sure
+apt-get remove --yes qemu-user-static binfmt-support qemu-user
+{{ CMD }} --mode={{ MODE }} --variant=apt --architectures=i386 {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+# we ignore differences between architectures by ignoring some files
+# and renaming others
+{ tar -tf /tmp/debian-chroot.tar \
+ | grep -v '^\./usr/bin/i386$' \
+ | grep -v '^\./usr/lib/ld-linux\.so\.2$' \
+ | grep -v '^\./usr/lib/i386-linux-gnu/ld-linux\.so\.2$' \
+ | grep -v '^\./usr/lib/gcc/i686-linux-gnu/$' \
+ | grep -v '^\./usr/lib/gcc/i686-linux-gnu/[0-9]\+/$' \
+ | grep -v '^\./usr/share/man/man8/i386\.8\.gz$' \
+ | grep -v '^\./usr/share/doc/[^/]\+/changelog\(\.Debian\)\?\.i386\.gz$' \
+ | sed 's/i386-linux-gnu/x86_64-linux-gnu/' \
+ | sed 's/i386/amd64/' \
+ | sed 's/\/stubs-32.ph$/\/stubs-64.ph/';
+} | sort > tar2.txt
+{ < tar1.txt \
+ grep -v '^\./usr/bin/i386$' \
+ | grep -v '^\./usr/bin/x86_64$' \
+ | grep -v '^\./usr/lib32/$' \
+ | grep -v '^\./lib32$' \
+ | grep -v '^\./lib64$' \
+ | grep -v '^\./usr/lib64/$' \
+ | grep -v '^\./usr/lib64/ld-linux-x86-64\.so\.2$' \
+ | grep -v '^\./usr/lib/gcc/x86_64-linux-gnu/$' \
+ | grep -v '^\./usr/lib/gcc/x86_64-linux-gnu/[0-9]\+/$' \
+ | grep -v '^\./usr/lib/x86_64-linux-gnu/ld-linux-x86-64\.so\.2$' \
+ | grep -v '^\./usr/lib/x86_64-linux-gnu/libmvec\.so\.1$' \
+ | grep -v '^\./usr/share/doc/[^/]\+/changelog\(\.Debian\)\?\.amd64\.gz$' \
+ | grep -v '^\./usr/share/man/man8/i386\.8\.gz$' \
+ | grep -v '^\./usr/share/man/man8/x86_64\.8\.gz$';
+} | sort | diff -u - tar2.txt >&2
+rm /tmp/debian-chroot.tar
diff --git a/tests/include b/tests/include
new file mode 100644
index 0000000..e284b7d
--- /dev/null
+++ b/tests/include
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+{{ CMD }} --mode=root --variant=apt --include=doc-debian {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+rm /tmp/debian-chroot/usr/share/doc-base/doc-debian.debian-*
+rm -r /tmp/debian-chroot/usr/share/doc/debian
+rm -r /tmp/debian-chroot/usr/share/doc/doc-debian
+rm /tmp/debian-chroot/var/lib/apt/extended_states
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.list
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.md5sums
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/include-deb-file b/tests/include-deb-file
new file mode 100644
index 0000000..ad31de2
--- /dev/null
+++ b/tests/include-deb-file
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -rf /tmp/dummypkg.deb /tmp/dummypkg" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# instead of obtaining a .deb from our cache, we create a new package because
+# otherwise apt might decide to download the package with the same name and
+# version from the cache instead of using the local .deb
+mkdir -p /tmp/dummypkg/DEBIAN
+cat << END > "/tmp/dummypkg/DEBIAN/control"
+Package: dummypkg
+Priority: optional
+Section: oldlibs
+Maintainer: Johannes Schauer Marin Rodrigues <josch@debian.org>
+Architecture: all
+Multi-Arch: foreign
+Source: dummypkg
+Version: 1
+Description: dummypkg
+END
+dpkg-deb --build "/tmp/dummypkg" "/tmp/dummypkg.deb"
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --include="/tmp/dummypkg.deb" \
+ --hook-dir=./hooks/file-mirror-automount \
+ --customize-hook='chroot "$1" dpkg-query -W -f="\${Status}\n" dummypkg | grep "^install ok installed$"' \
+ {{ DIST }} /dev/null {{ MIRROR }}
diff --git a/tests/include-foreign-libmagic-mgc b/tests/include-foreign-libmagic-mgc
new file mode 100644
index 0000000..127a84e
--- /dev/null
+++ b/tests/include-foreign-libmagic-mgc
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# to test foreign architecture package installation we choose a package which
+# - is not part of the native installation set
+# - does not have any dependencies
+# - installs only few files
+# - doesn't change its name regularly (like gcc-*-base)
+
+case "$(dpkg --print-architecture)" in
+ arm64)
+ native_arch=arm64
+ foreign_arch=amd64
+ ;;
+ amd64)
+ native_arch=amd64
+ foreign_arch=arm64
+ ;;
+ *)
+ echo "unsupported native architecture" >&2
+ exit 1
+ ;;
+esac
+
+set -eu
+export LC_ALL=C.UTF-8
+{{ CMD }} --mode=root --variant=apt \
+ --architectures="$native_arch,$foreign_arch" \
+ --include="libmagic-mgc:$foreign_arch" \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+{ echo "$native_arch"; echo "$foreign_arch"; } | cmp /tmp/debian-chroot/var/lib/dpkg/arch -
+rm /tmp/debian-chroot/usr/lib/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/README.Debian
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/"changelog.Debian.$foreign_arch.gz"
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.Debian.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/copyright
+rm /tmp/debian-chroot/usr/share/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/misc/magic.mgc
+rm /tmp/debian-chroot/var/lib/apt/extended_states
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.list
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.md5sums
+rmdir /tmp/debian-chroot/usr/share/doc/libmagic-mgc/
+rmdir /tmp/debian-chroot/usr/share/file/magic/
+rmdir /tmp/debian-chroot/usr/share/file/
+rmdir /tmp/debian-chroot/usr/lib/file/
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/include-foreign-libmagic-mgc-with-multiple-arch-options b/tests/include-foreign-libmagic-mgc-with-multiple-arch-options
new file mode 100644
index 0000000..3108134
--- /dev/null
+++ b/tests/include-foreign-libmagic-mgc-with-multiple-arch-options
@@ -0,0 +1,48 @@
+#!/bin/sh
+#
+# to test foreign architecture package installation we choose a package which
+# - is not part of the native installation set
+# - does not have any dependencies
+# - installs only few files
+# - doesn't change its name regularly (like gcc-*-base)
+
+case "$(dpkg --print-architecture)" in
+ arm64)
+ native_arch=arm64
+ foreign_arch=amd64
+ ;;
+ amd64)
+ native_arch=amd64
+ foreign_arch=arm64
+ ;;
+ *)
+ echo "unsupported native architecture" >&2
+ exit 1
+ ;;
+esac
+
+set -eu
+export LC_ALL=C.UTF-8
+{{ CMD }} --mode=root --variant=apt \
+ --architectures="$native_arch" \
+ --architectures="$foreign_arch" \
+ --include="libmagic-mgc:$foreign_arch" \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+{ echo "$native_arch"; echo "$foreign_arch"; } | cmp /tmp/debian-chroot/var/lib/dpkg/arch -
+rm /tmp/debian-chroot/usr/lib/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/README.Debian
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/"changelog.Debian.$foreign_arch.gz"
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.Debian.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/copyright
+rm /tmp/debian-chroot/usr/share/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/misc/magic.mgc
+rm /tmp/debian-chroot/var/lib/apt/extended_states
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.list
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.md5sums
+rmdir /tmp/debian-chroot/usr/share/doc/libmagic-mgc/
+rmdir /tmp/debian-chroot/usr/share/file/magic/
+rmdir /tmp/debian-chroot/usr/share/file/
+rmdir /tmp/debian-chroot/usr/lib/file/
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/include-with-multiple-apt-sources b/tests/include-with-multiple-apt-sources
new file mode 100644
index 0000000..1d335d4
--- /dev/null
+++ b/tests/include-with-multiple-apt-sources
@@ -0,0 +1,10 @@
+#!/bin/sh
+#
+# This checks for https://bugs.debian.org/976166
+# Since $DEFAULT_DIST varies, we hardcode stable and unstable.
+
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+{{ CMD }} --mode=root --variant=minbase --include=doc-debian unstable /tmp/debian-chroot "deb {{ MIRROR }} unstable main" "deb {{ MIRROR }} stable main"
+chroot /tmp/debian-chroot dpkg-query --show doc-debian
diff --git a/tests/install-busybox-based-sub-essential-system b/tests/install-busybox-based-sub-essential-system
new file mode 100644
index 0000000..7854f0e
--- /dev/null
+++ b/tests/install-busybox-based-sub-essential-system
@@ -0,0 +1,41 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+
+pkgs=base-files,base-passwd,busybox,debianutils,dpkg,libc-bin,mawk,tar
+# busybox --install -s will install symbolic links into the rootfs, leaving
+# existing files untouched. It has to run after extraction (otherwise there is
+# no busybox binary) and before first configuration
+{{ CMD }} --mode=root --variant=custom \
+ --include=$pkgs \
+ --setup-hook='mkdir -p "$1/bin"' \
+ --setup-hook='echo root:x:0:0:root:/root:/bin/sh > "$1/etc/passwd"' \
+ --setup-hook='printf "root:x:0:\nmail:x:8:\nutmp:x:43:\n" > "$1/etc/group"' \
+ --extract-hook='chroot "$1" busybox --install -s' \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+echo "$pkgs" | tr ',' '\n' > /tmp/expected
+chroot /tmp/debian-chroot dpkg-query -f '${binary:Package}\n' -W \
+ | comm -12 - /tmp/expected \
+ | diff -u - /tmp/expected
+rm /tmp/expected
+for cmd in echo cat sed grep; do
+ test -L /tmp/debian-chroot/bin/$cmd
+ test "$(readlink /tmp/debian-chroot/bin/$cmd)" = "/usr/bin/busybox"
+done
+for cmd in sort tee; do
+ test -L /tmp/debian-chroot/usr/bin/$cmd
+ test "$(readlink /tmp/debian-chroot/usr/bin/$cmd)" = "/usr/bin/busybox"
+done
+
+# if /bin or /sbin are not symlinks, add /bin and /sbin to PATH
+if [ ! -L /tmp/debian-chroot/bin ] || [ ! -L /tmp/debian-chroot/sbin ]; then
+ export PATH="$PATH:/sbin:/bin"
+fi
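+# run a simple pipeline through several of the busybox applets to verify that
+# the installed symlinks work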
+chroot /tmp/debian-chroot echo foobar \
+ | chroot /tmp/debian-chroot cat \
+ | chroot /tmp/debian-chroot sort \
+ | chroot /tmp/debian-chroot tee /dev/null \
+ | chroot /tmp/debian-chroot sed 's/foobar/blubber/' \
+ | chroot /tmp/debian-chroot grep blubber >/dev/null
diff --git a/tests/install-doc-debian b/tests/install-doc-debian
new file mode 100644
index 0000000..27d7f3e
--- /dev/null
+++ b/tests/install-doc-debian
@@ -0,0 +1,56 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+[ {{ VARIANT }} = "custom" ]
+[ {{ MODE }} = "chrootless" ]
+
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --include=doc-debian {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+tar -C /tmp/debian-chroot --owner=0 --group=0 --numeric-owner --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/debian-chroot.tar .
+tar tvf /tmp/debian-chroot.tar > doc-debian.tar.list
+rm /tmp/debian-chroot.tar
+# delete contents of doc-debian
+rm /tmp/debian-chroot/usr/share/doc-base/doc-debian.debian-*
+rm -r /tmp/debian-chroot/usr/share/doc/debian
+rm -r /tmp/debian-chroot/usr/share/doc/doc-debian
+# delete real files
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+rm /tmp/debian-chroot/var/cache/apt/archives/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock-frontend
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+## delete merged usr symlinks
+#rm /tmp/debian-chroot/libx32
+#rm /tmp/debian-chroot/lib64
+#rm /tmp/debian-chroot/lib32
+#rm /tmp/debian-chroot/sbin
+#rm /tmp/debian-chroot/bin
+#rm /tmp/debian-chroot/lib
+# in chrootless mode, there is more to remove
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Lock
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Unincorp
+rm /tmp/debian-chroot/var/lib/dpkg/status-old
+rm /tmp/debian-chroot/var/lib/dpkg/info/format
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.md5sums
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.list
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
diff --git a/tests/install-doc-debian-and-output-tarball b/tests/install-doc-debian-and-output-tarball
new file mode 100644
index 0000000..118ae89
--- /dev/null
+++ b/tests/install-doc-debian-and-output-tarball
@@ -0,0 +1,23 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+[ {{ VARIANT }} = "custom" ]
+[ {{ MODE }} = "chrootless" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --include=doc-debian {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar tvf /tmp/debian-chroot.tar | grep -v ' ./dev' | diff -u doc-debian.tar.list -
+rm /tmp/debian-chroot.tar
diff --git a/tests/install-doc-debian-and-test-hooks b/tests/install-doc-debian-and-test-hooks
new file mode 100644
index 0000000..e69066c
--- /dev/null
+++ b/tests/install-doc-debian-and-test-hooks
@@ -0,0 +1,59 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+[ {{ VARIANT }} = "custom" ]
+[ {{ MODE }} = "chrootless" ]
+
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --skip=cleanup/tmp --variant={{ VARIANT }} --include=doc-debian --setup-hook='touch "$1/tmp/setup"' --customize-hook='touch "$1/tmp/customize"' {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+rm /tmp/debian-chroot/tmp/setup
+rm /tmp/debian-chroot/tmp/customize
+tar -C /tmp/debian-chroot --owner=0 --group=0 --numeric-owner --sort=name --clamp-mtime --mtime="$(date --utc --date=@{{ SOURCE_DATE_EPOCH }} --iso-8601=seconds)" -cf /tmp/debian-chroot.tar .
+tar tvf /tmp/debian-chroot.tar | grep -v ' ./dev' | diff -u doc-debian.tar.list -
+rm /tmp/debian-chroot.tar
+# delete contents of doc-debian
+rm /tmp/debian-chroot/usr/share/doc-base/doc-debian.debian-*
+rm -r /tmp/debian-chroot/usr/share/doc/debian
+rm -r /tmp/debian-chroot/usr/share/doc/doc-debian
+# delete real files
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+rm /tmp/debian-chroot/var/cache/apt/archives/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock-frontend
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+## delete merged usr symlinks
+#rm /tmp/debian-chroot/libx32
+#rm /tmp/debian-chroot/lib64
+#rm /tmp/debian-chroot/lib32
+#rm /tmp/debian-chroot/sbin
+#rm /tmp/debian-chroot/bin
+#rm /tmp/debian-chroot/lib
+# in chrootless mode, there is more to remove
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Lock
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Unincorp
+rm /tmp/debian-chroot/var/lib/dpkg/status-old
+rm /tmp/debian-chroot/var/lib/dpkg/info/format
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.md5sums
+rm /tmp/debian-chroot/var/lib/dpkg/info/doc-debian.list
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
diff --git a/tests/install-libmagic-mgc-on-foreign b/tests/install-libmagic-mgc-on-foreign
new file mode 100644
index 0000000..918224b
--- /dev/null
+++ b/tests/install-libmagic-mgc-on-foreign
@@ -0,0 +1,69 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+[ {{ VARIANT }} = "custom" ]
+[ {{ MODE }} = "chrootless" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+case "$(dpkg --print-architecture)" in
+ arm64)
+ foreign_arch=amd64
+ ;;
+ amd64)
+ foreign_arch=arm64
+ ;;
+ *)
+ echo "unsupported native architecture" >&2
+ exit 1
+ ;;
+esac
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --architectures="$foreign_arch" --include=libmagic-mgc {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+# delete contents of libmagic-mgc
+rm /tmp/debian-chroot/usr/lib/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/README.Debian
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.Debian.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/changelog.gz
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/copyright
+rm /tmp/debian-chroot/usr/share/doc/libmagic-mgc/"changelog.Debian.$foreign_arch.gz"
+rm /tmp/debian-chroot/usr/share/file/magic.mgc
+rm /tmp/debian-chroot/usr/share/misc/magic.mgc
+# delete real files
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/cache/apt/archives/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock
+rm /tmp/debian-chroot/var/lib/dpkg/lock-frontend
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+## delete merged usr symlinks
+#rm /tmp/debian-chroot/libx32
+#rm /tmp/debian-chroot/lib64
+#rm /tmp/debian-chroot/lib32
+#rm /tmp/debian-chroot/sbin
+#rm /tmp/debian-chroot/bin
+#rm /tmp/debian-chroot/lib
+# in chrootless mode, there is more to remove
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Lock
+rm /tmp/debian-chroot/var/lib/dpkg/triggers/Unincorp
+rm /tmp/debian-chroot/var/lib/dpkg/status-old
+rm /tmp/debian-chroot/var/lib/dpkg/info/format
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.md5sums
+rm /tmp/debian-chroot/var/lib/dpkg/info/libmagic-mgc.list
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
diff --git a/tests/invalid-mirror b/tests/invalid-mirror
new file mode 100644
index 0000000..97cde05
--- /dev/null
+++ b/tests/invalid-mirror
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}/invalid || ret=$?
+rm /tmp/debian-chroot.tar
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/jessie-or-older b/tests/jessie-or-older
new file mode 100644
index 0000000..a3a2ace
--- /dev/null
+++ b/tests/jessie-or-older
@@ -0,0 +1,42 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+trap "rm -f /tmp/debian-chroot-{{ MODE }}.tar /tmp/debian-chroot-root-normal.tar" EXIT INT TERM
+
+[ "$(id -u)" -eq 0 ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+MMTARFILTER=
+[ -x /usr/bin/mmtarfilter ] && MMTARFILTER=/usr/bin/mmtarfilter
+[ -x ./tarfilter ] && MMTARFILTER=./tarfilter
+
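+# the chroot built with the jessie-or-older hook differs from a plain
+# bootstrap in a handful of files; strip those paths from both tarballs so
+# that the remainder can be compared bit by bit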
+filter() {
+ "$MMTARFILTER" \
+ --path-exclude=/usr/bin/uncompress \
+ --path-exclude=/var/cache/debconf/config.dat-old \
+ --path-exclude=/var/cache/debconf/templates.dat-old \
+ --path-exclude=/var/lib/dpkg/available \
+ --path-exclude=/var/lib/dpkg/diversions \
+ --path-exclude=/var/lib/dpkg/cmethopt \
+ --path-exclude=/var/lib/dpkg/status-old \
+ --path-exclude=/var/lib/shells.state
+}
+
+# base for comparison without jessie-or-older hook
+{{ CMD }} --mode=root --variant={{ VARIANT }} {{ DIST }} - {{ MIRROR }} > /tmp/debian-chroot-root-normal.tar
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --hook-dir=./hooks/jessie-or-older {{ DIST }} - {{ MIRROR }} | filter > /tmp/debian-chroot-{{ MODE }}.tar
+filter < /tmp/debian-chroot-root-normal.tar | cmp - /tmp/debian-chroot-{{ MODE }}.tar
diff --git a/tests/keyring b/tests/keyring
new file mode 100644
index 0000000..7308f0d
--- /dev/null
+++ b/tests/keyring
@@ -0,0 +1,18 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+for f in /etc/apt/trusted.gpg.d/*.gpg /etc/apt/trusted.gpg.d/*.asc; do
+ [ -e "$f" ] || continue
+ rm "$f"
+done
+rmdir /etc/apt/trusted.gpg.d
+mkdir /etc/apt/trusted.gpg.d
+{{ CMD }} --mode=root --variant=apt --keyring=/usr/share/keyrings/debian-archive-keyring.gpg --keyring=/usr/share/keyrings/ {{ DIST }} /tmp/debian-chroot "deb {{ MIRROR }} {{ DIST }} main"
+# make sure that no [signed-by=...] managed to make it into the sources.list
+echo "deb {{ MIRROR }} {{ DIST }} main" | cmp /tmp/debian-chroot/etc/apt/sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/keyring-overwrites b/tests/keyring-overwrites
new file mode 100644
index 0000000..f070654
--- /dev/null
+++ b/tests/keyring-overwrites
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot; rmdir /tmp/emptydir; rm -f /tmp/emptyfile" EXIT INT TERM
+mkdir -p /tmp/emptydir
+touch /tmp/emptyfile
+# this overwrites the apt keyring options and should fail
+ret=0
+{{ CMD }} --mode=root --variant=apt --keyring=/tmp/emptydir --keyring=/tmp/emptyfile {{ DIST }} /tmp/debian-chroot "deb {{ MIRROR }} {{ DIST }} main" || ret=$?
+# make sure that no [signed-by=...] managed to make it into the sources.list
+echo "deb {{ MIRROR }} {{ DIST }} main" | cmp /tmp/debian-chroot/etc/apt/sources.list -
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/logfile b/tests/logfile
new file mode 100644
index 0000000..5e2dbeb
--- /dev/null
+++ b/tests/logfile
@@ -0,0 +1,22 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -rf /tmp/debian-chroot /tmp/log /tmp/trimmed" EXIT INT TERM
+
+# we check the full log to also prevent debug printfs from accidentally making it into a commit
+{{ CMD }} --mode=root --variant=apt --logfile=/tmp/log {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+# omit the last line which should contain the runtime
+head --lines=-1 /tmp/log > /tmp/trimmed
+cat << LOG | diff -u - /tmp/trimmed
+I: chroot architecture {{ HOSTARCH }} is equal to the host's architecture
+I: finding correct signed-by value...
+I: automatically chosen format: directory
+I: running apt-get update...
+I: downloading packages with apt...
+I: extracting archives...
+I: installing essential packages...
+I: cleaning package lists and apt cache...
+LOG
+tail --lines=1 /tmp/log | grep '^I: success in .* seconds$'
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/man b/tests/man
new file mode 100644
index 0000000..b5c38b9
--- /dev/null
+++ b/tests/man
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+# we redirect to /dev/null instead of using --quiet to avoid a broken pipe
+# when grep exits before mmdebstrap has written all of its output
+{{ CMD }} --man | grep --fixed-strings 'mmdebstrap [OPTION...] [*SUITE* [*TARGET* [*MIRROR*...]]]' >/dev/null
diff --git a/tests/merged-fakechroot-inside-unmerged-chroot b/tests/merged-fakechroot-inside-unmerged-chroot
new file mode 100644
index 0000000..c05ada1
--- /dev/null
+++ b/tests/merged-fakechroot-inside-unmerged-chroot
@@ -0,0 +1,49 @@
+#!/bin/sh
+#
+# make sure that the $FAKECHROOT_CMD_SUBST environment variable is set up
+# such that one can create a merged-/usr chroot from an unmerged-/usr system
+
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+trap "rm -f /tmp/chroot-fakechroot.tar /tmp/chroot-root.tar" EXIT INT TERM
+[ "$(whoami)" = "root" ]
+{{ CMD }} --mode=root --variant=apt --hook-dir=./hooks/merged-usr {{ DIST }} /tmp/chroot-root.tar {{ MIRROR }}
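+# /tmp/chroot-root.tar is the merged-/usr reference built in root mode; the
+# script below rebuilds the same chroot in fakechroot mode from inside an
+# unmerged-/usr chroot and both tarballs must be identical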
+cat << 'SCRIPT' > script.sh
+#!/bin/sh
+set -exu
+rootfs="$1"
+mkdir -p "$rootfs/mnt/hooks"
+[ -e /usr/libexec/mmdebstrap/ldconfig.fakechroot ] && cp -a /usr/libexec/mmdebstrap/ldconfig.fakechroot "$rootfs/mnt"
+[ -e ./ldconfig.fakechroot ] && cp -a ./ldconfig.fakechroot "$rootfs/mnt"
+[ -e /usr/share/mmdebstrap/hooks/merged-usr ] && cp -a /usr/share/mmdebstrap/hooks/merged-usr "$rootfs/mnt/hooks"
+[ -e ./hooks/merged-usr ] && cp -a ./hooks/merged-usr "$rootfs/mnt/hooks"
+[ -e /usr/bin/mmdebstrap ] && cp -aT /usr/bin/mmdebstrap "$rootfs/usr/bin/mmdebstrap"
+[ -e ./mmdebstrap ] && cp -aT ./mmdebstrap "$rootfs/mnt/mmdebstrap"
+chroot "$rootfs" env --chdir=/mnt \
+ runuser -u user -- \
+ {{ CMD }} --mode=fakechroot --variant=apt \
+ --hook-dir=./hooks/merged-usr \
+ --customize-hook='chroot "$1" echo "$FAKECHROOT_CMD_SUBST" | tr ":" "\n" | sort' \
+ --customize-hook='chroot "$1" sh -c "exec test \"\$(readlink /bin)\" = usr/bin"' \
+ --customize-hook='chroot "$1" sh -c "exec test \"\$(realpath -e /bin/ldd)\" = /usr/bin/ldd"' \
+ --customize-hook='chroot "$1" echo ":$FAKECHROOT_CMD_SUBST" | grep --quiet :/usr/bin/ldd=' \
+ --customize-hook='chroot "$1" echo ":$FAKECHROOT_CMD_SUBST" | grep --quiet :/bin/ldd=' \
+ --customize-hook='chroot "$1" env PATH=/bin ldd /bin/true 2>&1 | grep --quiet "fakeldd: objdump: command not found: install binutils package"' \
+ --customize-hook='chroot "$1" sh -c "exec test \"\$(readlink /sbin)\" = usr/sbin"' \
+ --customize-hook='chroot "$1" sh -c "exec test \"\$(realpath -e /sbin/ldconfig)\" = /usr/sbin/ldconfig"' \
+ --customize-hook='chroot "$1" echo ":$FAKECHROOT_CMD_SUBST" | grep --quiet :/usr/sbin/ldconfig=' \
+ --customize-hook='chroot "$1" echo ":$FAKECHROOT_CMD_SUBST" | grep --quiet :/sbin/ldconfig=' \
+ --customize-hook='chroot "$1" env PATH=/sbin ldconfig 2>&1 | grep --quiet "/usr/bin/env: ‘python3’: No such file or directory"' \
+ {{ DIST }} /tmp/chroot-fakechroot.tar {{ MIRROR }}
+SCRIPT
+chmod +x script.sh
+{{ CMD }} --mode=root --variant=apt --include=perl,python3,passwd,fakeroot,fakechroot \
+ --hook-dir=./hooks/no-merged-usr \
+ --customize-hook='chroot "$1" useradd --home-dir /home/user --create-home user' \
+ --customize-hook='chroot "$1" sh -c "exec test \"\$(realpath -e /usr/bin/ldd)\" = /usr/bin/ldd"' \
+ --customize-hook='chroot "$1" sh -c "exec test ! -e /usr/sbin/ldconfig"' \
+ --customize-hook=./script.sh \
+ --customize-hook="copy-out /tmp/chroot-fakechroot.tar /tmp" \
+ {{ DIST }} /dev/null {{ MIRROR }}
+cmp /tmp/chroot-fakechroot.tar /tmp/chroot-root.tar || diffoscope /tmp/chroot-fakechroot.tar /tmp/chroot-root.tar
diff --git a/tests/mirror-is-deb b/tests/mirror-is-deb
new file mode 100644
index 0000000..c751aad
--- /dev/null
+++ b/tests/mirror-is-deb
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar "deb {{ MIRROR }} {{ DIST }} main"
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/mirror-is-real-file b/tests/mirror-is-real-file
new file mode 100644
index 0000000..76f9efc
--- /dev/null
+++ b/tests/mirror-is-real-file
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar /tmp/sources.list" EXIT INT TERM
+echo "deb {{ MIRROR }} {{ DIST }} main" > /tmp/sources.list
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar /tmp/sources.list
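+# a mirror passed as an existing file ends up inside the chroot as
+# /etc/apt/sources.list.d/0000sources.list; map that name back to
+# /etc/apt/sources.list so that the listing matches tar1.txt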
+tar -tf /tmp/debian-chroot.tar \
+ | sed 's#^./etc/apt/sources.list.d/0000sources.list$#./etc/apt/sources.list#' \
+ | sort | diff -u tar1.txt -
diff --git a/tests/mirror-is-stdin b/tests/mirror-is-stdin
new file mode 100644
index 0000000..ba0e376
--- /dev/null
+++ b/tests/mirror-is-stdin
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+echo "deb {{ MIRROR }} {{ DIST }} main" | {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar -
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/missing-dev-sys-proc-inside-the-chroot b/tests/missing-dev-sys-proc-inside-the-chroot
new file mode 100644
index 0000000..d127911
--- /dev/null
+++ b/tests/missing-dev-sys-proc-inside-the-chroot
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+[ {{ MODE }} = "unshare" ]
+[ {{ VARIANT }} = "custom" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --include=dpkg,dash,diffutils,coreutils,libc-bin,sed {{ DIST }} /dev/null {{ MIRROR }}
diff --git a/tests/missing-device-nodes-outside-the-chroot b/tests/missing-device-nodes-outside-the-chroot
new file mode 100644
index 0000000..7f2fa27
--- /dev/null
+++ b/tests/missing-device-nodes-outside-the-chroot
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+rm /dev/console
+useradd --home-dir /home/user --create-home user
+runuser -u user -- {{ CMD }} --mode=unshare --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/mmdebstrap b/tests/mmdebstrap
new file mode 100644
index 0000000..3327fc6
--- /dev/null
+++ b/tests/mmdebstrap
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+[ "$(id -u)" -eq 0 ]
+[ {{ MODE }} = "root" ]
+case {{ FORMAT }} in tar|squashfs|ext2) : ;; *) exit 1;; esac
+
+{{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} {{ DIST }} ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.{{ FORMAT }} {{ MIRROR }}
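+# verify the output format by checking well-known magic bytes: "ustar " at
+# offset 257 for tar, "hsqs" at offset 0 for squashfs and the little-endian
+# superblock magic 0xEF53 at offset 1080 for ext2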
+if [ "{{ FORMAT }}" = tar ]; then
+ printf 'ustar ' | cmp --bytes=6 --ignore-initial=257:0 ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.tar -
+elif [ "{{ FORMAT }}" = squashfs ]; then
+ printf 'hsqs' | cmp --bytes=4 ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.squashfs -
+elif [ "{{ FORMAT }}" = ext2 ]; then
+ printf '\123\357' | cmp --bytes=2 --ignore-initial=1080:0 ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.ext2 -
+else
+ echo "unknown format: {{ FORMAT }}" >&2
+ exit 1
+fi
diff --git a/tests/mount-is-missing b/tests/mount-is-missing
new file mode 100644
index 0000000..2e0c4b0
--- /dev/null
+++ b/tests/mount-is-missing
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+for p in /bin /usr/bin /sbin /usr/sbin; do
+ rm -f "$p/mount"
+done
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/multiple-include b/tests/multiple-include
new file mode 100644
index 0000000..36f53ec
--- /dev/null
+++ b/tests/multiple-include
@@ -0,0 +1,21 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+{{ CMD }} --mode=root --variant=apt --include=doc-debian --include=tzdata {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+rm /tmp/debian-chroot/usr/share/doc-base/doc-debian.debian-*
+rm -r /tmp/debian-chroot/usr/share/doc/debian
+rm -r /tmp/debian-chroot/usr/share/doc/doc-debian
+rm /tmp/debian-chroot/usr/share/lintian/overrides/tzdata
+rm /tmp/debian-chroot/etc/localtime
+rm /tmp/debian-chroot/etc/timezone
+rm -r /tmp/debian-chroot/usr/share/doc/tzdata
+rm -r /tmp/debian-chroot/usr/share/zoneinfo
+rm /tmp/debian-chroot/var/lib/apt/extended_states
+for p in doc-debian tzdata; do
+ for f in list md5sums config postinst postrm templates preinst prerm; do
+ [ -e "/tmp/debian-chroot/var/lib/dpkg/info/$p.$f" ] || continue
+ rm "/tmp/debian-chroot/var/lib/dpkg/info/$p.$f"
+ done
+done
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/no-sbin-in-path b/tests/no-sbin-in-path
new file mode 100644
index 0000000..0cedc0b
--- /dev/null
+++ b/tests/no-sbin-in-path
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# If FAKECHROOT_CMD_SUBST sets up the wrong substitutions, binaries cannot be
+# found. For example, if /usr/bin/chroot is listed in FAKECHROOT_CMD_SUBST but
+# /usr/sbin (the actual location of the chroot binary) is not in PATH, the
+# command fails.
+
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+
+[ "{{ MODE }}" = "fakechroot" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix env PATH=/usr/bin:/bin fakechroot fakeroot {{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/not-having-to-install-apt-in-include-because-a-hook-did-it-before b/tests/not-having-to-install-apt-in-include-because-a-hook-did-it-before
new file mode 100644
index 0000000..9a36307
--- /dev/null
+++ b/tests/not-having-to-install-apt-in-include-because-a-hook-did-it-before
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=essential --include=apt \
+ --essential-hook='APT_CONFIG=$MMDEBSTRAP_APT_CONFIG apt-get update' \
+ --essential-hook='APT_CONFIG=$MMDEBSTRAP_APT_CONFIG apt-get --yes install apt' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
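+# installing apt via apt-get in the essential hook creates
+# /var/lib/apt/extended_states (auto-installed markers) which the reference
+# tarball does not contain, so filter it out before comparing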
+tar -tf /tmp/debian-chroot.tar | sort | grep -v ./var/lib/apt/extended_states | diff -u tar1.txt -
diff --git a/tests/pass-distribution-but-implicitly-write-to-stdout b/tests/pass-distribution-but-implicitly-write-to-stdout
new file mode 100644
index 0000000..16d2243
--- /dev/null
+++ b/tests/pass-distribution-but-implicitly-write-to-stdout
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+cat << HOSTS >> /etc/hosts
+127.0.0.1 deb.debian.org
+127.0.0.1 security.debian.org
+HOSTS
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} > /tmp/debian-chroot.tar
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/pivot_root b/tests/pivot_root
new file mode 100644
index 0000000..860c41b
--- /dev/null
+++ b/tests/pivot_root
@@ -0,0 +1,54 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+trap "rm -f /tmp/chroot1.tar /tmp/chroot2.tar /tmp/chroot3.tar /tmp/mmdebstrap" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+    if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --include=mount \
+ {{ DIST }} /tmp/chroot1.tar {{ MIRROR }}
+
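+# /tmp/chroot1.tar is the reference; /tmp/chroot2.tar is rebuilt while a
+# customize hook pivot_roots (or chroots) into it and runs a nested
+# mmdebstrap there which produces /tmp/chroot3.tar; all three tarballs must
+# be bit-by-bit identical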
+if [ {{ MODE }} = "unshare" ]; then
+ # calling pivot_root in root mode does not work for mysterious reasons:
+ # pivot_root: failed to change root from `.' to `mnt': Invalid argument
+ $prefix {{ CMD }} --mode={{ MODE }} --variant=apt --include=mount \
+ --customize-hook='mkdir -p "$1/mnt" "$1/oldroot"' \
+ --customize-hook='[ ! -e /usr/bin/mmdebstrap ] || cp -aT /usr/bin/mmdebstrap "$1/usr/bin/mmdebstrap"' \
+ --customize-hook='[ ! -e ./mmdebstrap ] || cp -aT ./mmdebstrap "$1/mnt/mmdebstrap"' \
+ --customize-hook='mount -o rbind "$1" /mnt && cd /mnt && /sbin/pivot_root . oldroot' \
+ --customize-hook='unshare -U echo nested unprivileged unshare' \
+ --customize-hook='env --chdir=/mnt {{ CMD }} --mode=unshare --variant=apt --include=mount {{ DIST }} /tmp/chroot3.tar {{ MIRROR }}' \
+ --customize-hook='copy-out /tmp/chroot3.tar /tmp' \
+ --customize-hook='rm -f "/usr/bin/mmdebstrap" "/mnt/mmdebstrap"' \
+ --customize-hook='umount -l oldroot sys' \
+ --customize-hook='rmdir /oldroot' \
+ {{ DIST }} /tmp/chroot2.tar {{ MIRROR }}
+
+ cmp /tmp/chroot1.tar /tmp/chroot2.tar || diffoscope /tmp/chroot1.tar /tmp/chroot2.tar
+ cmp /tmp/chroot1.tar /tmp/chroot3.tar || diffoscope /tmp/chroot1.tar /tmp/chroot3.tar
+ rm /tmp/chroot2.tar /tmp/chroot3.tar
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --include=mount \
+ --customize-hook='mkdir -p "$1/mnt"' \
+ --customize-hook='[ ! -e /usr/bin/mmdebstrap ] || cp -aT /usr/bin/mmdebstrap "$1/usr/bin/mmdebstrap"' \
+ --customize-hook='[ ! -e ./mmdebstrap ] || cp -aT ./mmdebstrap "$1/mnt/mmdebstrap"' \
+ --chrooted-customize-hook='env --chdir=/mnt {{ CMD }} --mode=unshare --variant=apt --include=mount {{ DIST }} /tmp/chroot3.tar {{ MIRROR }}' \
+ --customize-hook='copy-out /tmp/chroot3.tar /tmp' \
+ --customize-hook='rm -f "$1/usr/bin/mmdebstrap" "$1/mnt/mmdebstrap"' \
+ {{ DIST }} /tmp/chroot2.tar {{ MIRROR }}
+
+cmp /tmp/chroot1.tar /tmp/chroot2.tar || diffoscope /tmp/chroot1.tar /tmp/chroot2.tar
+cmp /tmp/chroot1.tar /tmp/chroot3.tar || diffoscope /tmp/chroot1.tar /tmp/chroot3.tar
diff --git a/tests/preserve-mode-of-etc-resolv-conf-and-etc-hostname b/tests/preserve-mode-of-etc-resolv-conf-and-etc-hostname
new file mode 100644
index 0000000..5e5f835
--- /dev/null
+++ b/tests/preserve-mode-of-etc-resolv-conf-and-etc-hostname
@@ -0,0 +1,102 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+for f in /etc/resolv.conf /etc/hostname; do
+ # preserve original content
+ cat "$f" > "$f.bak"
+ # in case $f is a symlink, we replace it by a real file
+ if [ -L "$f" ]; then
+ rm "$f"
+ cp "$f.bak" "$f"
+ fi
+ chmod 644 "$f"
+ [ "$(stat --format=%A "$f")" = "-rw-r--r--" ]
+done
+{{ CMD }} --variant=custom --mode={{ MODE }} {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+for f in /etc/resolv.conf /etc/hostname; do
+ [ "$(stat --format=%A "/tmp/debian-chroot/$f")" = "-rw-r--r--" ]
+done
+rm /tmp/debian-chroot/dev/console
+rm /tmp/debian-chroot/dev/fd
+rm /tmp/debian-chroot/dev/full
+rm /tmp/debian-chroot/dev/null
+rm /tmp/debian-chroot/dev/ptmx
+rm /tmp/debian-chroot/dev/random
+rm /tmp/debian-chroot/dev/stderr
+rm /tmp/debian-chroot/dev/stdin
+rm /tmp/debian-chroot/dev/stdout
+rm /tmp/debian-chroot/dev/tty
+rm /tmp/debian-chroot/dev/urandom
+rm /tmp/debian-chroot/dev/zero
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
+for f in /etc/resolv.conf /etc/hostname; do
+ chmod 755 "$f"
+ [ "$(stat --format=%A "$f")" = "-rwxr-xr-x" ]
+done
+{{ CMD }} --variant=custom --mode={{ MODE }} {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+for f in /etc/resolv.conf /etc/hostname; do
+ [ "$(stat --format=%A "/tmp/debian-chroot/$f")" = "-rwxr-xr-x" ]
+done
+rm /tmp/debian-chroot/dev/console
+rm /tmp/debian-chroot/dev/fd
+rm /tmp/debian-chroot/dev/full
+rm /tmp/debian-chroot/dev/null
+rm /tmp/debian-chroot/dev/ptmx
+rm /tmp/debian-chroot/dev/random
+rm /tmp/debian-chroot/dev/stderr
+rm /tmp/debian-chroot/dev/stdin
+rm /tmp/debian-chroot/dev/stdout
+rm /tmp/debian-chroot/dev/tty
+rm /tmp/debian-chroot/dev/urandom
+rm /tmp/debian-chroot/dev/zero
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
+for f in /etc/resolv.conf /etc/hostname; do
+ rm "$f"
+ ln -s "$f.bak" "$f"
+ [ "$(stat --format=%A "$f")" = "lrwxrwxrwx" ]
+done
+{{ CMD }} --variant=custom --mode={{ MODE }} {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+for f in /etc/resolv.conf /etc/hostname; do
+ [ "$(stat --format=%A "/tmp/debian-chroot/$f")" = "-rw-r--r--" ]
+done
+rm /tmp/debian-chroot/dev/console
+rm /tmp/debian-chroot/dev/fd
+rm /tmp/debian-chroot/dev/full
+rm /tmp/debian-chroot/dev/null
+rm /tmp/debian-chroot/dev/ptmx
+rm /tmp/debian-chroot/dev/random
+rm /tmp/debian-chroot/dev/stderr
+rm /tmp/debian-chroot/dev/stdin
+rm /tmp/debian-chroot/dev/stdout
+rm /tmp/debian-chroot/dev/tty
+rm /tmp/debian-chroot/dev/urandom
+rm /tmp/debian-chroot/dev/zero
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
diff --git a/tests/progress-bars-on-fake-tty b/tests/progress-bars-on-fake-tty
new file mode 100644
index 0000000..e403111
--- /dev/null
+++ b/tests/progress-bars-on-fake-tty
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+script -qfec "{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}" /dev/null
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/quiet b/tests/quiet
new file mode 100644
index 0000000..d1cbb22
--- /dev/null
+++ b/tests/quiet
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+{{ CMD }} --mode=root --variant=apt --quiet {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/read-from-stdin-write-to-stdout b/tests/read-from-stdin-write-to-stdout
new file mode 100644
index 0000000..960cd3a
--- /dev/null
+++ b/tests/read-from-stdin-write-to-stdout
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm /tmp/debian-chroot.tar" EXIT INT TERM
+echo "deb {{ MIRROR }} {{ DIST }} main" | {{ CMD }} --mode={{ MODE }} --variant=apt > /tmp/debian-chroot.tar
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/remove-start-stop-daemon-and-policy-rc-d-in-hook b/tests/remove-start-stop-daemon-and-policy-rc-d-in-hook
new file mode 100644
index 0000000..d9c4be6
--- /dev/null
+++ b/tests/remove-start-stop-daemon-and-policy-rc-d-in-hook
@@ -0,0 +1,8 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt \
+ --customize-hook='rm "$1/usr/sbin/policy-rc.d"; rm "$1/usr/sbin/start-stop-daemon"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/root-mode-inside-chroot b/tests/root-mode-inside-chroot
new file mode 100644
index 0000000..0049c6b
--- /dev/null
+++ b/tests/root-mode-inside-chroot
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# Same as unshare-as-root-user-inside-chroot but this time we run mmdebstrap in
+# root mode from inside a chroot
+
+set -eu
+export LC_ALL=C.UTF-8
+[ "$(whoami)" = "root" ]
+
+trap "rm -f /tmp/debian-chroot.tar script.sh" EXIT INT TERM
+
+cat << 'SCRIPT' > script.sh
+#!/bin/sh
+set -exu
+rootfs="$1"
+mkdir -p "$rootfs/mnt"
+[ -e /usr/bin/mmdebstrap ] && cp -aT /usr/bin/mmdebstrap "$rootfs/usr/bin/mmdebstrap"
+[ -e ./mmdebstrap ] && cp -aT ./mmdebstrap "$rootfs/mnt/mmdebstrap"
+chroot "$rootfs" env --chdir=/mnt \
+ {{ CMD }} --mode=root --variant=apt \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+SCRIPT
+chmod +x script.sh
+{{ CMD }} --mode=root --variant=apt --include=perl,mount \
+ --customize-hook=./script.sh \
+ --customize-hook="download /tmp/debian-chroot.tar /tmp/debian-chroot.tar" \
+ {{ DIST }} /dev/null {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/root-mode-inside-unshare-chroot b/tests/root-mode-inside-unshare-chroot
new file mode 100644
index 0000000..e953c65
--- /dev/null
+++ b/tests/root-mode-inside-unshare-chroot
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# Same as root-mode-inside-chroot but this time we run mmdebstrap in root mode
+# from inside an unshare chroot.
+
+set -eu
+export LC_ALL=C.UTF-8
+
+[ {{ MODE }} = "unshare" ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+cat << 'SCRIPT' > /tmp/script.sh
+#!/bin/sh
+set -eu
+rootfs="$1"
+mkdir -p "$rootfs/mnt"
+[ -e /usr/bin/mmdebstrap ] && cp -aT /usr/bin/mmdebstrap "$rootfs/usr/bin/mmdebstrap"
+[ -e ./mmdebstrap ] && cp -aT ./mmdebstrap "$rootfs/mnt/mmdebstrap"
+chroot "$rootfs" env --chdir=/mnt \
+ {{ CMD }} --mode=root --variant=apt \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+SCRIPT
+chmod +x /tmp/script.sh
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --include=perl,mount \
+ --customize-hook=/tmp/script.sh \
+ --customize-hook="download /tmp/debian-chroot.tar /tmp/debian-chroot.tar" \
+ {{ DIST }} /dev/null {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar /tmp/script.sh
diff --git a/tests/root-without-cap-sys-admin b/tests/root-without-cap-sys-admin
new file mode 100644
index 0000000..419f7b3
--- /dev/null
+++ b/tests/root-without-cap-sys-admin
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+[ "$(whoami)" = "root" ]
+
+if grep --null-data --quiet --no-messages '^container=lxc$' /proc/1/environ; then
+ # see https://stackoverflow.com/questions/65748254/
+ echo "cannot run under lxc -- Skipping test..." >&2
+ exit 0
+fi
+
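+# mounting /proc requires CAP_SYS_ADMIN, so with that capability dropped the
+# chroot must end up without a mounted /proc and /proc/self/fd must not exist
+# during the customize hook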
+capsh --drop=cap_sys_admin -- -c 'exec "$@"' exec \
+ {{ CMD }} --mode=root --variant=apt \
+ --customize-hook='chroot "$1" sh -c "test ! -e /proc/self/fd"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar
diff --git a/tests/sigint-during-customize-hook b/tests/sigint-during-customize-hook
new file mode 100644
index 0000000..c8a4c94
--- /dev/null
+++ b/tests/sigint-during-customize-hook
@@ -0,0 +1,22 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+setsid --wait {{ CMD }} --mode=root --variant=apt --customize-hook='touch hookstarted && sleep 10 && touch fail' {{ DIST }} /tmp/debian-chroot {{ MIRROR }} &
+pid=$!
+while sleep 1; do [ -e hookstarted ] && break; done
+rm hookstarted
+# negative PID values choose the whole process group
+pgid=$((-1*$(ps -p "$pid" -o pgid=)))
+/bin/kill --signal INT -- "$pgid"
+ret=0
+wait $pid || ret=$?
+rm -r /tmp/debian-chroot
+if [ -e fail ]; then
+ echo customize hook was not interrupted >&2
+ rm fail
+ exit 1
+fi
+if [ "$ret" = 0 ]; then
+ echo expected failure but got exit $ret >&2
+ exit 1
+fi
diff --git a/tests/signed-by-with-host-keys b/tests/signed-by-with-host-keys
new file mode 100644
index 0000000..ef0d8c7
--- /dev/null
+++ b/tests/signed-by-with-host-keys
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf 'deb {{ MIRROR }} {{ DIST }} main\n' | cmp /tmp/debian-chroot/etc/apt/sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/signed-by-without-host-keys b/tests/signed-by-without-host-keys
new file mode 100644
index 0000000..470a9de
--- /dev/null
+++ b/tests/signed-by-without-host-keys
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+for f in /etc/apt/trusted.gpg.d/*.gpg /etc/apt/trusted.gpg.d/*.asc; do
+ [ -e "$f" ] || continue
+ rm "$f"
+done
+rmdir /etc/apt/trusted.gpg.d
+mkdir /etc/apt/trusted.gpg.d
+{{ CMD }} --mode=root --variant=apt {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf 'deb [signed-by="/usr/share/keyrings/debian-archive-keyring.gpg"] {{ MIRROR }} {{ DIST }} main\n' | cmp /tmp/debian-chroot/etc/apt/sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
+rm -r /tmp/debian-chroot
diff --git a/tests/skip-mount b/tests/skip-mount
new file mode 100644
index 0000000..e210d5c
--- /dev/null
+++ b/tests/skip-mount
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+[ "{{ MODE }}" = "unshare" ]
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode=unshare --variant=apt \
+ --skip=chroot/mount/proc,chroot/mount/sys \
+ --customize-hook='mountpoint "$1"/dev/null' \
+ --customize-hook='if mountpoint "$1"/sys; then exit 1; fi' \
+ --customize-hook='if mountpoint "$1"/proc; then exit 1; fi' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/skip-output-dev b/tests/skip-output-dev
new file mode 100644
index 0000000..0766a66
--- /dev/null
+++ b/tests/skip-output-dev
@@ -0,0 +1,35 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# test this for both unshare and root mode because the code paths creating
+# entries in /dev are different depending on whether mknod is available or not
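+# with --skip=output/dev the tarball must lack all entries below /dev, so
+# appending them by hand to the listing must reproduce tar1.txt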
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --skip=output/dev {{ DIST }} - {{ MIRROR }} | {
+ tar -t;
+ echo ./dev/console;
+ echo ./dev/fd;
+ echo ./dev/full;
+ echo ./dev/null;
+ echo ./dev/ptmx;
+ echo ./dev/pts/;
+ echo ./dev/random;
+ echo ./dev/shm/;
+ echo ./dev/stderr;
+ echo ./dev/stdin;
+ echo ./dev/stdout;
+ echo ./dev/tty;
+ echo ./dev/urandom;
+ echo ./dev/zero;
+} | sort | diff -u tar1.txt -
diff --git a/tests/skip-output-mknod b/tests/skip-output-mknod
new file mode 100644
index 0000000..8ccbfdf
--- /dev/null
+++ b/tests/skip-output-mknod
@@ -0,0 +1,30 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# test this for both unshare and root mode because the code paths creating
+# entries in /dev are different depending on whether mknod is available or not
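+# with --skip=output/mknod only the character device nodes are missing from
+# the tarball; the directories and symlinks below /dev are still included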
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt --skip=output/mknod \
+ {{ DIST }} - {{ MIRROR }} | {
+ tar -t;
+ echo ./dev/console;
+ echo ./dev/full;
+ echo ./dev/null;
+ echo ./dev/ptmx;
+ echo ./dev/random;
+ echo ./dev/tty;
+ echo ./dev/urandom;
+ echo ./dev/zero;
+} | sort | diff -u tar1.txt -
diff --git a/tests/skip-start-stop-daemon-policy-rc b/tests/skip-start-stop-daemon-policy-rc
new file mode 100644
index 0000000..bdf5469
--- /dev/null
+++ b/tests/skip-start-stop-daemon-policy-rc
@@ -0,0 +1,10 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt \
+ --skip=chroot/start-stop-daemon,chroot/policy-rc.d \
+ --customize-hook='test ! -e "$1/sbin/start-stop-daemon.REAL"' \
+ --customize-hook='test ! -e "$1/usr/sbin/policy-rc.d"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
diff --git a/tests/skip-tar-in-mknod b/tests/skip-tar-in-mknod
new file mode 100644
index 0000000..eb3027a
--- /dev/null
+++ b/tests/skip-tar-in-mknod
@@ -0,0 +1,28 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+[ {{ MODE }} = "unshare" ]
+
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+$prefix {{ CMD }} --mode={{ MODE }} --variant=custom \
+ --skip=update,setup,cleanup,tar-in/mknod \
+ --setup-hook='tar-in ./cache/mmdebstrap-{{ DIST }}-apt.tar /' \
+ '' /tmp/debian-chroot.tar
+
+cmp ./cache/mmdebstrap-{{ DIST }}-apt.tar /tmp/debian-chroot.tar \
+ || diffoscope ./cache/mmdebstrap-{{ DIST }}-apt.tar /tmp/debian-chroot.tar
diff --git a/tests/special-hooks-using-helpers b/tests/special-hooks-using-helpers
new file mode 100644
index 0000000..a211746
--- /dev/null
+++ b/tests/special-hooks-using-helpers
@@ -0,0 +1,28 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+mkfifo /tmp/myfifo
+mkdir /tmp/root
+ln -s /real /tmp/root/link
+mkdir /tmp/root/real
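+# run_testA uploads /tmp/foo through a connected --hook-helper/--hook-listener
+# pair to the given path inside /tmp/root; every path variant must resolve
+# through the link -> /real symlink (without escaping /tmp/root) so that the
+# file ends up at /tmp/root/real/foo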
+run_testA() {
+ echo content > /tmp/foo
+ # shellcheck disable=SC2094
+ { { { {{ CMD }} --hook-helper /tmp/root root setup '' 1 upload /tmp/foo "$1" < /tmp/myfifo 3>&-; echo $? >&3; printf "\\000\\000adios";
+ } | {{ CMD }} --hook-listener 1 3>&- >/tmp/myfifo; echo $?; } 3>&1;
+ } | { read -r xs1; [ "$xs1" -eq 0 ]; read -r xs2; [ "$xs2" -eq 0 ]; }
+ echo content | diff -u - /tmp/root/real/foo
+ rm /tmp/foo
+ rm /tmp/root/real/foo
+}
+run_testA link/foo
+run_testA /link/foo
+run_testA ///link///foo///
+run_testA /././link/././foo/././
+run_testA /link/../link/foo
+run_testA /link/../../link/foo
+run_testA /../../link/foo
+rmdir /tmp/root/real
+rm /tmp/root/link
+rmdir /tmp/root
+rm /tmp/myfifo
diff --git a/tests/special-hooks-using-helpers-and-env-vars b/tests/special-hooks-using-helpers-and-env-vars
new file mode 100644
index 0000000..7a1ffeb
--- /dev/null
+++ b/tests/special-hooks-using-helpers-and-env-vars
@@ -0,0 +1,31 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+cat << 'SCRIPT' > /tmp/script.sh
+#!/bin/sh
+set -eu
+echo "MMDEBSTRAP_APT_CONFIG $MMDEBSTRAP_APT_CONFIG"
+echo "$MMDEBSTRAP_HOOK" >> /tmp/hooks
+[ "$MMDEBSTRAP_MODE" = "root" ]
+echo test-content $MMDEBSTRAP_HOOK > test
+{{ CMD }} --hook-helper "$1" "$MMDEBSTRAP_MODE" "$MMDEBSTRAP_HOOK" '' 1 upload test /test <&$MMDEBSTRAP_HOOKSOCK >&$MMDEBSTRAP_HOOKSOCK
+rm test
+echo "content inside chroot:"
+cat "$1/test"
+[ "test-content $MMDEBSTRAP_HOOK" = "$(cat "$1/test")" ]
+{{ CMD }} --hook-helper "$1" "$MMDEBSTRAP_MODE" "$MMDEBSTRAP_HOOK" '' 1 download /test test <&$MMDEBSTRAP_HOOKSOCK >&$MMDEBSTRAP_HOOKSOCK
+echo "content outside chroot:"
+cat test
+[ "test-content $MMDEBSTRAP_HOOK" = "$(cat test)" ]
+rm test
+SCRIPT
+chmod +x /tmp/script.sh
+{{ CMD }} --mode=root --variant=apt \
+ --setup-hook=/tmp/script.sh \
+ --extract-hook=/tmp/script.sh \
+ --essential-hook=/tmp/script.sh \
+ --customize-hook=/tmp/script.sh \
+ {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+printf "setup\nextract\nessential\ncustomize\n" | diff -u - /tmp/hooks
+rm /tmp/script.sh /tmp/hooks
+rm -r /tmp/debian-chroot
diff --git a/tests/special-hooks-with-mode-mode b/tests/special-hooks-with-mode-mode
new file mode 100644
index 0000000..99e2a47
--- /dev/null
+++ b/tests/special-hooks-with-mode-mode
@@ -0,0 +1,148 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+[ "{{ MODE }}" = "fakechroot" ] && prefix="$prefix fakechroot fakeroot"
+symlinktarget=/real
+[ "{{ MODE }}" = "fakechroot" ] && symlinktarget='$1/real'
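+# under fakechroot, absolute symlink targets are resolved relative to the
+# host, so the symlink has to point at the target path as seen from outside
+# the chroot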
+echo copy-in-setup > /tmp/copy-in-setup
+echo copy-in-essential > /tmp/copy-in-essential
+echo copy-in-customize > /tmp/copy-in-customize
+echo tar-in-setup > /tmp/tar-in-setup
+echo tar-in-essential > /tmp/tar-in-essential
+echo tar-in-customize > /tmp/tar-in-customize
+tar --numeric-owner --format=pax --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime -C /tmp -cf /tmp/tar-in-setup.tar tar-in-setup
+tar --numeric-owner --format=pax --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime -C /tmp -cf /tmp/tar-in-essential.tar tar-in-essential
+tar --numeric-owner --format=pax --pax-option=exthdr.name=%d/PaxHeaders/%f,delete=atime,delete=ctime -C /tmp -cf /tmp/tar-in-customize.tar tar-in-customize
+rm /tmp/tar-in-setup
+rm /tmp/tar-in-essential
+rm /tmp/tar-in-customize
+echo upload-setup > /tmp/upload-setup
+echo upload-essential > /tmp/upload-essential
+echo upload-customize > /tmp/upload-customize
+mkdir /tmp/sync-in-setup
+mkdir /tmp/sync-in-essential
+mkdir /tmp/sync-in-customize
+echo sync-in-setup > /tmp/sync-in-setup/file
+echo sync-in-essential > /tmp/sync-in-essential/file
+echo sync-in-customize > /tmp/sync-in-customize/file
+$prefix {{ CMD }} --mode={{ MODE }} --variant=apt \
+ --setup-hook='mkdir "$1/real"' \
+ --setup-hook='copy-in /tmp/copy-in-setup /real' \
+ --setup-hook='echo copy-in-setup | cmp "$1/real/copy-in-setup" -' \
+ --setup-hook='rm "$1/real/copy-in-setup"' \
+ --setup-hook='echo copy-out-setup > "$1/real/copy-out-setup"' \
+ --setup-hook='copy-out /real/copy-out-setup /tmp' \
+ --setup-hook='rm "$1/real/copy-out-setup"' \
+ --setup-hook='tar-in /tmp/tar-in-setup.tar /real' \
+ --setup-hook='echo tar-in-setup | cmp "$1/real/tar-in-setup" -' \
+ --setup-hook='tar-out /real/tar-in-setup /tmp/tar-out-setup.tar' \
+ --setup-hook='rm "$1"/real/tar-in-setup' \
+ --setup-hook='upload /tmp/upload-setup /real/upload' \
+ --setup-hook='echo upload-setup | cmp "$1/real/upload" -' \
+ --setup-hook='download /real/upload /tmp/download-setup' \
+ --setup-hook='rm "$1/real/upload"' \
+ --setup-hook='sync-in /tmp/sync-in-setup /real' \
+ --setup-hook='echo sync-in-setup | cmp "$1/real/file" -' \
+ --setup-hook='sync-out /real /tmp/sync-out-setup' \
+ --setup-hook='rm "$1/real/file"' \
+ --essential-hook='ln -s "'"$symlinktarget"'" "$1/symlink"' \
+ --essential-hook='copy-in /tmp/copy-in-essential /symlink' \
+ --essential-hook='echo copy-in-essential | cmp "$1/real/copy-in-essential" -' \
+ --essential-hook='rm "$1/real/copy-in-essential"' \
+ --essential-hook='echo copy-out-essential > "$1/real/copy-out-essential"' \
+ --essential-hook='copy-out /symlink/copy-out-essential /tmp' \
+ --essential-hook='rm "$1/real/copy-out-essential"' \
+ --essential-hook='tar-in /tmp/tar-in-essential.tar /symlink' \
+ --essential-hook='echo tar-in-essential | cmp "$1/real/tar-in-essential" -' \
+ --essential-hook='tar-out /symlink/tar-in-essential /tmp/tar-out-essential.tar' \
+ --essential-hook='rm "$1"/real/tar-in-essential' \
+ --essential-hook='upload /tmp/upload-essential /symlink/upload' \
+ --essential-hook='echo upload-essential | cmp "$1/real/upload" -' \
+ --essential-hook='download /symlink/upload /tmp/download-essential' \
+ --essential-hook='rm "$1/real/upload"' \
+ --essential-hook='sync-in /tmp/sync-in-essential /symlink' \
+ --essential-hook='echo sync-in-essential | cmp "$1/real/file" -' \
+ --essential-hook='sync-out /real /tmp/sync-out-essential' \
+ --essential-hook='rm "$1/real/file"' \
+ --customize-hook='copy-in /tmp/copy-in-customize /symlink' \
+ --customize-hook='echo copy-in-customize | cmp "$1/real/copy-in-customize" -' \
+ --customize-hook='rm "$1/real/copy-in-customize"' \
+ --customize-hook='echo copy-out-customize > "$1/real/copy-out-customize"' \
+ --customize-hook='copy-out /symlink/copy-out-customize /tmp' \
+ --customize-hook='rm "$1/real/copy-out-customize"' \
+ --customize-hook='tar-in /tmp/tar-in-customize.tar /symlink' \
+ --customize-hook='echo tar-in-customize | cmp "$1/real/tar-in-customize" -' \
+ --customize-hook='tar-out /symlink/tar-in-customize /tmp/tar-out-customize.tar' \
+ --customize-hook='rm "$1"/real/tar-in-customize' \
+ --customize-hook='upload /tmp/upload-customize /symlink/upload' \
+ --customize-hook='echo upload-customize | cmp "$1/real/upload" -' \
+ --customize-hook='download /symlink/upload /tmp/download-customize' \
+ --customize-hook='rm "$1/real/upload"' \
+ --customize-hook='sync-in /tmp/sync-in-customize /symlink' \
+ --customize-hook='echo sync-in-customize | cmp "$1/real/file" -' \
+ --customize-hook='sync-out /real /tmp/sync-out-customize' \
+ --customize-hook='rm "$1/real/file"' \
+ --customize-hook='rmdir "$1/real"' \
+ --customize-hook='rm "$1/symlink"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+for n in setup essential customize; do
+ ret=0
+ cmp /tmp/tar-in-$n.tar /tmp/tar-out-$n.tar || ret=$?
+ if [ "$ret" -ne 0 ]; then
+ if type diffoscope >/dev/null; then
+ diffoscope /tmp/tar-in-$n.tar /tmp/tar-out-$n.tar
+ exit 1
+ else
+ echo "no diffoscope installed" >&2
+ fi
+ if type base64 >/dev/null; then
+ base64 /tmp/tar-in-$n.tar
+ base64 /tmp/tar-out-$n.tar
+ exit 1
+ else
+ echo "no base64 installed" >&2
+ fi
+ if type xxd >/dev/null; then
+ xxd /tmp/tar-in-$n.tar
+ xxd /tmp/tar-out-$n.tar
+ exit 1
+ else
+ echo "no xxd installed" >&2
+ fi
+ exit 1
+ fi
+done
+echo copy-out-setup | cmp /tmp/copy-out-setup -
+echo copy-out-essential | cmp /tmp/copy-out-essential -
+echo copy-out-customize | cmp /tmp/copy-out-customize -
+echo upload-setup | cmp /tmp/download-setup -
+echo upload-essential | cmp /tmp/download-essential -
+echo upload-customize | cmp /tmp/download-customize -
+echo sync-in-setup | cmp /tmp/sync-out-setup/file -
+echo sync-in-essential | cmp /tmp/sync-out-essential/file -
+echo sync-in-customize | cmp /tmp/sync-out-customize/file -
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
+rm /tmp/debian-chroot.tar \
+ /tmp/copy-in-setup /tmp/copy-in-essential /tmp/copy-in-customize \
+ /tmp/copy-out-setup /tmp/copy-out-essential /tmp/copy-out-customize \
+ /tmp/tar-in-setup.tar /tmp/tar-in-essential.tar /tmp/tar-in-customize.tar \
+ /tmp/tar-out-setup.tar /tmp/tar-out-essential.tar /tmp/tar-out-customize.tar \
+ /tmp/upload-setup /tmp/upload-essential /tmp/upload-customize \
+ /tmp/download-setup /tmp/download-essential /tmp/download-customize \
+ /tmp/sync-in-setup/file /tmp/sync-in-essential/file /tmp/sync-in-customize/file \
+ /tmp/sync-out-setup/file /tmp/sync-out-essential/file /tmp/sync-out-customize/file
+rmdir /tmp/sync-in-setup /tmp/sync-in-essential /tmp/sync-in-customize \
+ /tmp/sync-out-setup /tmp/sync-out-essential /tmp/sync-out-customize
diff --git a/tests/stable-default-mirror b/tests/stable-default-mirror
new file mode 100644
index 0000000..c79f9d6
--- /dev/null
+++ b/tests/stable-default-mirror
@@ -0,0 +1,20 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+cat << HOSTS >> /etc/hosts
+127.0.0.1 deb.debian.org
+127.0.0.1 security.debian.org
+HOSTS
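+# with deb.debian.org and security.debian.org pointing at localhost, the
+# default sources.list entries generated for "stable" can be served by the
+# local test mirror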
+apt-cache policy
+cat /etc/apt/sources.list
+{{ CMD }} --mode=root --variant=apt stable /tmp/debian-chroot
+cat << SOURCES | cmp /tmp/debian-chroot/etc/apt/sources.list -
+deb http://deb.debian.org/debian stable main
+deb http://deb.debian.org/debian stable-updates main
+deb http://security.debian.org/debian-security stable-security main
+SOURCES
+rm -r /tmp/debian-chroot
diff --git a/tests/supply-components-manually b/tests/supply-components-manually
new file mode 100644
index 0000000..038b5dc
--- /dev/null
+++ b/tests/supply-components-manually
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt --components="main main" --comp="main,main" {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+echo "deb {{ MIRROR }} {{ DIST }} main" | cmp /tmp/debian-chroot/etc/apt/sources.list -
+tar -C /tmp/debian-chroot --one-file-system -c . | tar -t | sort | diff -u tar1.txt -
diff --git a/tests/tarfilter-idshift b/tests/tarfilter-idshift
new file mode 100644
index 0000000..731d40f
--- /dev/null
+++ b/tests/tarfilter-idshift
@@ -0,0 +1,58 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+trap "rm -f /tmp/debian-chroot.tar /tmp/debian-chroot-shifted.tar /tmp/debian-chroot.txt /tmp/debian-chroot-shiftedback.tar /tmp/expected; rm -rf /tmp/debian-chroot" EXIT INT TERM
+useradd --home-dir /home/user --create-home user
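+# mmdebstrap in unshare mode maps ids into the subordinate ranges of this
+# user, so the test expects them to start at 100000 to match the --idshift
+# value used below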
+echo user:100000:65536 | cmp /etc/subuid -
+echo user:100000:65536 | cmp /etc/subgid -
+# include iputils-ping so that we can verify that tarfilter does not remove
+# extended attributes
+# run the output through a no-op "tarfilter --idshift 0" to create a tarball
+# that should be bit-by-bit identical to a round trip through
+# "tarfilter --idshift X" and "tarfilter --idshift -X"
+runuser -u user -- {{ CMD }} --mode=unshare --variant=apt --include=iputils-ping {{ DIST }} - {{ MIRROR }} \
+ | ./tarfilter --idshift 0 > /tmp/debian-chroot.tar
+# make sure that xattrs are set in the original tarball
+mkdir /tmp/debian-chroot
+tar --xattrs --xattrs-include='*' --directory /tmp/debian-chroot -xf /tmp/debian-chroot.tar ./usr/bin/ping
+echo "/tmp/debian-chroot/usr/bin/ping cap_net_raw=ep" > /tmp/expected
+getcap /tmp/debian-chroot/usr/bin/ping | diff -u /tmp/expected - >&2
+rm /tmp/debian-chroot/usr/bin/ping
+rmdir /tmp/debian-chroot/usr/bin
+rmdir /tmp/debian-chroot/usr
+rmdir /tmp/debian-chroot
+# shift the uid/gid forward by 100000 and backward by 100000
+./tarfilter --idshift 100000 < /tmp/debian-chroot.tar > /tmp/debian-chroot-shifted.tar
+./tarfilter --idshift -100000 < /tmp/debian-chroot-shifted.tar > /tmp/debian-chroot-shiftedback.tar
+# the tarball before and after the roundtrip through tarfilter should be bit
+# by bit identical
+cmp /tmp/debian-chroot.tar /tmp/debian-chroot-shiftedback.tar
+# manually adjust uid/gid and compare "tar -t" output
+tar --numeric-owner -tvf /tmp/debian-chroot.tar \
+ | sed 's# 42/0 # 100042/100000 #' \
+ | sed 's# 0/0 # 100000/100000 #' \
+ | sed 's# 0/5 # 100000/100005 #' \
+ | sed 's# 0/8 # 100000/100008 #' \
+ | sed 's# 0/42 # 100000/100042 #' \
+ | sed 's# 0/43 # 100000/100043 #' \
+ | sed 's# 0/50 # 100000/100050 #' \
+ | sed 's/ \+/ /g' \
+ > /tmp/debian-chroot.txt
+tar --numeric-owner -tvf /tmp/debian-chroot-shifted.tar \
+ | sed 's/ \+/ /g' \
+ | diff -u /tmp/debian-chroot.txt - >&2
+mkdir /tmp/debian-chroot
+tar --xattrs --xattrs-include='*' --directory /tmp/debian-chroot -xf /tmp/debian-chroot-shifted.tar
+echo "100000 100000" > /tmp/expected
+stat --format="%u %g" /tmp/debian-chroot/usr/bin/ping | diff -u /tmp/expected - >&2
+echo "/tmp/debian-chroot/usr/bin/ping cap_net_raw=ep" > /tmp/expected
+getcap /tmp/debian-chroot/usr/bin/ping | diff -u /tmp/expected - >&2
+echo "0 0" > /tmp/expected
+runuser -u user -- {{ CMD }} --unshare-helper /usr/sbin/chroot /tmp/debian-chroot stat --format="%u %g" /usr/bin/ping \
+ | diff -u /tmp/expected - >&2
+echo "/usr/bin/ping cap_net_raw=ep" > /tmp/expected
+runuser -u user -- {{ CMD }} --unshare-helper /usr/sbin/chroot /tmp/debian-chroot getcap /usr/bin/ping \
+ | diff -u /tmp/expected - >&2
diff --git a/tests/unpack-doc-debian b/tests/unpack-doc-debian
new file mode 100644
index 0000000..fe87d13
--- /dev/null
+++ b/tests/unpack-doc-debian
@@ -0,0 +1,57 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+trap "rm -rf /tmp/debian-chroot" EXIT INT TERM
+
+[ {{ VARIANT }} = extract ]
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+[ "{{ MODE }}" = "fakechroot" ] && prefix="$prefix fakechroot fakeroot"
+$prefix {{ CMD }} --mode={{ MODE }} --variant={{ VARIANT }} --include=doc-debian {{ DIST }} /tmp/debian-chroot {{ MIRROR }}
+# delete contents of doc-debian
+rm /tmp/debian-chroot/usr/share/doc-base/doc-debian.debian-*
+rm -r /tmp/debian-chroot/usr/share/doc/debian
+rm -r /tmp/debian-chroot/usr/share/doc/doc-debian
+# delete real files
+rm /tmp/debian-chroot/etc/apt/sources.list
+rm /tmp/debian-chroot/etc/fstab
+rm /tmp/debian-chroot/etc/hostname
+rm /tmp/debian-chroot/etc/resolv.conf
+rm /tmp/debian-chroot/var/lib/dpkg/status
+rm /tmp/debian-chroot/var/lib/dpkg/arch
+rm /tmp/debian-chroot/var/cache/apt/archives/lock
+rm /tmp/debian-chroot/var/lib/apt/lists/lock
+## delete merged usr symlinks
+#rm /tmp/debian-chroot/libx32
+#rm /tmp/debian-chroot/lib64
+#rm /tmp/debian-chroot/lib32
+#rm /tmp/debian-chroot/sbin
+#rm /tmp/debian-chroot/bin
+#rm /tmp/debian-chroot/lib
+# delete ./dev (files might exist or not depending on the mode)
+rm -f /tmp/debian-chroot/dev/console
+rm -f /tmp/debian-chroot/dev/fd
+rm -f /tmp/debian-chroot/dev/full
+rm -f /tmp/debian-chroot/dev/null
+rm -f /tmp/debian-chroot/dev/ptmx
+rm -f /tmp/debian-chroot/dev/random
+rm -f /tmp/debian-chroot/dev/stderr
+rm -f /tmp/debian-chroot/dev/stdin
+rm -f /tmp/debian-chroot/dev/stdout
+rm -f /tmp/debian-chroot/dev/tty
+rm -f /tmp/debian-chroot/dev/urandom
+rm -f /tmp/debian-chroot/dev/zero
+# the rest should be empty directories that we can rmdir recursively
+find /tmp/debian-chroot -depth -print0 | xargs -0 rmdir
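The closing find/rmdir pipeline is itself the assertion: rmdir refuses to remove anything that is not an empty directory, and a non-zero exit from xargs fails the test under set -eu. A minimal sketch of that behaviour (paths are hypothetical):
mkdir -p /tmp/demo/a/b
find /tmp/demo -depth -print0 | xargs -0 rmdir    # succeeds: only empty directories remain
mkdir -p /tmp/demo/a && touch /tmp/demo/a/file
find /tmp/demo -depth -print0 | xargs -0 rmdir    # fails: rmdir cannot remove a regular file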
diff --git a/tests/unshare-as-root-user b/tests/unshare-as-root-user
new file mode 100644
index 0000000..e1ba4d6
--- /dev/null
+++ b/tests/unshare-as-root-user
@@ -0,0 +1,9 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+[ "$(whoami)" = "root" ]
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+{{ CMD }} --mode=unshare --variant=apt \
+ --customize-hook='chroot "$1" sh -c "test -e /proc/self/fd"' \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
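Customize hooks are handed the chroot directory as their first argument, so the inline hook above could also be written as a standalone script; a minimal sketch (the path /tmp/check-proc.sh is an assumption):
cat > /tmp/check-proc.sh << 'EOF'
#!/bin/sh
set -eu
# "$1" is the chroot directory passed to the hook
chroot "$1" sh -c 'test -e /proc/self/fd'
EOF
chmod +x /tmp/check-proc.sh
# then pass --customize-hook=/tmp/check-proc.sh instead of the inline command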
diff --git a/tests/unshare-as-root-user-inside-chroot b/tests/unshare-as-root-user-inside-chroot
new file mode 100644
index 0000000..9c0eb0d
--- /dev/null
+++ b/tests/unshare-as-root-user-inside-chroot
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# When unshare mode is run as root, mmdebstrap first executes "unshare --mount",
+# which fails if mmdebstrap itself is executed from within a chroot:
+#     unshare: cannot change root filesystem propagation: Invalid argument
+# This test exercises the workaround in mmdebstrap of passing --propagation unchanged
+
+set -eu
+export LC_ALL=C.UTF-8
+[ "$(whoami)" = "root" ]
+trap "rm -f /tmp/debian-chroot.tar script.sh" EXIT INT TERM
+cat << 'SCRIPT' > script.sh
+#!/bin/sh
+set -eu
+rootfs="$1"
+mkdir -p "$rootfs/mnt"
+[ -e /usr/bin/mmdebstrap ] && cp -aT /usr/bin/mmdebstrap "$rootfs/usr/bin/mmdebstrap"
+[ -e ./mmdebstrap ] && cp -aT ./mmdebstrap "$rootfs/mnt/mmdebstrap"
+chroot "$rootfs" env --chdir=/mnt \
+ {{ CMD }} --mode=unshare --variant=apt \
+ {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+SCRIPT
+chmod +x script.sh
+{{ CMD }} --mode=root --variant=apt --include=perl,mount \
+ --customize-hook=./script.sh \
+ --customize-hook="download /tmp/debian-chroot.tar /tmp/debian-chroot.tar" \
+ {{ DIST }} /dev/null {{ MIRROR }}
+tar -tf /tmp/debian-chroot.tar | sort | diff -u tar1.txt -
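The propagation failure described in the header comment can be reproduced without mmdebstrap; a minimal sketch (only meaningful when run as root from within a chroot):
unshare --mount -- true                           # inside a chroot this fails: "cannot change root filesystem propagation"
unshare --mount --propagation unchanged -- true   # the workaround: leave the propagation of / alone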
diff --git a/tests/unshare-include-deb b/tests/unshare-include-deb
new file mode 100644
index 0000000..2b9c54b
--- /dev/null
+++ b/tests/unshare-include-deb
@@ -0,0 +1,49 @@
+#!/bin/sh
+
+set -eu
+export LC_ALL=C.UTF-8
+
+[ "{{ MODE }}" = unshare ]
+
+trap "rm -rf /tmp/dummypkg.deb /tmp/dummypkg" EXIT INT TERM
+
+prefix=
+if [ "$(id -u)" -eq 0 ] && [ "{{ MODE }}" != "root" ] && [ "{{ MODE }}" != "auto" ]; then
+ if ! id "${SUDO_USER:-user}" >/dev/null 2>&1; then
+ if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+ fi
+ useradd --home-dir "/home/${SUDO_USER:-user}" --create-home "${SUDO_USER:-user}"
+ fi
+ prefix="runuser -u ${SUDO_USER:-user} --"
+fi
+
+# instead of obtaining a .deb from our cache, we create a new package because
+# otherwise apt might decide to download the package with the same name and
+# version from the cache instead of using the local .deb
+mkdir -p /tmp/dummypkg/DEBIAN
+cat << END > "/tmp/dummypkg/DEBIAN/control"
+Package: dummypkg
+Priority: optional
+Section: oldlibs
+Maintainer: Johannes Schauer Marin Rodrigues <josch@debian.org>
+Architecture: all
+Multi-Arch: foreign
+Source: dummypkg
+Version: 1
+Description: dummypkg
+END
+dpkg-deb --build "/tmp/dummypkg" "/tmp/dummypkg.deb"
+
+# make the .deb only readable by its owner, which excludes the unshared user
+chmod 600 /tmp/dummypkg.deb
+
+ret=0
+$prefix {{ CMD }} --variant=apt --mode={{ MODE }} --include="/tmp/dummypkg.deb" \
+ {{ DIST }} /dev/null {{ MIRROR }} || ret=$?
+
+if [ "$ret" -eq 0 ]; then
+ echo "expected failure but got exit $ret" >&2
+ exit 1
+fi
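The chmod 600 line is what makes the run fail: with owner-only permissions the local .deb is not readable by the unshared user, so the --include must error out. A minimal sketch of the underlying permission check, assuming it runs as root and that a user named "user" exists:
chmod 600 /tmp/dummypkg.deb
runuser -u user -- cat /tmp/dummypkg.deb >/dev/null   # fails with "Permission denied"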
diff --git a/tests/variant-custom-timeout b/tests/variant-custom-timeout
new file mode 100644
index 0000000..06f0b42
--- /dev/null
+++ b/tests/variant-custom-timeout
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+
+# mmdebstrap used to hang forever if apt in the custom variant failed to resolve
+# dependencies, because a process died without cleaning up its children.
+# https://bugs.debian.org/1017795
+ret=0
+{{ CMD }} --mode={{ MODE }} --variant=custom \
+ --include=this-package-does-not-exist {{ DIST }} /dev/null {{ MIRROR }} || ret=1
+[ $ret -eq 1 ]
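Because the regression being tested is a hang rather than a crash, a defensive variant of this check could additionally bound the runtime with timeout(1); a sketch of that idea (the 600-second limit is an arbitrary assumption, not something the test suite does):
ret=0
timeout 600 {{ CMD }} --mode={{ MODE }} --variant=custom \
    --include=this-package-does-not-exist {{ DIST }} /dev/null {{ MIRROR }} || ret=$?
# 124 means the timeout expired (a hang); any other non-zero status is the expected apt failure
[ "$ret" -ne 0 ] && [ "$ret" -ne 124 ]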
diff --git a/tests/verbose b/tests/verbose
new file mode 100644
index 0000000..b0b0fb9
--- /dev/null
+++ b/tests/verbose
@@ -0,0 +1,17 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+export SOURCE_DATE_EPOCH={{ SOURCE_DATE_EPOCH }}
+
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+
+# we use variant standard in verbose mode to see the maximum number of packages
+# that gets chosen in case of USE_HOST_APT_CONFIG=yes;
+# we use variant important on arches where variant standard is not bit-by-bit
+# reproducible due to #1031276
+case {{ VARIANT }} in standard|-) : ;; *) exit 1;; esac
+
+{{ CMD }} --variant={{ VARIANT }} --verbose {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+
+cmp ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.tar /tmp/debian-chroot.tar \
+ || diffoscope ./cache/mmdebstrap-{{ DIST }}-{{ VARIANT }}.tar /tmp/debian-chroot.tar
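The final comparison follows a pattern used throughout these tests: a cheap bit-by-bit cmp runs first, and only when it fails is diffoscope invoked to produce a readable report, whose non-zero exit status then fails the test. A minimal sketch of the pattern (file names are placeholders):
cmp expected.tar actual.tar || diffoscope expected.tar actual.tar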
diff --git a/tests/version b/tests/version
new file mode 100644
index 0000000..cae04a6
--- /dev/null
+++ b/tests/version
@@ -0,0 +1,6 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+# we redirect to /dev/null instead of using --quiet to avoid a broken pipe
+# when grep exits before mmdebstrap has written all of its output
+{{ CMD }} --version | grep -E '^mmdebstrap [0-9](\.[0-9])+$' >/dev/null
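The comment above refers to the usual SIGPIPE pitfall: grep --quiet exits as soon as it finds a match, so a writer with output still pending can be killed by SIGPIPE, whereas a plain grep redirected to /dev/null drains the whole pipe. A minimal illustration (seq stands in for mmdebstrap here):
seq 1 1000000 | grep -q 1           # grep stops after the first match; seq may receive SIGPIPE
seq 1 1000000 | grep 1 >/dev/null   # grep reads everything; the writer always finishes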
diff --git a/tests/without-etc-resolv-conf-and-etc-hostname b/tests/without-etc-resolv-conf-and-etc-hostname
new file mode 100644
index 0000000..1d3dfef
--- /dev/null
+++ b/tests/without-etc-resolv-conf-and-etc-hostname
@@ -0,0 +1,14 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+if [ ! -e /mmdebstrap-testenv ]; then
+ echo "this test modifies the system and should only be run inside a container" >&2
+ exit 1
+fi
+trap "rm -f /tmp/debian-chroot.tar" EXIT INT TERM
+rm /etc/resolv.conf /etc/hostname
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar {{ MIRROR }}
+{ tar -tf /tmp/debian-chroot.tar;
+ printf "./etc/hostname\n";
+ printf "./etc/resolv.conf\n";
+} | sort | diff -u tar1.txt -
diff --git a/tests/xz-compressed-tarball b/tests/xz-compressed-tarball
new file mode 100644
index 0000000..6dc6a37
--- /dev/null
+++ b/tests/xz-compressed-tarball
@@ -0,0 +1,7 @@
+#!/bin/sh
+set -eu
+export LC_ALL=C.UTF-8
+trap "rm -f /tmp/debian-chroot.tar.xz" EXIT INT TERM
+{{ CMD }} --mode={{ MODE }} --variant=apt {{ DIST }} /tmp/debian-chroot.tar.xz {{ MIRROR }}
+printf '\3757zXZ\0' | cmp --bytes=6 /tmp/debian-chroot.tar.xz -
+tar -tf /tmp/debian-chroot.tar.xz | sort | diff -u tar1.txt -
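The printf/cmp line verifies the xz container magic: every .xz stream begins with the six bytes fd 37 7a 58 5a 00, and '\375' is simply 0xfd written in octal. A quick way to confirm those bytes, purely as an illustration:
printf 'hello' | xz | od -An -tx1 | head -n1   # output starts with fd 37 7a 58 5a 00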